Example #1
0
    def add(src, description, kernel=None, ramdisk=None, public=True):
        """adds an image to imagestore

        @type src: str
        @param src: location of the partition image on disk

        @type description: str
        @param description: string describing the image contents

        @type kernel: bool or str
        @param kernel: either TRUE meaning this partition is a kernel image or
                       a string of the image id for the kernel

        @type ramdisk: bool or str
        @param ramdisk: either TRUE meaning this partition is a ramdisk image
                        or a string of the image id for the ramdisk

        @type public: bool
        @param public: determine if this is a public image or private

        @rtype: str
        @return: a string with the image id
        """
        # Default to a machine image; a literal True for kernel/ramdisk
        # switches the type (ramdisk wins if both are True, matching the
        # original check order).
        image_type = "machine"
        image_id = utils.generate_uid("ami")

        if kernel is True:
            image_type = "kernel"
            image_id = utils.generate_uid("aki")
        if ramdisk is True:
            image_type = "ramdisk"
            image_id = utils.generate_uid("ari")

        image_path = os.path.join(FLAGS.images_path, image_id)
        os.makedirs(image_path)

        shutil.copyfile(src, os.path.join(image_path, "image"))

        info = {
            "imageId": image_id,
            "imageLocation": description,
            "imageOwnerId": "system",
            "isPublic": public,
            "architecture": "x86_64",
            "imageType": image_type,
            "state": "available",
        }

        # A non-empty string links this image to an existing kernel/ramdisk
        # id.  isinstance() is the idiomatic type test (type(x) is str also
        # rejects str subclasses); truthiness covers the empty string.
        if isinstance(kernel, str) and kernel:
            info["kernelId"] = kernel

        if isinstance(ramdisk, str) and ramdisk:
            info["ramdiskId"] = ramdisk

        with open(os.path.join(image_path, "info.json"), "w") as f:
            json.dump(info, f)

        return image_id
Example #2
0
    def add(src, description, kernel=None, ramdisk=None, public=True):
        """adds an image to imagestore

        @type src: str
        @param src: location of the partition image on disk

        @type description: str
        @param description: string describing the image contents

        @type kernel: bool or str
        @param kernel: either TRUE meaning this partition is a kernel image or
                       a string of the image id for the kernel

        @type ramdisk: bool or str
        @param ramdisk: either TRUE meaning this partition is a ramdisk image
                        or a string of the image id for the ramdisk

        @type public: bool
        @param public: determine if this is a public image or private

        @rtype: str
        @return: a string with the image id
        """
        # Default to a machine image; a literal True for kernel/ramdisk
        # switches the type (ramdisk wins if both are True, matching the
        # original check order).
        image_type = 'machine'
        image_id = utils.generate_uid('ami')

        if kernel is True:
            image_type = 'kernel'
            image_id = utils.generate_uid('aki')
        if ramdisk is True:
            image_type = 'ramdisk'
            image_id = utils.generate_uid('ari')

        image_path = os.path.join(FLAGS.images_path, image_id)
        os.makedirs(image_path)

        shutil.copyfile(src, os.path.join(image_path, 'image'))

        info = {
            'imageId': image_id,
            'imageLocation': description,
            'imageOwnerId': 'system',
            'isPublic': public,
            'architecture': 'x86_64',
            'imageType': image_type,
            'state': 'available'
        }

        # A non-empty string links this image to an existing kernel/ramdisk
        # id.  isinstance() is the idiomatic type test (type(x) is str also
        # rejects str subclasses); truthiness covers the empty string.
        if isinstance(kernel, str) and kernel:
            info['kernelId'] = kernel

        if isinstance(ramdisk, str) and ramdisk:
            info['ramdiskId'] = ramdisk

        with open(os.path.join(image_path, 'info.json'), "w") as f:
            json.dump(info, f)

        return image_id
Example #3
0
    def add(src, description, kernel=None, ramdisk=None, public=True):
        """adds an image to imagestore

        @type src: str
        @param src: location of the partition image on disk

        @type description: str
        @param description: string describing the image contents

        @type kernel: bool or str
        @param kernel: either TRUE meaning this partition is a kernel image or
                       a string of the image id for the kernel

        @type ramdisk: bool or str
        @param ramdisk: either TRUE meaning this partition is a ramdisk image or
                        a string of the image id for the ramdisk

        @type public: bool
        @param public: determine if this is a public image or private

        @rtype: str
        @return: a string with the image id
        """
        # Default to a machine image; a literal True for kernel/ramdisk
        # switches the type (ramdisk wins if both are True, matching the
        # original check order).
        image_type = 'machine'
        image_id = utils.generate_uid('ami')

        if kernel is True:
            image_type = 'kernel'
            image_id = utils.generate_uid('aki')
        if ramdisk is True:
            image_type = 'ramdisk'
            image_id = utils.generate_uid('ari')

        image_path = os.path.join(FLAGS.images_path, image_id)
        os.makedirs(image_path)

        shutil.copyfile(src, os.path.join(image_path, 'image'))

        info = {
            'imageId': image_id,
            'imageLocation': description,
            'imageOwnerId': 'system',
            'isPublic': public,
            'architecture': 'x86_64',
            'type': image_type,
            'state': 'available'
        }

        # A non-empty string links this image to an existing kernel/ramdisk
        # id.  isinstance() is the idiomatic type test (type(x) is str also
        # rejects str subclasses); truthiness covers the empty string.
        if isinstance(kernel, str) and kernel:
            info['kernelId'] = kernel

        if isinstance(ramdisk, str) and ramdisk:
            info['ramdiskId'] = ramdisk

        with open(os.path.join(image_path, 'info.json'), "w") as f:
            json.dump(info, f)

        return image_id
Example #4
0
    def fake_instance_create(values):
        """Stubs out the db.instance_create method."""
        # Sizing fields are derived from the named instance type.
        flavor = INSTANCE_TYPES[values["instance_type"]]
        launched_at = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

        # Caller-supplied fields plus the generated ids/timestamps a real
        # instance_create would persist.
        stub_row = {
            "name": values["name"],
            "id": values["id"],
            "reservation_id": utils.generate_uid("r"),
            "image_id": values["image_id"],
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "state_description": "scheduling",
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "launch_time": launched_at,
            "instance_type": values["instance_type"],
            "memory_mb": flavor["memory_mb"],
            "mac_address": values["mac_address"],
            "vcpus": flavor["vcpus"],
            "local_gb": flavor["local_gb"],
            "os_type": values["os_type"],
        }
        return FakeModel(stub_row)
Example #5
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Without a flavor there is nothing sensible to build; fall through
        # to the implicit None the callers expect.
        if 'instance_type' not in values:
            return

        instance_type = values['instance_type']

        row = {
            'name': values['name'],
            'id': values['id'],
            'uuid': str(uuid.uuid4()),
            'reservation_id': utils.generate_uid('r'),
            'image_ref': values['image_ref'],
            'kernel_id': values['kernel_id'],
            'ramdisk_id': values['ramdisk_id'],
            'vm_state': vm_states.BUILDING,
            'task_state': task_states.SCHEDULING,
            'user_id': values['user_id'],
            'project_id': values['project_id'],
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                         time.gmtime()),
            'instance_type': instance_type,
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'mac_addresses': [{'address': values['mac_address']}],
            'root_gb': instance_type['root_gb'],
        }
        return FakeModel(row)
Example #6
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Sizing fields are derived from the named instance type.
        flavor = INSTANCE_TYPES[values["instance_type"]]

        record = {
            "name": values["name"],
            "id": values["id"],
            "uuid": utils.gen_uuid(),
            "reservation_id": utils.generate_uid("r"),
            "image_ref": values["image_ref"],
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "vm_state": vm_states.BUILDING,
            "task_state": task_states.SCHEDULING,
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "instance_type": values["instance_type"],
            "memory_mb": flavor["memory_mb"],
            "vcpus": flavor["vcpus"],
            "mac_addresses": [{"address": values["mac_address"]}],
            "root_gb": flavor["root_gb"],
        }
        return FakeModel(record)
Example #7
0
    def create(self, req, body):
        """Inject default reservation scheduler hints into a server-create
        request body, then yield to the wrapped create call.

        Caller-supplied 'lease_params' hints are never overwritten.
        """
        default_hints = {'reserved': True}

        # Total lease length is the sum of the three configured deltas.
        delta = (datetime.timedelta(days=CONF.reservation_length_days) +
                 datetime.timedelta(hours=CONF.reservation_length_hours) +
                 datetime.timedelta(minutes=CONF.reservation_length_minutes))
        if CONF.reservation_start_date == 'now':
            base = datetime.datetime.utcnow()
        else:
            base = datetime.datetime.strptime(CONF.reservation_start_date,
                                              "%Y-%m-%d %H:%M")
        # NOTE(review): 'start' is serialized verbatim (possibly the literal
        # string 'now') while 'end' is computed from `base` -- confirm the
        # lease service accepts 'now' as a start value.
        default_hints['lease_params'] = jsonutils.dumps({
            'name': utils.generate_uid('lease', size=6),
            'start': CONF.reservation_start_date,
            'end': (base + delta).strftime('%Y-%m-%d %H:%M')
        })
        if 'server' in body:
            if 'scheduler_hints' in body['server']:
                if 'lease_params' not in body['server']['scheduler_hints']:
                    body['server']['scheduler_hints'].update(default_hints)
            else:
                body['server']['scheduler_hints'] = default_hints
        else:
            # Hints may also arrive under either extension-style top-level key.
            attr = '%s:scheduler_hints' % 'OS-SCH-HNT'
            if ('os:scheduler_hints' in body and
                    'lease_params' not in body['os:scheduler_hints']):
                body['os:scheduler_hints'].update(default_hints)
            elif attr in body and 'lease_params' not in body[attr]:
                body[attr].update(default_hints)
        yield
Example #8
0
 def run_instances(self, context, **kwargs):
     """Launch max_count instances and return the formatted reservation.

     Checks image access, allocates per-instance network resources,
     persists each instance record, and casts a run_instance message to
     the compute topic for each one.
     """
     # make sure user can access the image
     # vpn image is private so it doesn't show up on lists
     if kwargs['image_id'] != FLAGS.vpn_image_id:
         # Raises from _get_image if the user may not access the image.
         image = self._get_image(context, kwargs['image_id'])
     logging.debug("Going to run instances...")
     reservation_id = utils.generate_uid('r')
     launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
     key_data = None
     # 'in' instead of dict.has_key(), which was removed in Python 3.
     if 'key_name' in kwargs:
         key_pair = context.user.get_key_pair(kwargs['key_name'])
         if not key_pair:
             raise exception.ApiError('Key Pair %s not found' %
                                      kwargs['key_name'])
         key_data = key_pair.public_key
     # TODO: Get the real security group of launch in here
     security_group = "default"
     bridge_name = network.BridgedNetwork.get_network_for_project(
         context.user.id, context.project.id, security_group)['bridge_name']
     for num in range(int(kwargs['max_count'])):
         inst = self.instdir.new()
         # TODO(ja): add ari, aki
         inst['image_id'] = kwargs['image_id']
         if 'kernel_id' in kwargs:
             inst['kernel_id'] = kwargs['kernel_id']
         if 'ramdisk_id' in kwargs:
             inst['ramdisk_id'] = kwargs['ramdisk_id']
         inst['user_data'] = kwargs.get('user_data', '')
         inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
         inst['reservation_id'] = reservation_id
         inst['launch_time'] = launch_time
         inst['key_data'] = key_data or ''
         inst['key_name'] = kwargs.get('key_name', '')
         inst['user_id'] = context.user.id
         inst['project_id'] = context.project.id
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = num
         inst['bridge_name'] = bridge_name
         # VPN instances draw from a dedicated address pool.
         if inst['image_id'] == FLAGS.vpn_image_id:
             address = network.allocate_vpn_ip(inst['user_id'],
                                               inst['project_id'],
                                               mac=inst['mac_address'])
         else:
             address = network.allocate_ip(inst['user_id'],
                                           inst['project_id'],
                                           mac=inst['mac_address'])
         inst['private_dns_name'] = str(address)
         # TODO: allocate expresses on the router node
         inst.save()
         rpc.cast(
             FLAGS.compute_topic, {
                 "method": "run_instance",
                 "args": {
                     "instance_id": inst.instance_id
                 }
             })
         logging.debug("Casting to node for %s's instance with IP of %s" %
                       (context.user.name, inst['private_dns_name']))
     # TODO: Make the NetworkComputeNode figure out the network name from ip.
     return defer.succeed(self._format_instances(context, reservation_id))
Example #9
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Sizing fields are derived from the named instance type.
        flavor = INSTANCE_TYPES[values["instance_type"]]

        # Returned as a plain dict rather than wrapped in FakeModel.
        record = {
            "name": values["name"],
            "id": values["id"],
            "uuid": values["uuid"],
            "reservation_id": utils.generate_uid("r"),
            "image_ref": values["image_ref"],
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "vm_state": vm_states.BUILDING,
            "task_state": task_states.SCHEDULING,
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "instance_type": values["instance_type"],
            "memory_mb": flavor["memory_mb"],
            "vcpus": flavor["vcpus"],
            "mac_addresses": [{"address": values["mac_address"]}],
            "root_gb": flavor["root_gb"],
            "node": values["node"],
        }
        return record
Example #10
0
 def _create_volume(self, size):
     """Provision a new logical volume of `size` and export it.

     Generates a fresh 'vol'-prefixed volume id, creates the LV, sets up
     the export, marks the volume available, and persists the record.
     """
     self.size = size
     self.volume_id = generate_uid('vol')
     self._create_lv(size)
     self._setup_export()
     # Only flip to available (and save) after the LV and export succeed.
     self.status = "available"
     self.save()
Example #11
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Sizing fields are derived from the named flavor.
        flavor_info = FLAVORS[values["flavor"]]

        # Returned as a plain dict rather than wrapped in FakeModel.
        record = {
            "name": values["name"],
            "id": values["id"],
            "uuid": values["uuid"],
            "reservation_id": utils.generate_uid("r"),
            "image_ref": values.get("image_ref"),
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "vm_state": vm_states.BUILDING,
            "task_state": task_states.SCHEDULING,
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "flavor": values["flavor"],
            "memory_mb": flavor_info["memory_mb"],
            "vcpus": flavor_info["vcpus"],
            "mac_addresses": [{"address": values["mac_address"]}],
            "root_gb": flavor_info["root_gb"],
            "node": values["node"],
            "metadata": [],
        }
        return record
Example #12
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Sizing fields are derived from the named instance type.
        type_info = INSTANCE_TYPES[values['instance_type']]

        row = {
            'name': values['name'],
            'id': values['id'],
            'uuid': values['uuid'],
            'reservation_id': utils.generate_uid('r'),
            'image_ref': values['image_ref'],
            'kernel_id': values['kernel_id'],
            'ramdisk_id': values['ramdisk_id'],
            'vm_state': vm_states.BUILDING,
            'task_state': task_states.SCHEDULING,
            'user_id': values['user_id'],
            'project_id': values['project_id'],
            'instance_type': values['instance_type'],
            'memory_mb': type_info['memory_mb'],
            'vcpus': type_info['vcpus'],
            'mac_addresses': [{'address': values['mac_address']}],
            'root_gb': type_info['root_gb'],
            'node': values['node'],
        }
        return FakeModel(row)
Example #13
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Without a flavor there is nothing sensible to build; fall through
        # to the implicit None the callers expect.
        if "instance_type" not in values:
            return

        instance_type = values["instance_type"]

        row = {
            "name": values["name"],
            "id": values["id"],
            "uuid": str(uuid.uuid4()),
            "reservation_id": utils.generate_uid("r"),
            "image_ref": values["image_ref"],
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "vm_state": vm_states.BUILDING,
            "task_state": task_states.SCHEDULING,
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "instance_type": instance_type,
            "memory_mb": instance_type["memory_mb"],
            "vcpus": instance_type["vcpus"],
            "mac_addresses": [{"address": values["mac_address"]}],
            "root_gb": instance_type["root_gb"],
        }
        return FakeModel(row)
Example #14
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Without a flavor there is nothing sensible to build; fall through
        # to the implicit None the callers expect.
        if "flavor" not in values:
            return

        flavor = values["flavor"]

        row = {
            "name": values["name"],
            "id": values["id"],
            "uuid": str(uuid.uuid4()),
            "reservation_id": utils.generate_uid("r"),
            "image_ref": values["image_ref"],
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "vm_state": vm_states.BUILDING,
            "task_state": task_states.SCHEDULING,
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "flavor": flavor,
            "memory_mb": flavor["memory_mb"],
            "vcpus": flavor["vcpus"],
            "mac_addresses": [{"address": values["mac_address"]}],
            "root_gb": flavor["root_gb"],
            # Fixed system metadata expected by the code under test.
            "system_metadata": {"image_shutdown_timeout": 0},
        }
        return FakeModel(row)
Example #15
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Without a flavor there is nothing sensible to build; fall through
        # to the implicit None the callers expect.
        if "instance_type" not in values:
            return

        instance_type = values["instance_type"]

        row = {
            "name": values["name"],
            "id": values["id"],
            "uuid": str(uuid.uuid4()),
            "reservation_id": utils.generate_uid("r"),
            "image_ref": values["image_ref"],
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "vm_state": vm_states.BUILDING,
            "task_state": task_states.SCHEDULING,
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "instance_type": instance_type,
            "memory_mb": instance_type["memory_mb"],
            "vcpus": instance_type["vcpus"],
            "mac_addresses": [{"address": values["mac_address"]}],
            "root_gb": instance_type["root_gb"],
        }
        return FakeModel(row)
Example #16
0
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Without a flavor there is nothing sensible to build; fall through
        # to the implicit None the callers expect.
        if "flavor" not in values:
            return

        flavor = values["flavor"]

        row = {
            "name": values["name"],
            "id": values["id"],
            "uuid": str(uuid.uuid4()),
            "reservation_id": utils.generate_uid("r"),
            "image_ref": values["image_ref"],
            "kernel_id": values["kernel_id"],
            "ramdisk_id": values["ramdisk_id"],
            "vm_state": vm_states.BUILDING,
            "task_state": task_states.SCHEDULING,
            "user_id": values["user_id"],
            "project_id": values["project_id"],
            "flavor": flavor,
            "memory_mb": flavor["memory_mb"],
            "vcpus": flavor["vcpus"],
            "mac_addresses": [{"address": values["mac_address"]}],
            "root_gb": flavor["root_gb"],
        }
        return FakeModel(row)
Example #17
0
 def _create_volume(self, size):
     """Provision a new logical volume of `size` and export it.

     Generates a fresh 'vol'-prefixed volume id, creates the LV, sets up
     the export, marks the volume available, and persists the record.
     """
     self.size = size
     self.volume_id = generate_uid('vol')
     self._create_lv(size)
     self._setup_export()
     # Only flip to available (and save) after the LV and export succeed.
     self.status = "available"
     self.save()
Example #18
0
 def register(self, context, image_location):
     """ rpc call to register a new image based from a manifest """
     new_image_id = utils.generate_uid('ami')
     # Registration is a PUT against the reserved _images bucket.
     query = self._qs({'image_location': image_location,
                       'image_id': new_image_id})
     self._conn(context).make_request(method='PUT',
                                      bucket='_images',
                                      query_args=query)
     return new_image_id
Example #19
0
def register(context, image_location):
    """ rpc call to register a new image based from a manifest """
    new_image_id = utils.generate_uid("ami")
    # Registration is a PUT against the reserved _images bucket.
    params = qs({"image_location": image_location,
                 "image_id": new_image_id})
    conn(context).make_request(method="PUT", bucket="_images",
                               query_args=params)
    return new_image_id
Example #20
0
    def _copy_instance(self, context, instance_uuid, new_name, launch=False, new_user_data=None, security_groups=None):
        """Create a new instance record cloned from instance_uuid.

        @param new_name: display name for the copy (also used for hostname)
        @param launch: when True the copy is tagged launched_from the source,
                       otherwise blessed_from
        @param new_user_data: optional user_data override for the copy
        @param security_groups: groups for the copy; defaults to the source
                                instance's groups
        @return: the freshly created (and re-read) instance ref
        """
        # (dscannell): Basically we want to copy all of the information from
        # instance with id=instance_uuid into a new instance. This is because we
        # are basically "cloning" the vm as far as all the properties are
        # concerned.

        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
        image_ref = instance_ref.get('image_ref', '')
        if image_ref == '':
            # Older records carry image_id instead of image_ref.
            image_ref = instance_ref.get('image_id', '')

        if launch:
            metadata = {'launched_from':'%s' % (instance_ref['uuid'])}
        else:
            metadata = {'blessed_from':'%s' % (instance_ref['uuid'])}

        instance = {
           'reservation_id': utils.generate_uid('r'),
           'image_ref': image_ref,
           'vm_state': vm_states.BUILDING,
           'state_description': 'halted',
           'user_id': context.user_id,
           'project_id': context.project_id,
           'launch_time': '',
           'instance_type_id': instance_ref['instance_type_id'],
           'memory_mb': instance_ref['memory_mb'],
           'vcpus': instance_ref['vcpus'],
           'root_gb': instance_ref['root_gb'],
           'ephemeral_gb': instance_ref['ephemeral_gb'],
           'display_name': new_name,
           'hostname': utils.sanitize_hostname(new_name),
           'display_description': instance_ref['display_description'],
           'user_data': new_user_data or '',
           'key_name': instance_ref.get('key_name', ''),
           'key_data': instance_ref.get('key_data', ''),
           'locked': False,
           'metadata': metadata,
           'availability_zone': instance_ref['availability_zone'],
           'os_type': instance_ref['os_type'],
           'host': None,
        }
        new_instance_ref = self.db.instance_create(context, instance)

        # (dscannell) We need to reload the instance_ref in order for it to be associated with
        # the database session of lazy-loading.
        new_instance_ref = self.db.instance_get(context, new_instance_ref.id)

        elevated = context.elevated()
        # Identity comparison with None uses 'is', not '==' (PEP 8).
        if security_groups is None:
            security_groups = self.db.security_group_get_by_instance(context, instance_ref['id'])
        for security_group in security_groups:
            self.db.instance_add_security_group(elevated,
                                                new_instance_ref['uuid'],
                                                security_group['id'])

        return new_instance_ref
Example #21
0
File: cloud.py Project: sorenh/cc
 def run_instances(self, context, **kwargs):
     """Launch max_count instances and return the formatted reservation.

     Checks image access, allocates per-instance network resources,
     persists each instance record, and casts a run_instance message to
     the compute topic for each one.
     """
     # make sure user can access the image
     # vpn image is private so it doesn't show up on lists
     if kwargs['image_id'] != FLAGS.vpn_image_id:
         # Raises from _get_image if the user may not access the image.
         image = self._get_image(context, kwargs['image_id'])
     logging.debug("Going to run instances...")
     reservation_id = utils.generate_uid('r')
     launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
     key_data = None
     # 'in' instead of dict.has_key(), which was removed in Python 3.
     if 'key_name' in kwargs:
         key_pair = context.user.get_key_pair(kwargs['key_name'])
         if not key_pair:
             raise exception.ApiError('Key Pair %s not found' %
                                      kwargs['key_name'])
         key_data = key_pair.public_key
     # TODO: Get the real security group of launch in here
     security_group = "default"
     bridge_name = network.BridgedNetwork.get_network_for_project(
         context.user.id, context.project.id, security_group)['bridge_name']
     for num in range(int(kwargs['max_count'])):
         inst = self.instdir.new()
         # TODO(ja): add ari, aki
         inst['image_id'] = kwargs['image_id']
         if 'kernel_id' in kwargs:
             inst['kernel_id'] = kwargs['kernel_id']
         if 'ramdisk_id' in kwargs:
             inst['ramdisk_id'] = kwargs['ramdisk_id']
         inst['user_data'] = kwargs.get('user_data', '')
         inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
         inst['reservation_id'] = reservation_id
         inst['launch_time'] = launch_time
         inst['key_data'] = key_data or ''
         inst['key_name'] = kwargs.get('key_name', '')
         inst['user_id'] = context.user.id
         inst['project_id'] = context.project.id
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = num
         inst['bridge_name'] = bridge_name
         # VPN instances draw from a dedicated address pool.
         if inst['image_id'] == FLAGS.vpn_image_id:
             address = network.allocate_vpn_ip(
                     inst['user_id'], inst['project_id'], mac=inst['mac_address'])
         else:
             address = network.allocate_ip(
                     inst['user_id'], inst['project_id'], mac=inst['mac_address'])
         inst['private_dns_name'] = str(address)
         # TODO: allocate expresses on the router node
         inst.save()
         rpc.cast(FLAGS.compute_topic,
              {"method": "run_instance",
               "args": {"instance_id" : inst.instance_id}})
         logging.debug("Casting to node for %s's instance with IP of %s" %
                   (context.user.name, inst['private_dns_name']))
     # TODO: Make the NetworkComputeNode figure out the network name from ip.
     return defer.succeed(self._format_instances(
                             context, reservation_id))
Example #22
0
def register(user, image_location):
    """ rpc call to register a new image based from a manifest """
    new_id = generate_uid('ami')
    # Registration is a PUT against the reserved _images bucket.
    query = qs({'image_location': image_location,
                'image_id': new_id})
    conn(user).make_request(method='PUT', bucket='_images',
                            query_args=query)
    return new_id
Example #23
0
 def _create_reservation(self, user, launchstate):
     """Stamp launchstate with owner, reservation id, launch time and
     (optionally) key data, then return the mutated dict.

     Raises ApiError when the requested key pair does not exist.
     """
     launchstate['owner_id'] = user.id
     launchstate['reservation_id'] = generate_uid('r')
     launchstate['launch_time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
     # 'in' instead of dict.has_key(), which was removed in Python 3.
     if 'key_name' in launchstate:
         key_pair = user.get_key_pair(launchstate['key_name'])
         if not key_pair:
             raise exception.ApiError('Key Pair %s not found' %
                                      launchstate['key_name'])
         launchstate['key_data'] = key_pair.public_key
     return launchstate
Example #24
0
def register(context, image_location):
    """Register a new image from a manifest via an RPC PUT request.

    Returns the generated 'ami' image id.
    """
    image_id = utils.generate_uid('ami')
    query = qs({'image_location': image_location, 'image_id': image_id})
    connection = conn(context)
    connection.make_request(method='PUT', bucket='_images', query_args=query)
    return image_id
Example #25
0
 def _create_reservation(self, user, launchstate):
     """Populate launchstate with reservation metadata for a run request.

     Sets the owner, a fresh reservation id and an ISO-8601 UTC launch
     time; copies the user's public key data when a key pair is named.

     :param user: user object providing ``id`` and ``get_key_pair()``
     :param launchstate: dict of launch parameters, mutated in place
     :returns: the same launchstate dict
     :raises exception.ApiError: if the named key pair does not exist
     """
     launchstate['owner_id'] = user.id
     launchstate['reservation_id'] = generate_uid('r')
     launchstate['launch_time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                                time.gmtime())
     # dict.has_key() was removed in Python 3; 'in' works in both 2 and 3.
     if 'key_name' in launchstate:
         key_pair = user.get_key_pair(launchstate['key_name'])
         if not key_pair:
             raise exception.ApiError('Key Pair %s not found' %
                                      launchstate['key_name'])
         launchstate['key_data'] = key_pair.public_key
     return launchstate
Example #26
0
    def _copy_instance(self, context, instance_uuid, new_suffix, launch=False):
        """Clone the instance identified by instance_uuid into a new record.

        (dscannell): we want to copy all of the information from the source
        instance because we are basically "cloning" the vm as far as all the
        properties are concerned.  ``new_suffix`` is appended to the display
        name; ``launch`` selects which origin tag is written to metadata.
        """
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
        image_ref = instance_ref.get("image_ref", "")
        if image_ref == "":
            image_ref = instance_ref.get("image_id", "")

        # Tag the copy with its origin so the relationship is queryable.
        origin_key = "launched_from" if launch else "blessed_from"
        metadata = {origin_key: "%s" % (instance_ref["uuid"])}

        new_values = {
            "reservation_id": utils.generate_uid("r"),
            "image_ref": image_ref,
            "vm_state": vm_states.BUILDING,
            "state_description": "halted",
            "user_id": context.user_id,
            "project_id": context.project_id,
            "launch_time": "",
            "instance_type_id": instance_ref["instance_type_id"],
            "memory_mb": instance_ref["memory_mb"],
            "vcpus": instance_ref["vcpus"],
            "root_gb": instance_ref["root_gb"],
            "ephemeral_gb": instance_ref["ephemeral_gb"],
            "display_name": "%s-%s" % (instance_ref["display_name"],
                                       new_suffix),
            "display_description": instance_ref["display_description"],
            "user_data": instance_ref.get("user_data", ""),
            "key_name": instance_ref.get("key_name", ""),
            "key_data": instance_ref.get("key_data", ""),
            "locked": False,
            "metadata": metadata,
            "availability_zone": instance_ref["availability_zone"],
            "os_type": instance_ref["os_type"],
            "host": None,
        }
        new_instance_ref = self.db.instance_create(context, new_values)

        # (dscannell) Reload the new record so it is associated with the
        # database session for lazy-loading.
        new_instance_ref = self.db.instance_get(context, new_instance_ref.id)

        elevated = context.elevated()
        groups = self.db.security_group_get_by_instance(context,
                                                        instance_ref["id"])
        for group in groups:
            self.db.instance_add_security_group(elevated,
                                                new_instance_ref["uuid"],
                                                group["id"])

        return new_instance_ref
Example #27
0
 def _really_run_instance(self, user, launchstate, idx):
     """Finalize launchstate with id/network details and RPC run_instance.

     Mutates and returns the launchstate dict; assumes
     'private_dns_name' and 'network_name' are already populated.
     """
     launchstate['instance_id'] = generate_uid('i')
     launchstate['ami_launch_index'] = idx
     user_network = self.network.get_users_network(str(user.id))
     launchstate['network_str'] = user_network.to_dict()
     launchstate['bridge_name'] = user_network.bridge_name
     logging.debug(
         "Casting to node for %s's instance with IP of %s in the %s network"
         % (user.name,
            launchstate['private_dns_name'],
            launchstate['network_name']))
     rpc.call(FLAGS.compute_topic,
              {"method": "run_instance", "args": launchstate})
     return launchstate
Example #28
0
 def _really_run_instance(self, user, launchstate, idx):
     """Attach instance id and network info to launchstate, then ask the
     compute node to run it via RPC.  Returns the mutated launchstate."""
     net = self.network.get_users_network(str(user.id))
     launchstate['instance_id'] = generate_uid('i')
     launchstate['ami_launch_index'] = idx
     launchstate['network_str'] = net.to_dict()
     launchstate['bridge_name'] = net.bridge_name
     logging.debug(
         "Casting to node for %s's instance with IP of %s in the %s network"
         % (user.name, launchstate['private_dns_name'],
            launchstate['network_name']))
     message = {"method": "run_instance", "args": launchstate}
     rpc.call(FLAGS.compute_topic, message)
     return launchstate
Example #29
0
 def _initialize_resource(self, resource):
     """Begin tracking a resource unless it is already tracked.

     Assigns a tracking id, persists a tracker record, and logs the
     association between the request id and the new tracking id.
     """
     if resource.tracking_id:
         # Already tracked; nothing to do.
         return
     # Have to start tracking this resource so that we can know in the
     # future what its tracking 'id' is, reference its past work, and
     # resume from previous states if need be.
     tracking_id = utils.generate_uid('r')
     resource.tracking_id = tracking_id
     tracker_record = {
         'request_uuid': self.admin_context.request_id,
         'tracking_id': tracking_id,
         'status': states.STARTING,
     }
     self.db.resource_tracker_create(self.admin_context, tracker_record)
     LOG.debug(_("Starting to track request id %s fullfillment"
                 " using tracking id %s"),
               self.admin_context.request_id, resource.tracking_id)
Example #30
0
File: storage.py Project: jxta/cc
 def create(cls, size, user_id):
     """Create a new volume of the given size for a user.

     Builds the logical volume, exports it, and returns the saved
     volume in the 'available' state.
     """
     volume_id = utils.generate_uid('vol')
     volume = cls(volume_id=volume_id)
     # TODO(vish): do we really need to store the volume id as
     # .object_id, .volume_id and ['volume_id']?
     volume['volume_id'] = volume_id
     volume['node_name'] = FLAGS.storage_name
     volume['size'] = size
     volume['user_id'] = user_id
     volume['availability_zone'] = FLAGS.storage_availability_zone
     volume["instance_id"] = 'none'
     volume["mountpoint"] = 'none'
     volume["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                           time.gmtime())
     volume["attachment_set"] = ''
     volume.create_lv()
     volume.setup_export()
     volume['status'] = "available"
     volume.save()
     return volume
Example #31
0
 def create(cls, size, user_id):
     """Allocate a new volume, export it, and mark it available."""
     new_id = utils.generate_uid('vol')
     vol = cls(volume_id=new_id)
     # TODO(vish): do we really need to store the volume id as
     # .object_id, .volume_id and ['volume_id']?
     initial_fields = [
         ('volume_id', new_id),
         ('node_name', FLAGS.storage_name),
         ('size', size),
         ('user_id', user_id),
         ('availability_zone', FLAGS.storage_availability_zone),
         ('instance_id', 'none'),
         ('mountpoint', 'none'),
         ('create_time', time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())),
         ('attachment_set', ''),
     ]
     for key, value in initial_fields:
         vol[key] = value
     vol.create_lv()
     vol.setup_export()
     vol['status'] = "available"
     vol.save()
     return vol
 def build_server_instance(self, env, context):
     """Build and persist a new instance record from a server request body.

     Copies name/image/flavor from env["server"], records the calling
     user and project, allocates a MAC and private IP, and resolves the
     project's default bridge.  Returns the saved instance.
     """
     server = env["server"]
     inst = self.instdir.new()
     inst["name"] = server["name"]
     inst["image_id"] = server["imageId"]
     inst["instance_type"] = server["flavorId"]
     inst["user_id"] = context["user"].id
     inst["project_id"] = context["project"].id
     inst["reservation_id"] = utils.generate_uid("r")
     inst["launch_time"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
     inst["mac_address"] = utils.generate_mac()
     address = network.allocate_ip(inst["user_id"], inst["project_id"],
                                   mac=inst["mac_address"])
     inst["private_dns_name"] = str(address)
     project_net = network.BridgedNetwork.get_network_for_project(
         inst["user_id"], inst["project_id"], "default")  # security group
     inst["bridge_name"] = project_net["bridge_name"]
     # key_data, key_name, ami_launch_index
     # TODO(todd): key data or root password
     inst.save()
     return inst
Example #33
0
    def backup_schedule_add(self, context, instance_uuid, params):
        """Add a backup schedule item for an instance.

        Validates that 'frequency' and 'retention' are present and
        consistent, that the schedule is not already full, and that the
        new item does not conflict with an existing one; then appends
        the item and persists the updated schedule via the driver.

        :param params: dict with 'frequency' and 'retention' (int-like)
        :returns: the driver's result of the schedule update
        :raises exception.NovaException: on any validation failure
        """
        # Idiomatic membership tests ('x not in d', not 'not x in d').
        if 'frequency' not in params:
            raise exception.NovaException(
                "Backup schedule is missing 'frequency'")
        if 'retention' not in params:
            raise exception.NovaException(
                "Backup schedule is missing 'retention'")

        frequency = int(params['frequency'])
        retention = int(params['retention'])

        # A backup must be retained at least as long as the interval
        # between backups, otherwise nothing useful is ever kept.
        if retention < frequency:
            raise exception.NovaException(
                "Invalid backup schedule: retention < frequency")

        schedule = self.driver.instance_backup_schedule(context, instance_uuid)

        # Make sure we're not already full.
        if len(schedule) >= meta.MAX_SCHEDULE_ITEMS:
            raise exception.NovaException(
                "Maximum number of schedules (%d) already reached" %
                meta.MAX_SCHEDULE_ITEMS)

        # Make sure we don't have any conflicts with existing items.
        conflict = utils.schedule_has_conflict(schedule, frequency, retention)
        if conflict:
            raise exception.NovaException(
                "Schedule conflicts with existing schedule %s" %
                conflict[meta.SCHEDULE_ID_KEY])

        # Good to go: append the new item and persist the schedule.
        schedule_id = novautils.generate_uid('b')
        new_item = {meta.SCHEDULE_ID_KEY: schedule_id,
                    meta.SCHEDULE_FREQUENCY_KEY: frequency,
                    meta.SCHEDULE_RETENTION_KEY: retention,
                    meta.SCHEDULE_ACTIVE_KEY: 1}
        schedule.append(new_item)
        return self.driver.instance_backup_schedule_update(context,
                                                           instance_uuid,
                                                           schedule)
Example #34
0
 def create(cls, size, user_id, project_id):
     """Create a new volume for a user/project, build its LV, export it
     and return it in the 'available' state."""
     volume_id = utils.generate_uid('vol')
     volume = cls(volume_id)
     volume['node_name'] = FLAGS.storage_name
     volume['size'] = size
     volume['user_id'] = user_id
     volume['project_id'] = project_id
     volume['availability_zone'] = FLAGS.storage_availability_zone
     volume["instance_id"] = 'none'
     volume["mountpoint"] = 'none'
     volume['attach_time'] = 'none'
     volume['status'] = "creating"  # creating | available | in-use
     # attaching | attached | detaching | detached
     volume['attach_status'] = "detached"
     volume['delete_on_termination'] = 'False'
     volume.save()
     volume.create_lv()
     volume._setup_export()
     # TODO(joshua): trigger a fanout message for aoe-discover on all
     # the nodes so they pick up the new export.
     volume['status'] = "available"
     volume.save()
     return volume
Example #35
0
    def fake_instance_create(values):
        """Stubs out the db.instance_create method."""
        flavor = INSTANCE_TYPES[values['instance_type']]
        launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        options = dict(
            name=values['name'],
            id=values['id'],
            reservation_id=utils.generate_uid('r'),
            image_id=values['image_id'],
            kernel_id=values['kernel_id'],
            ramdisk_id=values['ramdisk_id'],
            state_description='scheduling',
            user_id=values['user_id'],
            project_id=values['project_id'],
            launch_time=launch_time,
            instance_type=values['instance_type'],
            memory_mb=flavor['memory_mb'],
            mac_address=values['mac_address'],
            vcpus=flavor['vcpus'],
            local_gb=flavor['local_gb'],
        )
        return FakeModel(options)
Example #36
0
 def create(cls, size, user_id, project_id):
     """Allocate, export and return a new volume marked 'available'."""
     new_id = utils.generate_uid('vol')
     vol = cls(volume_id=new_id)
     fields = [
         ('volume_id', new_id),
         ('node_name', FLAGS.storage_name),
         ('size', size),
         ('user_id', user_id),
         ('project_id', project_id),
         ('availability_zone', FLAGS.storage_availability_zone),
         ('instance_id', 'none'),
         ('mountpoint', 'none'),
         ('attach_time', 'none'),
         ('create_time', time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())),
         ('status', "creating"),  # creating | available | in-use
         # attaching | attached | detaching | detached
         ('attach_status', "detached"),
         ('delete_on_termination', 'False'),
     ]
     for field, value in fields:
         vol[field] = value
     vol.save()
     vol.create_lv()
     vol.setup_export()
     # TODO(joshua): trigger a fanout message for aoe-discover on all
     # the nodes so they pick up the new export.
     vol['status'] = "available"
     vol.save()
     return vol
Example #37
0
    def _copy_instance(self, context, instance_uuid, new_name, launch=False,
                       new_user_data=None, security_groups=None, key_name=None,
                       launch_index=0, availability_zone=None):
        """Create a new instance record cloned from instance_uuid.

        (dscannell): Basically we want to copy all of the information from
        instance with id=instance_uuid into a new instance. This is because we
        are basically "cloning" the vm as far as all the properties are
        concerned.

        :param new_name: display name for the copy
        :param launch: True when launching a clone, False when blessing
        :param security_groups: optional explicit security group rows; when
            None the source instance's groups are copied
        :returns: the newly created (reloaded) instance ref
        """
        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
        image_ref = instance_ref.get('image_ref', '')
        if image_ref == '':
            image_ref = instance_ref.get('image_id', '')

        system_metadata = {}
        for data in instance_ref.get('system_metadata', []):
            # (dscannell) Do not copy over the system metadata that we setup
            # on an instance. This is important when doing clone-of-clones.
            if data['key'] not in ['blessed_from', 'launched_from']:
                system_metadata[data['key']] = data['value']

        metadata = {}
        # We need to record the launched_from / blessed_from in both the
        # metadata and system_metadata. It needs to be in the metadata so
        # that we can query the database to support list-blessed and
        # list-launched operations. It needs to be in the system metadata
        # so that the manager can access it.
        if launch:
            metadata['launched_from'] = '%s' % (instance_ref['uuid'])
            system_metadata['launched_from'] = '%s' % (instance_ref['uuid'])
        else:
            metadata['blessed_from'] = '%s' % (instance_ref['uuid'])
            system_metadata['blessed_from'] = '%s' % (instance_ref['uuid'])

        if key_name is None:
            key_name = instance_ref.get('key_name', '')
            key_data = instance_ref.get('key_data', '')
        else:
            key_pair = self.db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        if availability_zone is None:
            availability_zone = instance_ref['availability_zone']

        instance = {
           'reservation_id': utils.generate_uid('r'),
           'image_ref': image_ref,
           'ramdisk_id': instance_ref.get('ramdisk_id', ''),
           'kernel_id': instance_ref.get('kernel_id', ''),
           'vm_state': vm_states.BUILDING,
           'state_description': 'halted',
           'user_id': context.user_id,
           'project_id': context.project_id,
           'launch_time': '',
           'instance_type_id': instance_ref['instance_type_id'],
           'memory_mb': instance_ref['memory_mb'],
           'vcpus': instance_ref['vcpus'],
           'root_gb': instance_ref['root_gb'],
           'ephemeral_gb': instance_ref['ephemeral_gb'],
           'display_name': new_name,
           'hostname': utils.sanitize_hostname(new_name),
           'display_description': instance_ref['display_description'],
           'user_data': new_user_data or '',
           'key_name': key_name,
           'key_data': key_data,
           'locked': False,
           'metadata': metadata,
           'availability_zone': availability_zone,
           'os_type': instance_ref['os_type'],
           'host': None,
           'system_metadata': system_metadata,
           'launch_index': launch_index,
           'root_device_name': instance_ref['root_device_name'],
           'power_state': power_state.NOSTATE,
           # Set disable_terminate on bless so terminate in nova-api barfs on a
           # blessed instance.
           'disable_terminate': not launch,
        }
        new_instance_ref = self.db.instance_create(context, instance)
        nw_info = instance_ref['info_cache'].get('network_info')
        self.db.instance_info_cache_update(context, new_instance_ref['uuid'],
                                           {'network_info': nw_info})

        # (dscannell) We need to reload the instance_ref in order for it to
        # be associated with the database session of lazy-loading.
        new_instance_ref = self.db.instance_get(context, new_instance_ref.id)

        elevated = context.elevated()
        # PEP 8: compare against None with 'is', not '=='.
        if security_groups is None:
            security_groups = self.db.security_group_get_by_instance(
                context, instance_ref['uuid'])
        for security_group in security_groups:
            self.db.instance_add_security_group(elevated,
                                                new_instance_ref['uuid'],
                                                security_group['id'])

        # Create a copy of all the block device mappings
        block_device_mappings = \
            self.db.block_device_mapping_get_all_by_instance(
                context, instance_ref['uuid'])
        for mapping in block_device_mappings:
            values = {
                'instance_uuid': new_instance_ref['uuid'],
                'device_name': mapping['device_name'],
                'delete_on_termination':
                        mapping.get('delete_on_termination', True),
                'source_type': mapping.get('source_type'),
                'destination_type': mapping.get('destination_type'),
                'guest_format': mapping.get('guest_format'),
                'device_type': mapping.get('device_type'),
                'disk_bus': mapping.get('disk_bus'),
                'boot_index': mapping.get('boot_index'),
                'image_id': mapping.get('image_id'),
                # The snapshot id / volume id will be re-written once the
                # bless / launch completes. For now we just copy over the
                # data from the source instance.
                'snapshot_id': mapping.get('snapshot_id', None),
                'volume_id': mapping.get('volume_id', None),
                'volume_size': mapping.get('volume_size', None),
                'no_device': mapping.get('no_device', None),
                'connection_info': mapping.get('connection_info', None)
            }
            self.db.block_device_mapping_create(elevated, values, legacy=False)

        return new_instance_ref
Example #38
0
    def create(self, context, instance_type,
               image_id, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata=None,
               injected_files=None):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid, resolves the
        image/kernel/ramdisk and security groups, creates one database
        record per instance and casts a run_instance message to the
        scheduler for each.  Returns a list of instance dicts.
        """
        # NOTE: metadata previously defaulted to a shared mutable dict
        # ({}); a None sentinel prevents state leaking between calls.
        if metadata is None:
            metadata = {}
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s,"
                    " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                     "run %s more instances of this type.") %
                                   num_instances, "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        # The image properties may carry os type and kernel/ramdisk hints.
        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" %
                       (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        # isinstance() instead of an exact type comparison (PEP 8).
        if not isinstance(security_group, list):
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'local_gb': instance_type['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type}
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            # (elevated and security_groups are loop-invariant; the
            # redundant per-iteration re-derivation was removed.)
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name') or
                    instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id,
                               "availability_zone": availability_zone,
                               "injected_files": injected_files}})

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #39
0
 def new(self):
     """Create and return an empty Instance object with a fresh id."""
     return self.get(utils.generate_uid('i'))
Example #40
0
    def _copy_instance(self, context, instance, new_name, launch=False,
                       new_user_data=None, security_groups=None, key_name=None,
                       launch_index=0, availability_zone=None):
        """Create a new instance cloned from the provided instance.

        (OmgLag): Basically we want to copy all of the information from the
        provided instance into a new instance. This is because we are
        basically "cloning" the vm as far as all the properties are
        concerned.

        :param new_name: display name for the copy
        :param launch: True when launching a clone, False when blessing
        :param security_groups: optional security groups for the copy
        :returns: the newly created (reloaded) instance
        """
        image_ref = instance.get('image_ref', '')
        if image_ref == '':
            image_ref = instance.get('image_id', '')

        system_metadata = {}
        for data in instance.get('system_metadata', []):
            # (dscannell) Do not copy over the system metadata that we setup
            # on an instance. This is important when doing clone-of-clones.
            if data['key'] not in ['blessed_from', 'launched_from']:
                system_metadata[data['key']] = data['value']

        metadata = {}
        # We need to record the launched_from / blessed_from in both the
        # metadata and system_metadata. It needs to be in the metadata so
        # that we can query the database to support list-blessed and
        # list-launched operations. It needs to be in the system metadata
        # so that the manager can access it.
        if launch:
            metadata['launched_from'] = '%s' % (instance['uuid'])
            system_metadata['launched_from'] = '%s' % (instance['uuid'])
        else:
            metadata['blessed_from'] = '%s' % (instance['uuid'])
            system_metadata['blessed_from'] = '%s' % (instance['uuid'])

        if key_name is None:
            key_name = instance.get('key_name', '')
            key_data = instance.get('key_data', '')
        else:
            key_pair = self.db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        if availability_zone is None:
            availability_zone = instance['availability_zone']

        instance_params = {
           'reservation_id': utils.generate_uid('r'),
           'image_ref': image_ref,
           'ramdisk_id': instance.get('ramdisk_id', ''),
           'kernel_id': instance.get('kernel_id', ''),
           'vm_state': vm_states.BUILDING,
           'user_id': context.user_id,
           'project_id': context.project_id,
           'launched_at': None,
           'instance_type_id': instance['instance_type_id'],
           'memory_mb': instance['memory_mb'],
           'vcpus': instance['vcpus'],
           'root_gb': instance['root_gb'],
           'ephemeral_gb': instance['ephemeral_gb'],
           'display_name': new_name,
           'hostname': utils.sanitize_hostname(new_name),
           'display_description': instance['display_description'],
           'user_data': new_user_data or '',
           'key_name': key_name,
           'key_data': key_data,
           'locked': False,
           'metadata': metadata,
           'availability_zone': availability_zone,
           'os_type': instance['os_type'],
           'host': None,
           'system_metadata': system_metadata,
           'launch_index': launch_index,
           'root_device_name': instance['root_device_name'],
           'power_state': power_state.NOSTATE,
           'vm_mode': instance['vm_mode'],
           'architecture': instance['architecture'],
           'access_ip_v4': instance['access_ip_v4'],
           'access_ip_v6': instance['access_ip_v6'],
           'config_drive': instance['config_drive'],
           'default_ephemeral_device': instance['default_ephemeral_device'],
           'default_swap_device': instance['default_swap_device'],
           'auto_disk_config': instance['auto_disk_config'],
           # Set disable_terminate on bless so terminate in nova-api barfs on a
           # blessed instance.
           'disable_terminate': not launch,
        }

        new_instance = instance_obj.Instance()
        new_instance.update(instance_params)
        # PEP 8: compare against None with 'is not', not '!='.
        if security_groups is not None:
            self.sg_api.populate_security_groups(new_instance,
                                                 security_groups)
        new_instance.create(context)
        nw_info = instance['info_cache'].get('network_info')
        self.db.instance_info_cache_update(context, new_instance['uuid'],
                                           {'network_info': nw_info})

        # (dscannell) We need to reload the instance reference in order for
        # it to be associated with the database session of lazy-loading.
        new_instance = self.db.instance_get(context, new_instance.id)

        elevated = context.elevated()

        # Create a copy of all the block device mappings
        block_device_mappings = \
            self.db.block_device_mapping_get_all_by_instance(context,
                                                             instance['uuid'])
        block_device_mappings = \
            self._parse_block_device_mapping(block_device_mappings)
        for bdev in block_device_mappings:
            bdev['instance_uuid'] = new_instance['uuid']
            self.db.block_device_mapping_create(elevated, bdev, legacy=False)

        return new_instance
Example #41
0
File: api.py Project: ed-/reddwarf
    def create(
        self,
        context,
        instance_type,
        image_id,
        kernel_id=None,
        ramdisk_id=None,
        min_count=1,
        max_count=1,
        display_name="",
        display_description="",
        key_name=None,
        key_data=None,
        security_group="default",
        availability_zone=None,
        user_data=None,
        metadata=None,
        injected_files=None,
        admin_password=None,
    ):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid, resolves the
        kernel/ramdisk for the image, creates a DB record per instance and
        casts a ``run_instance`` request to the scheduler for each one.

        Returns a list of plain dicts, one per created instance.
        Raises quota.QuotaError when the instance quota is exceeded.
        """
        # NOTE(review): `metadata` previously defaulted to a shared mutable
        # dict ({}), which leaks state between calls if a callee mutates it.
        # Use a None sentinel and create a fresh dict per call instead.
        if metadata is None:
            metadata = {}
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

        num_instances = quota.allowed_instances(context, max_count, instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s," " tried to run %(min_count)s instances") % locals())
            if num_instances <= 0:
                message = _("Instance quota exceeded. You cannot run any " "more instances of this type.")
            else:
                message = (
                    _("Instance quota exceeded. You can only run %s " "more instances of this type.") % num_instances
                )
            raise quota.QuotaError(message, "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if "properties" in image and "os_type" in image["properties"]:
            os_type = image["properties"]["os_type"]

        # Fall back to the image's kernel/ramdisk when the caller did not
        # supply explicit ones.
        if kernel_id is None:
            kernel_id = image["properties"].get("kernel_id", None)
        if ramdisk_id is None:
            ramdisk_id = image["properties"].get("ramdisk_id", None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        # Normalize security_group to a list of names.
        if security_group is None:
            security_group = ["default"]
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id, security_group_name)
            security_groups.append(group["id"])

        # Derive key_data from the named keypair when not given directly.
        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair["public_key"]

        base_options = {
            "reservation_id": utils.generate_uid("r"),
            "image_id": image_id,
            "kernel_id": kernel_id or "",
            "ramdisk_id": ramdisk_id or "",
            "state": 0,
            "state_description": "scheduling",
            "user_id": context.user_id,
            "project_id": context.project_id,
            "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "instance_type_id": instance_type["id"],
            "memory_mb": instance_type["memory_mb"],
            "vcpus": instance_type["vcpus"],
            "local_gb": instance_type["local_gb"],
            "display_name": display_name,
            "display_description": display_description,
            "user_data": user_data or "",
            "key_name": key_name,
            "key_data": key_data,
            "locked": False,
            "metadata": metadata,
            "availability_zone": availability_zone,
            "os_type": os_type,
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(), launch_index=num, **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance["id"]

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id, security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if not hasattr(instance, "display_name") or instance.display_name is None:
                updates["display_name"] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" " instance %(instance_id)s") % locals())

            # NOTE(sandy): For now we're just going to pass in the
            # instance_type record to the scheduler. In a later phase
            # we'll be ripping this whole for-loop out and deferring the
            # creation of the Instance record. At that point all this will
            # change.
            rpc.cast(
                context,
                FLAGS.scheduler_topic,
                {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "instance_type": instance_type,
                        "availability_zone": availability_zone,
                        "injected_files": injected_files,
                        "admin_password": admin_password,
                    },
                },
            )

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #42
0
File: api.py Project: yosh/nova
    def create(self, context, instance_type,
               image_id, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata=None,
               onset_files=None):
        """Create the number of instances requested if quota and
        other arguments check out ok.

        Validates instance and metadata quotas, resolves the image's
        kernel/ramdisk, creates a DB record per instance and casts a
        ``run_instance`` request to the scheduler for each one.

        Returns a list of plain dicts, one per created instance.
        Raises quota.QuotaError on instance or metadata quota violations.
        """
        # NOTE(review): `metadata` previously defaulted to a shared mutable
        # list ([]), which leaks state between calls if mutated. Use a None
        # sentinel and create a fresh list per call instead.
        if metadata is None:
            metadata = []

        type_data = instance_types.get_instance_type(instance_type)
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s,"
                    " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                     "run %s more instances of this type.") %
                                   num_instances, "InstanceLimitExceeded")

        num_metadata = len(metadata)
        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
        if quota_metadata < num_metadata:
            pid = context.project_id
            msg = (_("Quota exceeeded for %(pid)s,"
                     " tried to set %(num_metadata)s metadata properties")
                   % locals())
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        #  as if this is quota-controlled for forwards compatibility
        for metadata_item in metadata:
            k = metadata_item['key']
            v = metadata_item['value']
            if len(k) > 255 or len(v) > 255:
                pid = context.project_id
                msg = (_("Quota exceeeded for %(pid)s,"
                         " metadata property key or value too long")
                       % locals())
                LOG.warn(msg)
                raise quota.QuotaError(msg, "MetadataLimitExceeded")

        image = self.image_service.show(context, image_id)
        # Fall back to the image's kernel/ramdisk when not supplied.
        if kernel_id is None:
            kernel_id = image.get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image.get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" %
                       (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        # Normalize security_group to a list of names.
        if security_group is None:
            security_group = ['default']
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        # Derive key_data from the named keypair when not given directly.
        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type': instance_type,
            'memory_mb': type_data['memory_mb'],
            'vcpus': type_data['vcpus'],
            'local_gb': type_data['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone}
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name') or
                    instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id,
                               "availability_zone": availability_zone,
                               "onset_files": onset_files}})

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #43
0
    def _copy_instance(self,
                       context,
                       instance_uuid,
                       new_name,
                       launch=False,
                       new_user_data=None,
                       security_groups=None):
        """Clone the instance identified by instance_uuid into a new DB record.

        Copies sizing, keys, zone and OS type from the source instance,
        records provenance ('launched_from' or 'blessed_from' depending on
        ``launch``) in the new instance's metadata, and attaches either the
        given security groups or those of the source instance.

        Returns the freshly created (and re-fetched) instance ref.
        """
        # (dscannell): Basically we want to copy all of the information from
        # instance with id=instance_uuid into a new instance. This is because we
        # are basically "cloning" the vm as far as all the properties are
        # concerned.

        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
        # Prefer the newer image_ref field; fall back to the legacy image_id.
        image_ref = instance_ref.get('image_ref', '')
        if image_ref == '':
            image_ref = instance_ref.get('image_id', '')

        if launch:
            metadata = {'launched_from': '%s' % (instance_ref['uuid'])}
        else:
            metadata = {'blessed_from': '%s' % (instance_ref['uuid'])}

        instance = {
            'reservation_id': utils.generate_uid('r'),
            'image_ref': image_ref,
            'vm_state': vm_states.BUILDING,
            'state_description': 'halted',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': '',
            'instance_type_id': instance_ref['instance_type_id'],
            'memory_mb': instance_ref['memory_mb'],
            'vcpus': instance_ref['vcpus'],
            'root_gb': instance_ref['root_gb'],
            'ephemeral_gb': instance_ref['ephemeral_gb'],
            'display_name': new_name,
            'hostname': utils.sanitize_hostname(new_name),
            'display_description': instance_ref['display_description'],
            'user_data': new_user_data or '',
            'key_name': instance_ref.get('key_name', ''),
            'key_data': instance_ref.get('key_data', ''),
            'locked': False,
            'metadata': metadata,
            'availability_zone': instance_ref['availability_zone'],
            'os_type': instance_ref['os_type'],
            'host': None,
        }
        new_instance_ref = self.db.instance_create(context, instance)

        # (dscannell) We need to reload the instance_ref in order for it to be associated with
        # the database session of lazy-loading.
        new_instance_ref = self.db.instance_get(context, new_instance_ref.id)

        elevated = context.elevated()
        # NOTE(review): use identity comparison for None (PEP 8), was `== None`.
        if security_groups is None:
            security_groups = self.db.security_group_get_by_instance(
                context, instance_ref['id'])
        for security_group in security_groups:
            self.db.instance_add_security_group(elevated,
                                                new_instance_ref['uuid'],
                                                security_group['id'])

        return new_instance_ref
Example #44
0
    def create(self,
               context,
               instance_type,
               image_id,
               kernel_id=None,
               ramdisk_id=None,
               min_count=1,
               max_count=1,
               display_name='',
               display_description='',
               key_name=None,
               key_data=None,
               security_group='default',
               availability_zone=None,
               user_data=None,
               metadata=None,
               injected_files=None):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid, resolves the
        kernel/ramdisk for the image, creates a DB record per instance and
        casts a ``run_instance`` request to the scheduler for each one.

        Returns a list of plain dicts, one per created instance.
        Raises quota.QuotaError when the instance quota is exceeded.
        """
        # NOTE(review): `metadata` previously defaulted to a shared mutable
        # dict ({}), which leaks state between calls if a callee mutates it.
        # Use a None sentinel and create a fresh dict per call instead.
        if metadata is None:
            metadata = {}
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(
                _("Quota exceeeded for %(pid)s,"
                  " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(
                _("Instance quota exceeded. You can only "
                  "run %s more instances of this type.") % num_instances,
                "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        # Fall back to the image's kernel/ramdisk when not supplied.
        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        # Normalize security_group to a list of names.
        if security_group is None:
            security_group = ['default']
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        # Derive key_data from the named keypair when not given directly.
        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'local_gb': instance_type['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name')
                    or instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(
                _("Casting to scheduler for %(pid)s/%(uid)s's"
                  " instance %(instance_id)s") % locals())
            rpc.cast(
                context, FLAGS.scheduler_topic, {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "availability_zone": availability_zone,
                        "injected_files": injected_files
                    }
                })

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #45
0
    def create(
        self,
        context,
        instance_type,
        image_id,
        kernel_id=None,
        ramdisk_id=None,
        min_count=1,
        max_count=1,
        display_name="",
        display_description="",
        key_name=None,
        key_data=None,
        security_group="default",
        availability_zone=None,
        user_data=None,
        onset_files=None,
    ):
        """Create the number of instances requested if quota and
        other arguments check out ok.

        Resolves the image's kernel/ramdisk, creates a DB record per
        instance and casts a ``run_instance`` request to the scheduler
        for each one.

        Returns a list of plain dicts, one per created instance.
        Raises quota.QuotaError when the instance quota is exceeded.
        """
        type_data = instance_types.INSTANCE_TYPES[instance_type]
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s," " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(
                _("Instance quota exceeded. You can only " "run %s more instances of this type.") % num_instances,
                "InstanceLimitExceeded",
            )

        image = self.image_service.show(context, image_id)
        # Fall back to the image's kernel/ramdisk when not supplied.
        if kernel_id is None:
            kernel_id = image.get("kernel_id", None)
        if ramdisk_id is None:
            ramdisk_id = image.get("ramdisk_id", None)
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        # Normalize security_group to a list of names.
        if security_group is None:
            security_group = ["default"]
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id, security_group_name)
            security_groups.append(group["id"])

        # Derive key_data from the named keypair when not given directly.
        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair["public_key"]

        base_options = {
            "reservation_id": utils.generate_uid("r"),
            "image_id": image_id,
            "kernel_id": kernel_id or "",
            "ramdisk_id": ramdisk_id or "",
            "state_description": "scheduling",
            "user_id": context.user_id,
            "project_id": context.project_id,
            "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "instance_type": instance_type,
            "memory_mb": type_data["memory_mb"],
            "vcpus": type_data["vcpus"],
            "local_gb": type_data["local_gb"],
            "display_name": display_name,
            "display_description": display_description,
            "user_data": user_data or "",
            "key_name": key_name,
            "key_data": key_data,
            "locked": False,
            "availability_zone": availability_zone,
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(), launch_index=num, **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance["id"]

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id, security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            # NOTE(review): use identity comparison for None (PEP 8), was `== None`.
            if not hasattr(instance, "display_name") or instance.display_name is None:
                updates["display_name"] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" " instance %(instance_id)s") % locals())
            rpc.cast(
                context,
                FLAGS.scheduler_topic,
                {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "availability_zone": availability_zone,
                        "onset_files": onset_files,
                    },
                },
            )

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #46
0
    def _copy_instance(self,
                       context,
                       instance,
                       new_name,
                       launch=False,
                       new_user_data=None,
                       security_groups=None,
                       key_name=None,
                       launch_index=0,
                       availability_zone=None):
        """Clone the given instance into a new Instance object and DB record.

        Copies sizing, device configuration, keys and zone from the source
        instance; records provenance ('launched_from' or 'blessed_from'
        depending on ``launch``) in both metadata and system_metadata; copies
        the network info cache and all block device mappings.

        Returns the freshly created (and re-fetched) instance ref.
        """
        # (OmgLag): Basically we want to copy all of the information from
        # instance with provided instance into a new instance. This is because
        # we are basically "cloning" the vm as far as all the properties are
        # concerned.

        # Prefer the newer image_ref field; fall back to the legacy image_id.
        image_ref = instance.get('image_ref', '')
        if image_ref == '':
            image_ref = instance.get('image_id', '')

        system_metadata = {}
        for data in instance.get('system_metadata', []):
            # (dscannell) Do not copy over the system metadata that we setup
            # on an instance. This is important when doing clone-of-clones.
            if data['key'] not in ['blessed_from', 'launched_from']:
                system_metadata[data['key']] = data['value']

        metadata = {}
        # We need to record the launched_from / blessed_from in both the
        # metadata and system_metadata. It needs to be in the metadata so
        # that we can we can query the database to support list-blessed
        # and list-launched operations. It needs to be in the system
        # metadata so that the manager can access it.
        if launch:
            metadata['launched_from'] = '%s' % (instance['uuid'])
            system_metadata['launched_from'] = '%s' % (instance['uuid'])
        else:
            metadata['blessed_from'] = '%s' % (instance['uuid'])
            system_metadata['blessed_from'] = '%s' % (instance['uuid'])

        # Inherit the source instance's keypair unless a new one was named.
        if key_name is None:
            key_name = instance.get('key_name', '')
            key_data = instance.get('key_data', '')
        else:
            key_pair = self.db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        if availability_zone is None:
            availability_zone = instance['availability_zone']

        instance_params = {
            'reservation_id': utils.generate_uid('r'),
            'image_ref': image_ref,
            'ramdisk_id': instance.get('ramdisk_id', ''),
            'kernel_id': instance.get('kernel_id', ''),
            'vm_state': vm_states.BUILDING,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launched_at': None,
            'instance_type_id': instance['instance_type_id'],
            'memory_mb': instance['memory_mb'],
            'vcpus': instance['vcpus'],
            'root_gb': instance['root_gb'],
            'ephemeral_gb': instance['ephemeral_gb'],
            'display_name': new_name,
            'hostname': utils.sanitize_hostname(new_name),
            'display_description': instance['display_description'],
            'user_data': new_user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': instance['os_type'],
            'host': None,
            'system_metadata': system_metadata,
            'launch_index': launch_index,
            'root_device_name': instance['root_device_name'],
            'power_state': power_state.NOSTATE,
            'vm_mode': instance['vm_mode'],
            'architecture': instance['architecture'],
            'access_ip_v4': instance['access_ip_v4'],
            'access_ip_v6': instance['access_ip_v6'],
            'config_drive': instance['config_drive'],
            'default_ephemeral_device': instance['default_ephemeral_device'],
            'default_swap_device': instance['default_swap_device'],
            'auto_disk_config': instance['auto_disk_config'],
            # Set disable_terminate on bless so terminate in nova-api barfs on a
            # blessed instance.
            'disable_terminate': not launch,
        }

        new_instance = instance_obj.Instance()
        new_instance.update(instance_params)
        # NOTE(review): use identity comparison for None (PEP 8), was `!= None`.
        if security_groups is not None:
            self.sg_api.populate_security_groups(new_instance, security_groups)
        new_instance.create(context)
        nw_info = instance['info_cache'].get('network_info')
        self.db.instance_info_cache_update(context, new_instance['uuid'],
                                           {'network_info': nw_info})

        # (dscannell) We need to reload the instance reference in order for it to be associated with
        # the database session of lazy-loading.
        new_instance = self.db.instance_get(context, new_instance.id)

        elevated = context.elevated()

        # Create a copy of all the block device mappings
        block_device_mappings =\
            self.db.block_device_mapping_get_all_by_instance(context,
                                                             instance['uuid'])
        block_device_mappings =\
            self._parse_block_device_mapping(block_device_mappings)
        for bdev in block_device_mappings:
            bdev['instance_uuid'] = new_instance['uuid']
            self.db.block_device_mapping_create(elevated, bdev, legacy=False)

        return new_instance