def test_get_instance_availability_zone_default_value(self):
    """Test get right availability zone by given an instance."""
    instance = fakes.stub_instance(162, host=self.host)
    actual = az.get_instance_availability_zone(self.context, instance)
    self.assertEqual(self.default_az, actual)
def check_attach(self, context, volume, instance=None):
    """Validate that *volume* may be attached, optionally to *instance*.

    Raises InvalidVolume when the volume is not 'available', is already
    attached, or (when cross-AZ attach is disabled) is in a different
    availability zone than the instance.
    """
    # TODO(vish): abstract status checking?
    status = volume['status']
    if status != "available":
        msg = _("volume '%(vol)s' status must be 'available'. Currently "
                "in '%(status)s'") % {'vol': volume['id'],
                                      'status': status}
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == "attached":
        raise exception.InvalidVolume(
            reason=_("volume %s already attached") % volume['id'])
    if not instance or CONF.cinder.cross_az_attach:
        return
    # NOTE(sorrison): If instance is on a host we match against its AZ
    # else we check the intended AZ
    if instance.get('host'):
        instance_az = az.get_instance_availability_zone(context, instance)
    else:
        instance_az = instance['availability_zone']
    if instance_az != volume['availability_zone']:
        msg = _("Instance %(instance)s and volume %(vol)s are not in "
                "the same availability_zone. Instance is in "
                "%(ins_zone)s. Volume is in %(vol_zone)s") % {
            "instance": instance['id'],
            "vol": volume['id'],
            'ins_zone': instance_az,
            'vol_zone': volume['availability_zone']}
        raise exception.InvalidVolume(reason=msg)
def check_attach(self, context, volume, instance=None):
    """Ensure *volume* is in an attachable state; raise InvalidVolume if not.

    Checks, in order: the volume's status is 'available'; it is not
    already attached; and, unless cross-AZ attach is allowed, it shares
    an availability zone with *instance*.
    """
    # TODO(vish): abstract status checking?
    if volume['status'] != "available":
        raise exception.InvalidVolume(reason=_(
            "volume '%(vol)s' status must be 'available'. Currently "
            "in '%(status)s'") % {
                'vol': volume['id'],
                'status': volume['status'],
            })
    if volume['attach_status'] == "attached":
        msg = _("volume %s already attached") % volume['id']
        raise exception.InvalidVolume(reason=msg)
    if instance and not CONF.cinder.cross_az_attach:
        # NOTE(sorrison): If instance is on a host we match against its AZ
        # else we check the intended AZ
        has_host = instance.get('host')
        instance_az = (
            az.get_instance_availability_zone(context, instance)
            if has_host else instance['availability_zone'])
        if instance_az == volume['availability_zone']:
            return
        raise exception.InvalidVolume(reason=_(
            "Instance %(instance)s and volume %(vol)s are not in "
            "the same availability_zone. Instance is in "
            "%(ins_zone)s. Volume is in %(vol_zone)s") % {
                "instance": instance['id'],
                "vol": volume['id'],
                'ins_zone': instance_az,
                'vol_zone': volume['availability_zone'],
            })
def _extend_server(self, context, server, instance):
    """Attach the instance's availability zone to the server dict."""
    zone = avail_zone.get_instance_availability_zone(context, instance)
    if not zone and instance.get('availability_zone'):
        # Likely hasn't reached a viable compute node yet so give back
        # the desired availability_zone that *may* exist in the instance
        # record itself.
        zone = instance.availability_zone
    server["%s:availability_zone" % PREFIX] = zone
def test_get_instance_availability_zone_from_aggregate(self):
    """Test get availability zone from aggregate by given an instance."""
    host = 'host170'
    service = self._create_service_with_topic('compute', host)
    self._add_to_aggregate(service, self.agg)
    instance = fakes.stub_instance(174, host=host)
    actual = az.get_instance_availability_zone(self.context, instance)
    self.assertEqual(self.availability_zone, actual)
def __init__(self, instance, address=None, content=None, extra_md=None,
             conductor_api=None, network_info=None, vd_driver=None):
    """Creation of this object should basically cover all time consuming
    collection. Methods after that should not cause time delays due to
    network operations or lengthy cpu operations.

    The user should then get a single instance and make multiple method
    calls on it.
    """
    if not content:
        content = []
    ctxt = context.get_admin_context()
    # The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN
    self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
    self.instance = instance
    self.extra_md = extra_md
    # Use the injected conductor API if given (e.g. by tests), otherwise
    # construct the default one.
    if conductor_api:
        capi = conductor_api
    else:
        capi = conductor.API()
    # Collect everything up front: AZ, security groups, block-device
    # mappings, user data, and EC2 id mappings.
    self.availability_zone = az.get_instance_availability_zone(ctxt,
                                                               instance)
    self.security_groups = objects.SecurityGroupList.get_by_instance(
        ctxt, instance)
    self.mappings = _format_instance_mapping(ctxt, instance)
    # user_data is stored base64-encoded; keep the decoded bytes, or
    # None when the instance has no user data.
    if instance.user_data is not None:
        self.userdata_raw = base64.b64decode(instance.user_data)
    else:
        self.userdata_raw = None
    self.ec2_ids = capi.get_ec2_ids(ctxt,
                                    obj_base.obj_to_primitive(instance))
    self.address = address
    # expose instance metadata.
    self.launch_metadata = utils.instance_meta(instance)
    self.password = password.extract_password(instance)
    self.uuid = instance.uuid
    # content maps zero-padded keys ("0000", "0001", ...) to file bodies
    # served under CONTENT_DIR; files lists path -> content_path entries.
    self.content = {}
    self.files = []
    # get network info, and the rendered network template
    if network_info is None:
        network_info = instance.info_cache.network_info
    self.ip_info = \
        ec2utils.get_ip_info_for_instance_from_nw_info(network_info)
    self.network_config = None
    cfg = netutils.get_injected_network_template(network_info)
    if cfg:
        # Register the rendered network template as the next content
        # entry and point network_config at it.
        key = "%04i" % len(self.content)
        self.content[key] = cfg
        self.network_config = {"name": "network_config",
                               'content_path': "/%s/%s" % (CONTENT_DIR,
                                                           key)}
    # 'content' is passed in from the configdrive code in
    # patron/virt/libvirt/driver.py. That's how we get the injected files
    # (personalities) in. AFAIK they're not stored in the db at all,
    # so are not available later (web service metadata time).
    for (path, contents) in content:
        key = "%04i" % len(self.content)
        self.files.append({'path': path,
                           'content_path': "/%s/%s" % (CONTENT_DIR, key)})
        self.content[key] = contents
    # Vendor-data driver: explicit override wins; otherwise load the
    # class named by the vendordata_driver config option.
    if vd_driver is None:
        vdclass = importutils.import_class(CONF.vendordata_driver)
    else:
        vdclass = vd_driver
    self.vddriver = vdclass(instance=instance, address=address,
                            extra_md=extra_md, network_info=network_info)
    # Lazily built elsewhere — presumably by the route/path lookup
    # machinery; initialized empty here. TODO(review): confirm.
    self.route_configuration = None