def _start_container(self, container_id, instance, network_info=None):
    """Start a created container and wire up its networking.

    Starts the container; if ``network_info`` is supplied, plugs and
    attaches the VIFs, waiting for Neutron vif-plugged events when
    Neutron is in use and a timeout is configured.  On any network
    failure the container is killed and removed.

    :raises exception.InstanceDeployFailure: on vif-plugging timeout
        (when CONF.vif_plugging_is_fatal) or any network setup error
    """
    self.docker.start(container_id)
    if not network_info:
        return
    timeout = CONF.vif_plugging_timeout
    if utils.is_neutron() and timeout:
        events = self._get_neutron_events(network_info)
    else:
        events = []
    try:
        with self.virtapi.wait_for_instance_event(
                instance, events, deadline=timeout,
                error_callback=self._neutron_failed_callback):
            self.plug_vifs(instance, network_info)
            self._attach_vifs(instance, network_info)
    except eventlet.timeout.Timeout:
        # LOG.warn is deprecated in favour of LOG.warning.
        LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                        'instance %(uuid)s'), {'uuid': instance['name']})
        if CONF.vif_plugging_is_fatal:
            self.docker.kill(container_id)
            self.docker.remove_container(container_id, force=True)
            raise exception.InstanceDeployFailure(
                'Timeout waiting for vif plugging',
                instance_id=instance['name'])
    except Exception as e:
        LOG.warning(_('Cannot setup network: %s'),
                    e, instance=instance, exc_info=True)
        msg = _('Cannot setup network: {0}')
        self.docker.kill(container_id)
        self.docker.remove_container(container_id, force=True)
        raise exception.InstanceDeployFailure(msg.format(e),
                                              instance_id=instance['name'])
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Restart the container backing *instance*.

    Stops the container, tears down its network namespace and VIFs,
    restarts it and re-plugs the VIFs.  Every failure is logged and
    abandons the reboot without raising.

    :param reboot_type: accepted for driver API compatibility; soft and
        hard reboots are handled identically here
    :param block_device_info: unused
    :param bad_volumes_callback: unused
    """
    container_id = self._find_container_by_name(instance['name']).get('id')
    if not container_id:
        # No container backs this instance; nothing to reboot.
        return
    if not self.docker.stop_container(container_id):
        LOG.warning(_('Cannot stop the container, '
                      'please check docker logs'))
        return
    try:
        # Drop the netns and unplug VIFs before restarting so the
        # network can be rebuilt cleanly below.
        network.teardown_network(container_id)
        self.unplug_vifs(instance, network_info)
    except Exception:
        LOG.debug('Cannot destroy the container network during reboot')
        return
    if not self.docker.start_container(container_id):
        LOG.warning(_('Cannot restart the container, '
                      'please check docker logs'))
        return
    try:
        self.plug_vifs(instance, network_info)
    except Exception as e:
        LOG.warning(_('Cannot setup network on reboot: {0}').format(e))
        return
def _pull_missing_image(self, context, image_meta, instance):
    """Fetch the image from Glance and load it into the Docker daemon.

    Downloads the image to a temporary file under
    CONF.docker.snapshots_directory, feeds it to ``docker load`` and
    returns the ``docker inspect`` result for the loaded image.

    :raises exception.NovaException: if the repository file cannot be
        fetched or loaded
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta['name'])
    # TODO(imain): It would be nice to do this with file like object
    # passing but that seems a bit complex right now.
    snapshot_directory = CONF.docker.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            # Random file name; the tempdir is removed when the context
            # manager exits.
            out_path = os.path.join(tmpdir, uuid.uuid4().hex)
            images.fetch(context, image_meta['id'], out_path,
                         instance['user_id'], instance['project_id'])
            self.docker.load_repository_file(
                self._encode_utf8(image_meta['name']),
                out_path
            )
        except Exception as e:
            LOG.warning(_('Cannot load repository file: %s'),
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta['name'])
    return self.docker.inspect_image(self._encode_utf8(image_meta['name']))
def plug(self, instance, vif):
    """Dispatch VIF plugging to the implementation for the vif's type.

    :raises exception.NovaException: if the vif type is missing or not
        one of the supported types
    """
    vif_type = vif['type']
    LOG.debug(
        'plug vif_type=%(vif_type)s instance=%(instance)s '
        'vif=%(vif)s',
        {'vif_type': vif_type, 'instance': instance, 'vif': vif})
    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))
    if vif_type == network_model.VIF_TYPE_OVS:
        # OVS may need the hybrid (veth + linux bridge) strategy when
        # security groups require an iptables-capable device.
        if self.ovs_hybrid_required(vif):
            self.plug_ovs_hybrid(instance, vif)
        else:
            self.plug_ovs(instance, vif)
    elif vif_type == network_model.VIF_TYPE_BRIDGE:
        self.plug_bridge(instance, vif)
    elif vif_type == network_model.VIF_TYPE_MIDONET:
        self.plug_midonet(instance, vif)
    elif vif_type == network_model.VIF_TYPE_IOVISOR:
        self.plug_iovisor(instance, vif)
    elif vif_type == 'hyperv':
        self.plug_windows(instance, vif)
    else:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot the container: stop, tear down networking, start, re-plug.

    Network teardown or setup failures are logged and abort the reboot
    without raising.  ``reboot_type``, ``block_device_info`` and
    ``bad_volumes_callback`` are accepted for driver API compatibility
    but unused.
    """
    container_id = self._get_container_id(instance)
    if not container_id:
        return
    self._stop(container_id, instance)
    try:
        # Remove the netns and unplug VIFs so networking can be rebuilt
        # from scratch after the restart.
        network.teardown_network(container_id)
        if network_info:
            self.unplug_vifs(instance, network_info)
    except Exception as e:
        LOG.warning(_('Cannot destroy the container network'
                      ' during reboot {0}').format(e), exc_info=True)
        return
    binds = self._get_key_binds(container_id, instance)
    dns = self._extract_dns_entries(network_info)
    self.docker.start(container_id, binds=binds, dns=dns)
    try:
        if network_info:
            self.plug_vifs(instance, network_info)
            self._attach_vifs(instance, network_info)
    except Exception as e:
        LOG.warning(_('Cannot setup network on reboot: {0}'), e,
                    exc_info=True)
        return
def power_on(self, context, instance, network_info, block_device_info=None):
    """Start the instance's container and set up its networking.

    :param block_device_info: unused
    :raises exception.InstanceDeployFailure: if network setup fails;
        the container is killed and removed first
    """
    container_id = self._get_container_id(instance)
    if not container_id:
        return
    binds = self._get_key_binds(container_id, instance)
    dns = self._extract_dns_entries(network_info)
    self.docker.start(container_id, binds=binds, dns=dns)
    if not network_info:
        return
    try:
        self.plug_vifs(instance, network_info)
        self._attach_vifs(instance, network_info)
    except Exception as e:
        # This failure aborts power_on and destroys the container, so
        # log at warning (was debug), matching _start_container.
        LOG.warning(_('Cannot setup network: %s'),
                    e, instance=instance, exc_info=True)
        msg = _('Cannot setup network: {0}')
        self.docker.kill(container_id)
        self.docker.remove_container(container_id, force=True)
        raise exception.InstanceDeployFailure(msg.format(e),
                                              instance_id=instance['name'])
def plug(self, instance, vif):
    """Route the VIF to the plug_* implementation matching its type.

    :raises exception.NovaException: when the vif type is absent or
        unrecognised
    """
    vif_type = vif['type']
    LOG.debug('plug vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s',
              {'vif_type': vif_type,
               'instance': instance,
               'vif': vif})
    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))
    if vif_type == network_model.VIF_TYPE_BRIDGE:
        self.plug_bridge(instance, vif)
    elif vif_type == network_model.VIF_TYPE_OVS:
        # Choose plain OVS or the hybrid veth/bridge strategy.
        handler = (self.plug_ovs_hybrid if self.ovs_hybrid_required(vif)
                   else self.plug_ovs)
        handler(instance, vif)
    elif vif_type == network_model.VIF_TYPE_MIDONET:
        self.plug_midonet(instance, vif)
    elif vif_type == network_model.VIF_TYPE_IOVISOR:
        self.plug_iovisor(instance, vif)
    elif vif_type == 'hyperv':
        self.plug_windows(instance, vif)
    else:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
def snapshot(self, context, instance, image_href, update_task_state):
    """Commit the running container and upload it to Glance.

    Commits the container to a local Docker image whose repository:tag
    are derived from the Glance image name (tag defaults to 'latest'),
    then streams the image tarball to the Glance image service.

    :param update_task_state: callback used to advance the snapshot
        task state for the API
    :raises exception.InstanceNotRunning: if no container backs the
        instance
    :raises exception.NovaException: if the upload to Glance fails
    """
    container_id = self._get_container_id(instance)
    if not container_id:
        raise exception.InstanceNotRunning(instance_id=instance['uuid'])
    update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
    (image_service, image_id) = glance.get_remote_image_service(context, image_href)
    image = image_service.show(context, image_id)
    # Split "name:tag" on the last ':' only, so registry host:port
    # prefixes in the name survive.
    if ':' not in image['name']:
        commit_name = self._encode_utf8(image['name'])
        tag = 'latest'
    else:
        parts = self._encode_utf8(image['name']).rsplit(':', 1)
        commit_name = parts[0]
        tag = parts[1]
    self.docker.commit(container_id, repository=commit_name, tag=tag)
    update_task_state(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)
    metadata = {
        'is_public': False,
        'status': 'active',
        'disk_format': 'raw',
        'container_format': 'docker',
        'name': image['name'],
        'properties': {
            'image_location': 'snapshot',
            'image_state': 'available',
            'status': 'available',
            'owner_id': instance['project_id'],
            'ramdisk_id': instance['ramdisk_id']
        }
    }
    if instance['os_type']:
        metadata['properties']['os_type'] = instance['os_type']
    try:
        raw = self.docker.get_image(commit_name)
        # Patch the seek/tell as urllib3 throws UnsupportedOperation
        raw.seek = lambda x=None, y=None: None
        raw.tell = lambda: None
        image_service.update(context, image_href, metadata, raw)
    except Exception as e:
        LOG.debug(_('Error saving image: %s'),
                  e, instance=instance, exc_info=True)
        msg = _('Error saving image: {0}')
        raise exception.NovaException(msg.format(e),
                                      instance_id=instance['name'])
def snapshot(self, context, instance, image_href, update_task_state):
    """Snapshot the instance's container into a Glance image.

    Commits the container locally (repository:tag parsed from the
    Glance image name, tag defaulting to 'latest') and uploads the
    resulting image tarball to Glance.

    :raises exception.InstanceNotRunning: if no container exists
    :raises exception.NovaException: if saving the image fails
    """
    container_id = self._get_container_id(instance)
    if not container_id:
        raise exception.InstanceNotRunning(instance_id=instance['uuid'])
    update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
    (image_service, image_id) = glance.get_remote_image_service(
        context, image_href)
    image = image_service.show(context, image_id)
    # rsplit on the final ':' keeps any registry host:port intact.
    if ':' not in image['name']:
        commit_name = self._encode_utf8(image['name'])
        tag = 'latest'
    else:
        parts = self._encode_utf8(image['name']).rsplit(':', 1)
        commit_name = parts[0]
        tag = parts[1]
    self.docker.commit(container_id, repository=commit_name, tag=tag)
    update_task_state(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)
    metadata = {
        'is_public': False,
        'status': 'active',
        'disk_format': 'raw',
        'container_format': 'docker',
        'name': image['name'],
        'properties': {
            'image_location': 'snapshot',
            'image_state': 'available',
            'status': 'available',
            'owner_id': instance['project_id'],
            'ramdisk_id': instance['ramdisk_id']
        }
    }
    if instance['os_type']:
        metadata['properties']['os_type'] = instance['os_type']
    try:
        raw = self.docker.get_image(commit_name)
        # Patch the seek/tell as urllib3 throws UnsupportedOperation
        raw.seek = lambda x=None, y=None: None
        raw.tell = lambda: None
        image_service.update(context, image_href, metadata, raw)
    except Exception as e:
        LOG.debug(_('Error saving image: %s'),
                  e, instance=instance, exc_info=True)
        msg = _('Error saving image: {0}')
        raise exception.NovaException(msg.format(e),
                                      instance_id=instance['name'])
def _pull_missing_image(self, context, image_meta, instance):
    """Load the instance's image into Docker, preferring a shared cache.

    If CONF.docker.shared_directory contains a repository file named
    after the image id, load that; otherwise fetch the image from
    Glance into CONF.docker.snapshots_directory and load it from there.

    :returns: the ``docker inspect`` result for the loaded image
    :raises exception.NovaException: if the Glance-fetched repository
        file cannot be loaded
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta.name)
    shared_directory = CONF.docker.shared_directory
    if (shared_directory and os.path.exists(
            os.path.join(shared_directory, image_meta.id))):
        LOG.debug('Found %s in shared_directory', image_meta.id)
        try:
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                os.path.join(shared_directory, image_meta.id))
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            # If failed to load image from shared_directory, continue
            # to download the image from glance then load.
            LOG.warning(_('Cannot load repository file from shared '
                          'directory: %s'),
                        e, instance=instance, exc_info=True)
    # TODO(imain): It would be nice to do this with file like object
    # passing but that seems a bit complex right now.
    snapshot_directory = CONF.docker.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            # Random temp file name; removed with the tempdir.
            out_path = os.path.join(tmpdir, uuid.uuid4().hex)
            LOG.debug('Fetching image with id %s from glance',
                      image_meta.id)
            images.fetch(context, image_meta.id, out_path,
                         instance['user_id'], instance['project_id'])
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name), out_path)
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            LOG.warning(_('Cannot load repository file: %s'),
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta.name)
def _pull_missing_image(self, context, image_meta, instance):
    """Load the image into Docker from a shared cache or from Glance.

    Tries CONF.docker.shared_directory first (file named after the
    image id); on any failure there it falls back to fetching from
    Glance into CONF.docker.snapshots_directory and loading that.

    :returns: the ``docker inspect`` result for the loaded image
    :raises exception.NovaException: if the Glance fetch/load fails
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta.name)
    shared_directory = CONF.docker.shared_directory
    if (shared_directory and
            os.path.exists(os.path.join(shared_directory,
                                        image_meta.id))):
        LOG.debug('Found %s in shared_directory', image_meta.id)
        try:
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                os.path.join(shared_directory, image_meta.id))
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            # If failed to load image from shared_directory, continue
            # to download the image from glance then load.
            LOG.warning(_('Cannot load repository file from shared '
                          'directory: %s'),
                        e, instance=instance, exc_info=True)
    # TODO(imain): It would be nice to do this with file like object
    # passing but that seems a bit complex right now.
    snapshot_directory = CONF.docker.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            out_path = os.path.join(tmpdir, uuid.uuid4().hex)
            LOG.debug('Fetching image with id %s from glance',
                      image_meta.id)
            images.fetch(context, image_meta.id, out_path,
                         instance['user_id'], instance['project_id'])
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                out_path
            )
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            LOG.warning(_('Cannot load repository file: %s'),
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta.name)
def unpause(self, instance):
    """Unpause paused VM instance.

    :param instance: nova.objects.instance.Instance
    :raises exception.NovaException: if docker fails to unpause the
        container
    """
    try:
        cont_id = self._get_container_id(instance)
        if not self.docker.unpause(cont_id):
            # BUG FIX: previously raised the NovaException class object
            # itself; raise a proper instance with a message.
            raise exception.NovaException(
                _('Docker failed to unpause the container'))
    except Exception as e:
        LOG.debug(_('Error unpause container: %s'),
                  e, instance=instance, exc_info=True)
        msg = _('Cannot unpause container: {0}')
        raise exception.NovaException(msg.format(e),
                                      instance_id=instance['name'])
def plug_ovs(self, instance, vif):
    """Create a veth pair and attach its host end to the OVS bridge.

    The container-side peer (ns...) is left for later attachment into
    the container's network namespace.

    Fixes: removes leftover debug logging, including a broken
    ``LOG.info('ip', 'link', ..., run_as_root=True)`` call that passed
    an invalid keyword to the logger and raised TypeError; registers
    the veth pair with the undo manager so rollback actually cleans up.
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    bridge = vif['network']['bridge']
    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        # Deleting one end of the veth pair removes both.
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        linux_net.create_ovs_vif_port(bridge, if_local_name,
                                      network.get_ovs_interfaceid(vif),
                                      vif['address'],
                                      instance['uuid'])
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def plug(self, instance, vif):
    """Create the instance's veth pair and set the MAC on the ns end.

    The host-side device is named ``veth<id8>`` and its peer
    ``ns<id8>``; if the host device already exists nothing is done.
    A failure rolls back the created pair and re-raises.
    """
    vif_type = vif['type']
    LOG.debug('Plug vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s',
              {'vif_type': vif_type,
               'instance': instance,
               'vif': vif})
    if_local_name = 'veth%s' % vif['id'][:8]
    if_remote_name = 'ns%s' % vif['id'][:8]
    # Idempotence: bail out when the device is already present.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', if_local_name,
                      'type', 'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        # Removing one end of the pair tears down both.
        undo_mgr.undo_with(
            lambda: utils.execute('ip', 'link', 'delete', if_local_name,
                                  run_as_root=True))
        utils.execute('ip', 'link', 'set', if_remote_name,
                      'address', vif['address'],
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def plug_iovisor(self, instance, vif):
    """Plug docker vif into IOvisor

    Creates a port on IOvisor and onboards the interface

    Fix: register the created veth pair with the undo manager so that
    rollback_and_reraise actually removes it on failure (previously the
    undo manager had nothing to undo and the device leaked).
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    iface_id = vif['id']
    net_id = vif['network']['id']
    tenant_id = instance['project_id']
    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        # Deleting one end of the veth pair removes both.
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        utils.execute('ifc_ctl', 'gateway', 'add_port', if_local_name,
                      run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'ifup', if_local_name,
                      'access_vm',
                      vif['network']['label'] + "_" + iface_id,
                      vif['address'], 'pgtag2=%s' % net_id,
                      'pgtag1=%s' % tenant_id, run_as_root=True)
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network on IOvisor")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def plug_midonet(self, instance, vif):
    """Plug into MidoNet's network port

    This accomplishes binding of the vif to a MidoNet virtual port

    Fix: removed leftover debug statement ``LOG.info('2222')``.
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    port_id = network.get_ovs_interfaceid(vif)
    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
        utils.execute('mm-ctl', '--bind-port', port_id, if_local_name,
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def unplug_ovs(self, instance, vif):
    """Unplug the VIF by deleting the port from the bridge."""
    bridge = vif['network']['bridge']
    device = vif['devname']
    try:
        linux_net.delete_ovs_vif_port(bridge, device)
    except processutils.ProcessExecutionError:
        # Best effort: log and continue so teardown can proceed.
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def get_available_resource(self, nodename):
    """Report host resource usage/capacity to the resource tracker.

    The first call pins ``self._nodename``; later calls with a
    different node name only log an error, since changing it would
    require a service restart.

    :returns: dict of stats in the shape nova's resource tracker expects
    """
    if not hasattr(self, '_nodename'):
        self._nodename = nodename
    if nodename != self._nodename:
        LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
                    'A restart is required to take effect.'
                    ), {'old': self._nodename, 'new': nodename})
    memory = hostinfo.get_memory_usage()
    disk = hostinfo.get_disk_usage()
    stats = {
        'vcpus': hostinfo.get_total_vcpus(),
        'vcpus_used': hostinfo.get_vcpus_used(self.list_instances(True)),
        # NOTE(review): assumes hostinfo returns bytes; converted to
        # MiB/GiB here — confirm against hostinfo's contract.
        'memory_mb': memory['total'] / units.Mi,
        'memory_mb_used': memory['used'] / units.Mi,
        'local_gb': disk['total'] / units.Gi,
        'local_gb_used': disk['used'] / units.Gi,
        'disk_available_least': disk['available'] / units.Gi,
        'hypervisor_type': 'docker',
        'hypervisor_version': versionutils.convert_version_to_int('1.0'),
        'hypervisor_hostname': self._nodename,
        'cpu_info': '?',
        'numa_topology': None,
        'supported_instances': [
            (arch.I686, hv_type.DOCKER, vm_mode.EXE),
            (arch.X86_64, hv_type.DOCKER, vm_mode.EXE)
        ]
    }
    return stats
def _attach_vifs(self, instance, network_info):
    """Plug VIFs into container.

    Exposes the container's network namespace to ``ip netns`` by
    symlinking /proc/<pid>/ns/net under /var/run/netns, brings up the
    loopback device inside it, then attaches each VIF.

    :raises RuntimeError: if the container has no running PID
    """
    if not network_info:
        return
    container_id = self._get_container_id(instance)
    if not container_id:
        return
    netns_path = '/var/run/netns'
    if not os.path.exists(netns_path):
        utils.execute(
            'mkdir', '-p', netns_path, run_as_root=True)
    nspid = self._find_container_pid(container_id)
    if not nspid:
        msg = _('Cannot find any PID under container "{0}"')
        raise RuntimeError(msg.format(container_id))
    netns_path = os.path.join(netns_path, container_id)
    # Make the container's netns visible to the `ip netns` tooling.
    utils.execute(
        'ln', '-sf', '/proc/{0}/ns/net'.format(nspid),
        '/var/run/netns/{0}'.format(container_id),
        run_as_root=True)
    utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link',
                  'set', 'lo', 'up', run_as_root=True)
    for vif in network_info:
        self.vif_driver.attach(instance, vif, container_id)
def _attach_vifs(self, instance, network_info):
    """Plug VIFs into container.

    Symlinks the container's network namespace under /var/run/netns so
    ``ip netns`` can address it, brings up loopback inside it, and
    attaches each VIF via the vif driver.

    :raises RuntimeError: if no PID can be found for the container
    """
    if not network_info:
        return
    container_id = self._get_container_id(instance)
    if not container_id:
        return
    netns_path = '/var/run/netns'
    if not os.path.exists(netns_path):
        utils.execute('mkdir', '-p', netns_path, run_as_root=True)
    nspid = self._find_container_pid(container_id)
    if not nspid:
        msg = _('Cannot find any PID under container "{0}"')
        raise RuntimeError(msg.format(container_id))
    netns_path = os.path.join(netns_path, container_id)
    # Expose the container netns to the `ip netns` tooling.
    utils.execute('ln', '-sf', '/proc/{0}/ns/net'.format(nspid),
                  '/var/run/netns/{0}'.format(container_id),
                  run_as_root=True)
    utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link',
                  'set', 'lo', 'up', run_as_root=True)
    for vif in network_info:
        self.vif_driver.attach(instance, vif, container_id)
def _get_image_name(self, context, instance, image): fmt = image['container_format'] if fmt != 'docker': msg = _('Image container format not supported ({0})') raise exception.InstanceDeployFailure(msg.format(fmt), instance_id=instance['name']) return image['name']
def plug_midonet(self, instance, vif):
    """Plug into MidoNet's network port

    This accomplishes binding of the vif to a MidoNet virtual port
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    port_id = network.get_ovs_interfaceid(vif)
    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        # Deleting one end of the veth pair removes both on rollback.
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
        # Bind the host end to the MidoNet virtual port.
        utils.execute('mm-ctl', '--bind-port', port_id, if_local_name,
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None,
          flavor=None):
    """Create and boot a Docker container for the instance.

    Resolves the image (pulling from Glance when the Docker daemon does
    not already have it), creates the container and starts it with
    networking via _start_container.

    :param injected_files: unused
    :param admin_password: unused
    :raises exception.InstanceDeployFailure: if the container cannot be
        created
    """
    image_name = self._get_image_name(context, instance, image_meta)
    args = {
        'hostname': instance['name'],
        'mem_limit': self._get_memory_limit_bytes(instance),
        'cpu_shares': self._get_cpu_shares(instance),
        'network_disabled': True,
    }
    try:
        image = self.docker.inspect_image(self._encode_utf8(image_name))
    except errors.APIError:
        # Image unknown to the daemon; fall through to a Glance pull.
        image = None
    if not image:
        image = self._pull_missing_image(context, image_meta, instance)
    # Glance command-line overrides any set in the Docker image
    if (image_meta and
            image_meta.get('properties', {}).get('os_command_line')):
        args['command'] = image_meta['properties'].get('os_command_line')
    if 'metadata' in instance:
        args['environment'] = nova_utils.instance_meta(instance)
    container_id = self._create_container(instance, image_name, args)
    if not container_id:
        raise exception.InstanceDeployFailure(
            _('Cannot create container'),
            instance_id=instance['name'])
    self._start_container(container_id, instance, network_info)
def get_available_resource(self, nodename):
    """Report host resource capacity and usage to the resource tracker.

    The node name is pinned on first call; a subsequent different name
    is only logged as an error (a restart is needed to change it).

    :returns: stats dict in the shape nova's resource tracker expects
    """
    if not hasattr(self, '_nodename'):
        self._nodename = nodename
    if nodename != self._nodename:
        LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
                    'A restart is required to take effect.'
                    ), {'old': self._nodename, 'new': nodename})
    memory = hostinfo.get_memory_usage()
    disk = hostinfo.get_disk_usage()
    stats = {
        'vcpus': hostinfo.get_total_vcpus(),
        'vcpus_used': hostinfo.get_vcpus_used(self.list_instances(True)),
        # NOTE(review): assumes hostinfo reports bytes — confirm.
        'memory_mb': memory['total'] / units.Mi,
        'memory_mb_used': memory['used'] / units.Mi,
        'local_gb': disk['total'] / units.Gi,
        'local_gb_used': disk['used'] / units.Gi,
        'disk_available_least': disk['available'] / units.Gi,
        'hypervisor_type': 'docker',
        'hypervisor_version': utils.convert_version_to_int('1.0'),
        'hypervisor_hostname': self._nodename,
        'cpu_info': '?',
        'supported_instances': jsonutils.dumps([
            (arch.I686, hv_type.DOCKER, vm_mode.EXE),
            (arch.X86_64, hv_type.DOCKER, vm_mode.EXE)
        ])
    }
    return stats
def plug_ovs(self, instance, vif):
    """Create a veth pair and attach the host end to the OVS bridge.

    Fix: register the created veth pair with the undo manager so that
    a later failure actually removes it during rollback (matching
    plug_midonet/plug_bridge); previously rollback had nothing to undo
    and the device leaked.
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    bridge = vif['network']['bridge']
    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name, 'type',
                      'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        # Deleting one end of the veth pair removes both.
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        linux_net.create_ovs_vif_port(bridge, if_local_name,
                                      network.get_ovs_interfaceid(vif),
                                      vif['address'],
                                      instance['uuid'])
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Create and start a Docker container for the instance.

    Resolves the image (pulling from Glance if the daemon does not have
    it), falls back to ``sh`` when the image declares no command, then
    creates and starts the container.

    :param injected_files: unused
    :param admin_password: unused
    :raises exception.InstanceDeployFailure: if the container cannot be
        created
    """
    image_name = self._get_image_name(context, instance, image_meta)
    args = {
        'Hostname': instance['name'],
        'Image': image_name,
        'Memory': self._get_memory_limit_bytes(instance),
        'CpuShares': self._get_cpu_shares(instance),
        'NetworkDisabled': True,
    }
    image = self.docker.inspect_image(image_name)
    if not image:
        image = self._pull_missing_image(context, image_meta, instance)
    # Images with no default command still need something to run.
    if not (image and image['ContainerConfig']['Cmd']):
        args['Cmd'] = ['sh']
    # Glance command-line overrides any set in the Docker image
    if (image_meta and
            image_meta.get('properties', {}).get('os_command_line')):
        args['Cmd'] = image_meta['properties'].get('os_command_line')
    container_id = self._create_container(instance, args)
    if not container_id:
        raise exception.InstanceDeployFailure(
            _('Cannot create container'),
            instance_id=instance['name'])
    self._start_container(instance, network_info)
def get_available_resource(self, nodename):
    """Report host resources to the scheduler/resource tracker.

    Early variant: reports a fixed single vCPU and hard-coded
    supported-instance tuples.  The node name is pinned on first call;
    later changes only log an error.

    :returns: stats dict in the shape nova's resource tracker expects
    """
    if not hasattr(self, '_nodename'):
        self._nodename = nodename
    if nodename != self._nodename:
        LOG.error(_('Hostname has changed from %(old)s to %(new)s. '
                    'A restart is required to take effect.'
                    ) % {'old': self._nodename, 'new': nodename})
    memory = hostinfo.get_memory_usage()
    disk = hostinfo.get_disk_usage()
    stats = {
        # Hard-coded: real vCPU accounting was added in later revisions.
        'vcpus': 1,
        'vcpus_used': 0,
        # NOTE(review): assumes hostinfo reports bytes — confirm.
        'memory_mb': memory['total'] / units.Mi,
        'memory_mb_used': memory['used'] / units.Mi,
        'local_gb': disk['total'] / units.Gi,
        'local_gb_used': disk['used'] / units.Gi,
        'disk_available_least': disk['available'] / units.Gi,
        'hypervisor_type': 'docker',
        'hypervisor_version': utils.convert_version_to_int('1.0'),
        'hypervisor_hostname': self._nodename,
        'cpu_info': '?',
        'supported_instances': jsonutils.dumps([
            ('i686', 'docker', 'lxc'),
            ('x86_64', 'docker', 'lxc')
        ])
    }
    return stats
def unplug_ovs_hybrid(self, instance, vif):
    """UnPlug using hybrid strategy

    Unhook port from OVS, unhook port from bridge, delete
    bridge, and delete both veth devices.
    """
    try:
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])
        if linux_net.device_exists(br_name):
            # Detach the veth from the linux bridge, take the bridge
            # down, then remove it.
            utils.execute('brctl', 'delif', br_name, v1_name,
                          run_as_root=True)
            utils.execute('ip', 'link', 'set', br_name, 'down',
                          run_as_root=True)
            utils.execute('brctl', 'delbr', br_name,
                          run_as_root=True)
        # Finally remove the OVS side of the pair.
        linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                      v2_name)
    except processutils.ProcessExecutionError:
        # Best effort: log and continue so teardown can proceed.
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None,
          flavor=None, image_name=None):
    """Create and start a container whose hostname encodes its fixed IP.

    Fixes: the original ``LOG.info('ooooooooo:%s:%s', (ips, ips_addr))``
    supplied a single tuple for two format specifiers, which raises a
    string-formatting error inside logging; leftover debug log lines
    and commented-out code removed.

    :param injected_files: unused
    :param admin_password: unused
    :raises exception.InstanceDeployFailure: if the container cannot be
        created
    """
    if not image_name:
        image_name = self._get_image_name(context, instance, image_meta)
    ips = self._extract_ips_entries(network_info)
    # Dashed form of the first fixed IP, used for hostname and binds.
    ips_addr = '-'.join(ips[0].split('.'))
    LOG.debug('Container ips %(ips)s, ips_addr %(ips_addr)s',
              {'ips': ips, 'ips_addr': ips_addr})
    dns = self._extract_dns_entries(network_info)
    args = {
        'hostname': 'docker%s' % ips_addr,
        'mem_limit': self._get_memory_limit_bytes(instance),
        'cpu_shares': self._get_cpu_shares(instance),
        'network_disabled': True,
        'dns': dns,
        'extra_hosts': ["docker%s:%s" % (ips_addr, ips[0])],
        'binds': ["/data/docker/%s:/data" % ips_addr,
                  "/tmp/%s:/tmp" % ips_addr],
    }
    try:
        image = self.docker.inspect_image(self._encode_utf8(image_name))
    except errors.APIError:
        image = None
    if not image:
        image = self._pull_missing_image1(context, image_meta, instance)
    # Glance command-line overrides any set in the Docker image
    if (image_meta is not None and
            image_meta.properties.get("os_command_line") is not None):
        args['command'] = image_meta.properties.get("os_command_line")
    if 'metadata' in instance:
        args['environment'] = nova_utils.instance_meta(instance)
    container_id = self._create_container(instance, image_name, args)
    if not container_id:
        raise exception.InstanceDeployFailure(
            _('Cannot create container'),
            instance_id=instance['name'])
    self._start_container(container_id, instance, network_info)
def find_fixed_ip(instance, network_info):
    """Return the first fixed IP of the instance in CIDR notation.

    Scans every subnet in ``network_info`` and returns the first fixed
    address found, formatted as ``address/prefixlen``.

    :raises exception.InstanceDeployFailure: if no fixed IP exists
    """
    for subnet in network_info['subnets']:
        prefix = subnet['cidr'].split('/')[1]
        for entry in subnet['ips']:
            if entry['type'] == 'fixed' and entry['address']:
                return '{0}/{1}'.format(entry['address'], prefix)
    raise exception.InstanceDeployFailure(_('Cannot find fixed ip'),
                                          instance_id=instance['uuid'])
def unplug(self, instance, vif):
    """Remove the vrouter port, then delete the host veth device."""
    try:
        self._vrouter_client.delete_port(vif['id'])
    except Exception:
        # Best effort: log and still delete the local device below.
        LOG.exception(_("Delete port failed"), instance=instance)
    device = 'veth%s' % vif['id'][:8]
    utils.execute('ip', 'link', 'delete', device,
                  run_as_root=True)
def find_dhcp(instance, network_info):
    """Return the DHCP server address for the instance's network.

    Only the first subnet is ever examined: its ``dhcp_server``
    metadata is returned when set, otherwise its gateway address.
    NOTE(review): the loop returns on the first iteration by design of
    the original code, so the raise below fires only when there are no
    subnets at all — confirm this is intended.

    :raises exception.InstanceDeployFailure: if there are no subnets
    """
    for subnet in network_info['subnets']:
        dhcp_server = subnet['meta'].get('dhcp_server')
        if dhcp_server:
            return dhcp_server
        return subnet['gateway']['address']
    raise exception.InstanceDeployFailure(_('Cannot find dhcp'),
                                          instance_id=instance['uuid'])
def find_fixed_ip(instance, subnet):
    """Return the subnet's first fixed IP as ``address/prefixlen``.

    Fixes: the Python-2-only ``except Exception, e`` syntax (a syntax
    error on Python 3); additionally, when the subnet holds no fixed IP
    the function used to fall off the end and silently return None —
    it now raises InstanceDeployFailure in that case too, matching the
    error path's intent.

    :raises exception.InstanceDeployFailure: if no fixed IP can be
        determined
    """
    try:
        netmask = subnet['cidr'].split('/')[1]
        for ip in subnet['ips']:
            if ip['type'] == 'fixed' and ip['address']:
                return ip['address'] + "/" + netmask
    except Exception:
        # Malformed subnet data; fall through to the failure below.
        pass
    raise exception.InstanceDeployFailure(_('Cannot find fixed ip'),
                                          instance_id=instance['uuid'])
def _start_container(self, container_id, instance, network_info=None):
    """Start the container with key binds/DNS and set up networking.

    On any network setup failure the container is killed and removed.

    :raises exception.InstanceDeployFailure: if VIF plugging or
        attachment fails
    """
    binds = self._get_key_binds(container_id, instance)
    dns = self._extract_dns_entries(network_info)
    self.docker.start(container_id, binds=binds, dns=dns)
    if not network_info:
        return
    try:
        self.plug_vifs(instance, network_info)
        self._attach_vifs(instance, network_info)
    except Exception as e:
        LOG.warning(_('Cannot setup network: %s'),
                    e, instance=instance, exc_info=True)
        msg = _('Cannot setup network: {0}')
        # Networking is mandatory here: destroy the container before
        # surfacing the deploy failure.
        self.docker.kill(container_id)
        self.docker.remove_container(container_id, force=True)
        raise exception.InstanceDeployFailure(msg.format(e),
                                              instance_id=instance['name'])
def unplug_midonet(self, instance, vif):
    """Unplug into MidoNet's network port

    This accomplishes unbinding of the vif from its MidoNet
    virtual port
    """
    try:
        port_id = network.get_ovs_interfaceid(vif)
        utils.execute('mm-ctl', '--unbind-port', port_id,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        # Best effort: log and continue so teardown can proceed.
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def _stop(self, container_id, instance, timeout=5):
    """Stop the container, unpausing it first when necessary.

    :param timeout: seconds docker waits for the process to exit before
        killing it; the first attempt uses at least 5 seconds
    :raises errors.APIError: re-raised for any failure other than the
        container being paused
    """
    try:
        self.docker.stop(container_id, max(timeout, 5))
    except errors.APIError as e:
        if 'Unpause the container before stopping' not in e.explanation:
            LOG.warning(_('Cannot stop container: %s'),
                        e, instance=instance, exc_info=True)
            raise
        # Container was paused: unpause it and retry the stop.
        self.docker.unpause(container_id)
        self.docker.stop(container_id, timeout)
def teardown_network(container_id):
    """Delete the network namespace named after the container, if any.

    Failures from the ``ip`` tooling are logged, not raised.
    """
    try:
        output, err = utils.execute('ip', '-o', 'netns', 'list')
        names = (line.strip() for line in output.split('\n'))
        if container_id in names:
            utils.execute('ip', 'netns', 'delete', container_id,
                          run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.warning(_('Cannot remove network namespace, netns id: %s'),
                    container_id)
def plug(self, instance, vif):
    """Dispatch plugging to the bridge or OVS implementation.

    :raises exception.NovaException: when the vif type is missing or
        not one of the supported types
    """
    vif_type = vif['type']
    LOG.debug('plug vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s',
              {'vif_type': vif_type,
               'instance': instance,
               'vif': vif})
    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))
    if vif_type == network_model.VIF_TYPE_OVS:
        self.plug_ovs(instance, vif)
    elif vif_type == network_model.VIF_TYPE_BRIDGE:
        self.plug_bridge(instance, vif)
    else:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
def unpause(self, instance):
    """Unpause paused VM instance.

    :param instance: nova.objects.instance.Instance
    :raises exception.NovaException: if docker cannot unpause the
        container
    """
    try:
        cont_id = self._find_container_by_name(instance['name']).get('id')
        if not self.docker.unpause_container(cont_id):
            # BUG FIX: previously raised the NovaException class object
            # itself; raise a proper instance with a message.
            raise exception.NovaException(
                _('Docker failed to unpause the container'))
    except Exception as e:
        msg = _('Cannot unpause container: {0}')
        raise exception.NovaException(msg.format(e),
                                      instance_id=instance['name'])
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None,
          flavor=None, image_name=None):
    """Create and start a Docker container for *instance*.

    Resolves the image (pulling it when absent), builds container
    creation arguments (hostname/binds derived from the first fixed
    IP), creates the container and hands off to _start_container.

    :raises exception.InstanceDeployFailure: when container creation
        returns no id
    """
    if not image_name:
        image_name = self._get_image_name(context, instance, image_meta)
    ips = self._extract_ips_entries(network_info)
    # Dashed form of the first fixed IP, used as hostname/bind suffix.
    # NOTE(review): assumes at least one IP entry exists — confirm
    # callers always pass populated network_info.
    ips_addr = '-'.join(ips[0].split('.'))
    # BUG FIX: two %s placeholders need two lazy arguments; the
    # original passed a single tuple, which raised a formatting error
    # inside the logging module at emit time.
    LOG.info('ooooooooo:%s:%s', ips, ips_addr)
    dns = self._extract_dns_entries(network_info)
    args = {
        # Hostname is derived from the IP rather than instance['name'].
        'hostname': 'docker%s' % ips_addr,
        'mem_limit': self._get_memory_limit_bytes(instance),
        'cpu_shares': self._get_cpu_shares(instance),
        'network_disabled': True,
        'dns': dns,
        'extra_hosts': ["docker%s:%s" % (ips_addr, ips[0])],
        'binds': [
            "/data/docker/%s:/data" % ips_addr,
            "/tmp/%s:/tmp" % ips_addr,
        ],
    }
    try:
        # Lazy %-args instead of eager interpolation.
        LOG.info('IIIIIIIIIIIIIIIIIIIIIIIIMAGE name %s', image_name)
        image = self.docker.inspect_image(self._encode_utf8(image_name))
    except errors.APIError:
        image = None
    if not image:
        image = self._pull_missing_image1(context, image_meta, instance)
    # Glance command-line overrides any set in the Docker image
    if (image_meta is not None and
            image_meta.properties.get("os_command_line") is not None):
        args['command'] = image_meta.properties.get("os_command_line")
    if 'metadata' in instance:
        args['environment'] = nova_utils.instance_meta(instance)
    container_id = self._create_container(instance, image_name, args)
    if not container_id:
        raise exception.InstanceDeployFailure(
            _('Cannot create container'),
            instance_id=instance['name'])
    self._start_container(container_id, instance, network_info)
def plug_bridge(self, instance, vif):
    """Plug the vif into a Linux bridge via a veth pair.

    Ensures the (optionally VLAN-backed) bridge exists, then creates a
    tap/ns veth pair, attaches the tap side to the bridge and brings
    it up.  Failures roll back via UndoManager and re-raise.
    """
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    bridge = vif['network']['bridge']
    gateway = network.find_gateway(instance, vif['network'])
    net = vif['network']
    if net.get_meta('should_create_vlan', False):
        # BUG FIX: the original line ended with a stray trailing comma
        # ("vlan = net.get_meta('vlan'),"), making `vlan` a one-element
        # tuple that was then passed to ensure_vlan_bridge().
        vlan = net.get_meta('vlan')
        iface = (CONF.vlan_interface or
                 vif['network'].get_meta('bridge_interface'))
        linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
            vlan, bridge, iface, net_attrs=vif, mtu=vif.get('mtu'))
        iface = 'vlan%s' % vlan
    else:
        iface = (CONF.flat_interface or
                 vif['network'].get_meta('bridge_interface'))
        LOG.debug('Ensuring bridge for %s - %s' % (iface, bridge))
        linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
            bridge, iface, net_attrs=vif, gateway=gateway)
    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name,
                      'type', 'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        # NOTE(samalba): Deleting the interface will delete all
        # associated resources (remove from the bridge, its pair, etc...)
        utils.execute('ip', 'link', 'set', if_local_name, 'address',
                      self._fe_random_mac(), run_as_root=True)
        utils.execute('brctl', 'addif', bridge, if_local_name,
                      run_as_root=True)
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def plug_bridge(self, instance, vif):
    """Plug the vif into a Linux bridge via a veth pair.

    Variant that reads the VLAN id directly from ``vif['vlan']``
    instead of the network metadata.

    NOTE(review): a second ``plug_bridge`` with different VLAN lookup
    exists earlier in this file; if both live on the same class, this
    later definition shadows the earlier one — confirm which is
    intended.
    """
    # FIX: removed the leftover debug statement LOG.info('5555').
    if_local_name = 'tap%s' % vif['id'][:11]
    if_remote_name = 'ns%s' % vif['id'][:11]
    bridge = vif['network']['bridge']
    gateway = network.find_gateway(instance, vif['network'])
    vlan = vif.get('vlan')
    if vlan is not None:
        iface = (CONF.vlan_interface or
                 vif['network'].get_meta('bridge_interface'))
        linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
            vlan, bridge, iface, net_attrs=vif, mtu=vif.get('mtu'))
        iface = 'vlan%s' % vlan
    else:
        iface = (CONF.flat_interface or
                 vif['network'].get_meta('bridge_interface'))
        LOG.debug('Ensuring bridge for %s - %s' % (iface, bridge))
        linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
            bridge, iface, net_attrs=vif, gateway=gateway)
    # Device already exists so return.
    if linux_net.device_exists(if_local_name):
        return
    undo_mgr = utils.UndoManager()
    try:
        utils.execute('ip', 'link', 'add', 'name', if_local_name,
                      'type', 'veth', 'peer', 'name', if_remote_name,
                      run_as_root=True)
        undo_mgr.undo_with(lambda: utils.execute(
            'ip', 'link', 'delete', if_local_name, run_as_root=True))
        # NOTE(samalba): Deleting the interface will delete all
        # associated resources (remove from the bridge, its pair, etc...)
        utils.execute('ip', 'link', 'set', if_local_name, 'address',
                      self._fe_random_mac(), run_as_root=True)
        utils.execute('brctl', 'addif', bridge, if_local_name,
                      run_as_root=True)
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to configure network")
        msg = _('Failed to setup the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _start_container(self, container_id, instance, network_info=None):
    """Start the container and wire up its network.

    When Neutron is in use and a vif-plugging timeout is configured,
    waits for the Neutron plug events; on timeout or any network
    failure the container is killed and removed before raising
    InstanceDeployFailure (timeout is fatal only when
    CONF.vif_plugging_is_fatal is set).
    """
    binds = self._get_key_binds(container_id, instance)
    dns = self._extract_dns_entries(network_info)
    # FIX: dropped the unused `ips` local the original computed here.
    self.docker.start(container_id, binds=binds, dns=dns,
                      privileged=CONF.docker.privileged)
    if not network_info:
        return
    timeout = CONF.vif_plugging_timeout
    if utils.is_neutron() and timeout:
        events = self._get_neutron_events(network_info)
    else:
        events = []
    try:
        with self.virtapi.wait_for_instance_event(
                instance, events, deadline=timeout,
                error_callback=self._neutron_failed_callback):
            self.plug_vifs(instance, network_info)
            self._attach_vifs(instance, network_info)
    except eventlet.timeout.Timeout:
        # FIX: LOG.warn is a deprecated alias; use LOG.warning.
        LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                        'instance %(uuid)s'), {'uuid': instance['name']})
        if CONF.vif_plugging_is_fatal:
            self.docker.kill(container_id)
            self.docker.remove_container(container_id, force=True)
            raise exception.InstanceDeployFailure(
                'Timeout waiting for vif plugging',
                instance_id=instance['name'])
    except Exception as e:
        LOG.warning(_('Cannot setup network: %s'), e,
                    instance=instance, exc_info=True)
        msg = _('Cannot setup network: {0}')
        self.docker.kill(container_id)
        self.docker.remove_container(container_id, force=True)
        raise exception.InstanceDeployFailure(
            msg.format(e), instance_id=instance['name'])
def _start_container(self, container_id, instance, network_info=None):
    """Start a created container and plug/attach its vifs.

    With no network_info the container is simply started.  Any network
    setup failure kills and removes the container before re-raising as
    InstanceDeployFailure.
    """
    key_binds = self._get_key_binds(container_id, instance)
    dns_entries = self._extract_dns_entries(network_info)
    self.docker.start(container_id, binds=key_binds, dns=dns_entries,
                      privileged=CONF.docker.privileged)
    if not network_info:
        return
    try:
        self.plug_vifs(instance, network_info)
        self._attach_vifs(instance, network_info)
    except Exception as e:
        LOG.warning(_('Cannot setup network: %s'), e,
                    instance=instance, exc_info=True)
        # Tear the container down before surfacing the failure.
        self.docker.kill(container_id)
        self.docker.remove_container(container_id, force=True)
        msg = _('Cannot setup network: {0}')
        raise exception.InstanceDeployFailure(
            msg.format(e), instance_id=instance['name'])
def get_available_resource(self, nodename):
    """Report this node's resource usage and capabilities.

    The first nodename seen is pinned; a later change is logged as an
    error (a restart is needed for it to take effect).
    """
    if not hasattr(self, '_nodename'):
        self._nodename = nodename
    if nodename != self._nodename:
        LOG.error(
            _('Hostname has changed from %(old)s to %(new)s. '
              'A restart is required to take effect.'),
            {'old': self._nodename, 'new': nodename})
    mem = hostinfo.get_memory_usage()
    dsk = hostinfo.get_disk_usage()
    # zy: pay attention that if you do not define supported_instances,
    # you won't start an container from this node
    # zy: add new architectures
    supported = [
        (arch.I686, hv_type.DOCKER, vm_mode.EXE),
        (arch.X86_64, hv_type.DOCKER, vm_mode.EXE),
        (arch.ARMV7, hv_type.DOCKER, vm_mode.EXE),
        (arch.AARCH64, hv_type.DOCKER, vm_mode.EXE),
        (arch.PPC64LE, hv_type.DOCKER, vm_mode.EXE),
    ]
    return {
        'vcpus': hostinfo.get_total_vcpus(),
        'vcpus_used': hostinfo.get_vcpus_used(self.list_instances(True)),
        'memory_mb': mem['total'] / units.Mi,
        'memory_mb_used': mem['used'] / units.Mi,
        'local_gb': dsk['total'] / units.Gi,
        'local_gb_used': dsk['used'] / units.Gi,
        'disk_available_least': dsk['available'] / units.Gi,
        'hypervisor_type': 'docker',
        'hypervisor_version': versionutils.convert_version_to_int('1.0'),
        'hypervisor_hostname': self._nodename,
        'cpu_info': '?',
        'numa_topology': None,
        'supported_instances': supported,
    }
def unplug(self, instance, vif):
    """Delete the vrouter port and its host-side veth device.

    Best-effort: failures are logged with the instance context rather
    than raised.
    """
    vif_type = vif['type']
    veth_name = 'veth%s' % vif['id'][:8]
    LOG.debug('Unplug vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s',
              {'vif_type': vif_type, 'instance': instance, 'vif': vif})
    try:
        self._vrouter_client.delete_port(vif['id'])
        if linux_net.device_exists(veth_name):
            utils.execute('ip', 'link', 'delete', veth_name,
                          run_as_root=True)
    except Exception:
        LOG.exception(_("Delete port failed"), instance=instance)
def unplug_iovisor(self, instance, vif):
    """Unplug vif from IOvisor

    Offboard an interface and deletes port from IOvisor
    """
    tap_name = 'tap%s' % vif['id'][:11]
    port_label = vif['network']['label'] + "_" + vif['id']
    try:
        # Take the interface down, then drop its port, then remove the
        # tap device itself.
        utils.execute('ifc_ctl', 'gateway', 'ifdown',
                      tap_name, 'access_vm', port_label,
                      vif['address'], run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'del_port', tap_name,
                      run_as_root=True)
        linux_net.delete_net_dev(tap_name)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def init_host(self, host):
    """Verify the Docker daemon is reachable before serving requests."""
    daemon_running = self._is_daemon_running()
    if daemon_running is False:
        raise exception.NovaException(
            _('Docker daemon is not running or is not reachable'
              ' (check the rights on /var/run/docker.sock)'))
def attach(self, instance, vif, container_id):
    """Move the container-side veth into the container's netns and
    register the port with the vrouter, then configure addressing.

    Phase 1 (rolled back on failure): move the ns-side interface into
    the namespace, add the port via the vrouter client, bring the
    host-side interface up.  Phase 2 (best-effort, logged only): set
    MAC, IPv6/IPv4 addresses and default routes inside the namespace.
    """
    vif_type = vif['type']
    LOG.debug(
        'Attach vif_type=%(vif_type)s instance=%(instance)s '
        'vif=%(vif)s', {
            'vif_type': vif_type,
            'instance': instance,
            'vif': vif
        })
    if_local_name = 'veth%s' % vif['id'][:8]
    if_remote_name = 'ns%s' % vif['id'][:8]
    undo_mgr = utils.UndoManager()
    # On rollback, delete the host-side veth (removing its peer too).
    undo_mgr.undo_with(lambda: utils.execute(
        'ip', 'link', 'delete', if_local_name, run_as_root=True))
    # Sentinel meaning "no IPv4 address found".
    ipv4_address = '0.0.0.0'
    ipv6_address = None
    if 'subnets' in vif['network']:
        subnets = vif['network']['subnets']
        # Only the first IP of each subnet is considered; the last v4
        # and last v6 subnet seen win.
        for subnet in subnets:
            ips = subnet['ips'][0]
            if (ips['version'] == 4):
                if ips['address'] is not None:
                    ipv4_address = ips['address']
                    ipv4_netmask = subnet['cidr'].split('/')[1]
                    ipv4_gateway = subnet['gateway']['address']
            if (ips['version'] == 6):
                if ips['address'] is not None:
                    ipv6_address = ips['address']
                    ipv6_netmask = subnet['cidr'].split('/')[1]
                    ipv6_gateway = subnet['gateway']['address']
    # Port registration payload for the vrouter.
    params = {
        'ip_address': ipv4_address,
        'vn_id': vif['network']['id'],
        'display_name': instance['display_name'],
        'hostname': instance['hostname'],
        'host': instance['host'],
        'vm_project_id': instance['project_id'],
        'port_type': 'NovaVMPort',
        'ip6_address': ipv6_address,
    }
    try:
        # NOTE(review): assumes container_id names a netns visible to
        # `ip netns` — confirm the netns was created beforehand.
        utils.execute('ip', 'link', 'set', if_remote_name, 'netns',
                      container_id, run_as_root=True)
        result = self._vrouter_client.add_port(instance['uuid'],
                                               vif['id'],
                                               if_local_name,
                                               vif['address'],
                                               **params)
        if not result:
            # follow the exception path
            raise RuntimeError('add_port returned %s' % str(result))
        utils.execute('ip', 'link', 'set', if_local_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception("Failed to attach the network")
        msg = _('Failed to attach the network, rolling back')
        undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
    # Address configuration inside the namespace: failures here are
    # only logged; no rollback is performed.
    try:
        utils.execute('ip', 'netns', 'exec', container_id, 'ip',
                      'link', 'set', if_remote_name, 'address',
                      vif['address'], run_as_root=True)
        if ipv6_address:
            ip = ipv6_address + "/" + ipv6_netmask
            gateway = ipv6_gateway
            utils.execute('ip', 'netns', 'exec', container_id,
                          'ifconfig', if_remote_name, 'inet6', 'add',
                          ip, run_as_root=True)
            utils.execute('ip', 'netns', 'exec', container_id,
                          'ip', '-6', 'route', 'replace', 'default',
                          'via', gateway, 'dev', if_remote_name,
                          run_as_root=True)
        if ipv4_address != '0.0.0.0':
            ip = ipv4_address + "/" + ipv4_netmask
            gateway = ipv4_gateway
            utils.execute('ip', 'netns', 'exec', container_id,
                          'ifconfig', if_remote_name, ip,
                          run_as_root=True)
            utils.execute('ip', 'netns', 'exec', container_id,
                          'ip', 'route', 'replace', 'default', 'via',
                          gateway, 'dev', if_remote_name,
                          run_as_root=True)
        utils.execute('ip', 'netns', 'exec', container_id,
                      'ip', 'link', 'set', if_remote_name, 'up',
                      run_as_root=True)
    except Exception:
        LOG.exception(_("Failed to attach vif"), instance=instance)