def _deploy_config_pre_start(self, deploy_config):
    """Deploy a config before the container is started.

    Most configs can be deployed before the container is up. Configs that
    require a reboot to take effect must be deployed in this function.

    @param deploy_config: Config to be deployed.
    """
    # Nothing to do if the source does not exist on the host.
    if not lxc_utils.path_exists(deploy_config.source):
        return
    # Resolve the target path relative to the host. Append-type configs
    # are staged in the temporary append folder; others go straight into
    # the container rootfs (target is an absolute in-container path, so
    # strip its leading '/').
    if deploy_config.append:
        target = os.path.join(self.tmp_append,
                              os.path.basename(deploy_config.target))
    else:
        target = os.path.join(self.container.rootfs,
                              deploy_config.target[1:])

    # Make sure the destination directory exists before copying.
    parent_dir = os.path.dirname(target)
    if not lxc_utils.path_exists(parent_dir):
        utils.run('sudo mkdir -p "%s"' % parent_dir)

    source = deploy_config.source
    # cp copies a directory's *contents* only when the source ends with
    # `/.`; append whatever part of that suffix is missing.
    if os.path.isdir(source) and not source.endswith('.'):
        source += '.' if source.endswith('/') else '/.'
    # `-R` copies recursively; `-L` always follows symbolic links in the
    # source.
    utils.run('sudo cp -RL "%s" "%s"' % (source, target))
def __init__(self, container):
    """Initialize the deploy config manager.

    @param container: The container needs to deploy config.
    """
    self.container = container
    # If shadow config is used, the deployment procedure will skip some
    # special handling of config file, e.g.,
    # 1. Set enable_master_ssh to False in autotest shadow config.
    # 2. Set ssh loglevel to ERROR for all hosts.
    self.is_shadow_config = os.path.exists(SSP_DEPLOY_SHADOW_CONFIG_FILE)
    if self.is_shadow_config:
        config_path = SSP_DEPLOY_SHADOW_CONFIG_FILE
    else:
        config_path = SSP_DEPLOY_CONFIG_FILE
    with open(config_path) as config_f:
        raw_configs = json.load(config_f)

    # Split the entries into append-style file deployments and mounts,
    # validating each as it is collected.
    self.deploy_configs = [self.validate(c)
                           for c in raw_configs if 'append' in c]
    self.mount_configs = [self.validate_mount(c)
                          for c in raw_configs if 'mount' in c]

    # Recreate a clean scratch folder inside the rootfs for append-type
    # deployments.
    self.tmp_append = os.path.join(self.container.rootfs, APPEND_FOLDER)
    if lxc_utils.path_exists(self.tmp_append):
        utils.run('sudo rm -rf "%s"' % self.tmp_append)
    utils.run('sudo mkdir -p "%s"' % self.tmp_append)
def create_from_base(self, name, disable_snapshot_clone=False,
                     force_cleanup=False):
    """Create a container from the base container.

    @param name: Name of the container.
    @param disable_snapshot_clone: Set to True to force to clone without
            using snapshot clone even if the host supports that.
    @param force_cleanup: Force to cleanup existing container.

    @return: A Container object for the created container.

    @raise ContainerError: If the container already exist.
    @raise error.CmdError: If lxc-clone call failed for any reason.
    """
    if self.exist(name) and not force_cleanup:
        raise error.ContainerError('Container %s already exists.' % name)

    # Cleanup existing container with the given name.
    container_folder = os.path.join(self.container_path, name)
    if lxc_utils.path_exists(container_folder) and force_cleanup:
        container = Container(self.container_path, {'name': name})
        try:
            container.destroy()
        except error.CmdError as e:
            # The container could be created in an incomplete state.
            # Delete the container folder instead.
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning('Failed to destroy container %s, error: %s',
                            name, e)
            utils.run('sudo rm -rf "%s"' % container_folder)

    use_snapshot = SUPPORT_SNAPSHOT_CLONE and not disable_snapshot_clone
    snapshot = '-s' if use_snapshot else ''
    # overlayfs is the default clone backend storage. However it is not
    # supported in Ganeti yet. Use aufs as the alternative.
    aufs = '-B aufs' if utils.is_vm() and use_snapshot else ''
    cmd = ('sudo lxc-clone -p %s -P %s %s' %
           (self.container_path, self.container_path,
            ' '.join([BASE, name, snapshot, aufs])))
    try:
        utils.run(cmd)
        return self.get(name)
    except error.CmdError:
        if not use_snapshot:
            raise
        else:
            # Snapshot clone failed, retry clone without snapshot. The
            # retry won't hit the code here and cause an infinite loop as
            # disable_snapshot_clone is set to True.
            container = self.create_from_base(
                    name, disable_snapshot_clone=True, force_cleanup=True)
            # Report metadata about retry success.
            autotest_es.post(use_http=True,
                             type_str=CONTAINER_CREATE_RETRY_METADB_TYPE,
                             metadata={'drone': socket.gethostname(),
                                       'name': name,
                                       'success': True})
            return container
def _get_container_info_moblab(container_path, **filters):
    """Get a collection of container information in the given container path
    in a Moblab.

    TODO(crbug.com/457496): remove this method once python 3 can be
    installed in Moblab and lxc-ls command can use python 3 code.

    When running in Moblab, lxc-ls behaves differently from a server with
    python 3 installed:
    1. lxc-ls returns a list of containers installed under /etc/lxc, the
       default lxc container directory.
    2. lxc-ls --active lists all active containers, regardless where the
       container is located.
    For such differences, we have to special case Moblab to make the
    behavior close to a server with python 3 installed. That is,
    1. List only containers in a given folder.
    2. Assume all active containers have state of RUNNING.

    @param container_path: Path to look for containers.
    @param filters: Key value to filter the containers, e.g., name='base'

    @return: A list of dictionaries that each dictionary has the
             information of a container. The keys are defined in ATTRIBUTES.
    """
    collection = []
    # All currently active containers, regardless of location; set for
    # O(1) membership checks below.
    active = set(utils.run('sudo lxc-ls --active').stdout.split())
    name_filter = filters.get('name', None)
    state_filter = filters.get('state', None)
    # Only 'name' and 'state' filters are supported in Moblab.
    if filters and set(filters.keys()) - set(['name', 'state']):
        raise error.ContainerError('When running in Moblab, container list '
                                   'filter only supports name and state.')

    for name in os.listdir(container_path):
        entry = os.path.join(container_path, name)
        # Skip all files and folders without rootfs subfolder.
        if os.path.isfile(entry):
            continue
        if not lxc_utils.path_exists(os.path.join(entry, 'rootfs')):
            continue
        state = 'RUNNING' if name in active else 'STOPPED'
        if name_filter and name_filter != name:
            continue
        if state_filter and state_filter != state:
            continue
        collection.append({'name': name, 'state': state})
    return collection