def __init__(self, config, state, parent, prev_partition):
    """Initialise a partition node from its config entry.

    :param config: dict for this partition ('name', 'base', 'size',
        optional 'flags' and 'type')
    :param state: global state dictionary (passed to NodeBase)
    :param parent: owning Partitioning object (provides the label)
    :param prev_partition: PartitionNode preceding this one on the
        same base device, or None for the first partition
    :raises BlockDeviceSetupException: on invalid or missing config
    """
    super(PartitionNode, self).__init__(config['name'], state)

    self.base = config['base']
    self.partitioning = parent
    self.prev_partition = prev_partition

    # filter out some MBR only options for clarity
    if self.partitioning.label == 'gpt':
        if 'flags' in config and 'primary' in config['flags']:
            raise BlockDeviceSetupException(
                "Primary flag not supported for GPT partitions")

    self.flags = set()
    if 'flags' in config:
        for f in config['flags']:
            if f == 'boot':
                self.flags.add(self.flag_boot)
            elif f == 'primary':
                self.flags.add(self.flag_primary)
            else:
                raise BlockDeviceSetupException("Unknown flag: %s" % f)

    if 'size' not in config:
        # bug fix: the message previously lacked a %s placeholder,
        # so "..." % self.name raised TypeError instead of this
        # exception
        raise BlockDeviceSetupException(
            "No size in partition [%s]" % self.name)
    self.size = config['size']

    # default partition types: GPT uses 4-hex-digit type codes as
    # strings ('8300' = Linux filesystem), MBR uses a one-byte id
    # (0x83 = Linux)
    if self.partitioning.label == 'gpt':
        self.ptype = str(config['type']) if 'type' in config else '8300'
    elif self.partitioning.label == 'mbr':
        self.ptype = int(config['type'], 16) if 'type' in config else 0x83
def config_tree_to_graph(config):
    """Convert a tree-style YAML config into graph form.

    Walk each top-level entry, validate that it is a single-key
    dict whose key names a registered plugin, and expand it into
    graph entries via recurse_config().

    :param config: YAML config; either graph or tree
    :return: graph-based result (flat list of entries)
    :raises BlockDeviceSetupException: on malformed entries
    """
    graph = []
    for entry in config:
        # Top-level entries must be single-key dicts with a
        # registered plugin for that key.
        if not isinstance(entry, dict):
            raise BlockDeviceSetupException(
                "Config entry not a dict: %s" % entry)
        if len(entry) != 1:
            raise BlockDeviceSetupException(
                "Config entry top-level should be a single dict: %s" % entry)
        plugin_name = next(iter(entry))
        if not is_a_plugin(plugin_name):
            raise BlockDeviceSetupException(
                "Config entry is not a plugin value: %s" % entry)
        graph.extend(recurse_config(entry))
    return graph
def __init__(self, config, defaults, state):
    """Create a Mount plugin instance and register its mount point.

    Builds the MountPointNode and records (mount_point, name) in
    the shared state, keeping the list in mount order.  The state
    holds plain tuples rather than the nodes themselves because the
    state is dumped to JSON and nodes are not serialisable; the
    ordered list is later consumed by get_edges().

    :param config: mount config entry
    :param defaults: default parameters; must contain 'mount-base'
    :param state: global state dictionary
    :raises BlockDeviceSetupException: on missing 'mount-base' or a
        duplicated mount point
    """
    super(Mount, self).__init__()

    if 'mount-base' not in defaults:
        raise BlockDeviceSetupException(
            "Mount default config needs 'mount-base'")

    self.node = MountPointNode(defaults['mount-base'], config, state)

    ordered = state.get('sorted_mount_points', [])
    if any(point == self.node.mount_point for point, _ in ordered):
        raise BlockDeviceSetupException(
            "Mount point [%s] specified more than once" %
            self.node.mount_point)

    ordered.append((self.node.mount_point, self.node.name))
    ordered.sort(key=functools.cmp_to_key(cmp_mount_order))

    # Save the state if it's new (otherwise this is idempotent update)
    state['sorted_mount_points'] = ordered
    logger.debug("Ordered mounts now: %s", ordered)
def __init__(self, config, state):
    """Create a filesystem node from its config entry.

    :param config: dict with mandatory 'name', 'base', 'type' and
        optional 'label', 'opts', 'uuid'
    :param state: global state dictionary (passed to NodeBase)
    :raises BlockDeviceSetupException: on missing mandatory keys,
        duplicate filesystem labels, or a label exceeding the
        filesystem's maximum label length
    """
    logger.debug("Create filesystem object; config [%s]", config)
    super(FilesystemNode, self).__init__(config['name'], state)

    # Parameter check (mandatory)
    for pname in ['base', 'type']:
        if pname not in config:
            raise BlockDeviceSetupException(
                "Mkfs config needs [%s]" % pname)
        setattr(self, pname, config[pname])

    # Parameter check (optional)
    for pname in ['label', 'opts', 'uuid']:
        setattr(self, pname, config[pname] if pname in config else None)

    if self.label is None:
        self.label = self.name

    # Historic reasons - this will hopefully vanish in one of
    # the next major releases
    if self.label == "cloudimg-rootfs" and self.type == "xfs":
        logger.warning("Default label [cloudimg-rootfs] too long for xfs "
                       "file system - using [img-rootfs] instead")
        self.label = "img-rootfs"

    # ensure we don't already have a fs with this label ... they
    # all must be unique.
    if 'fs_labels' in self.state:
        if self.label in self.state['fs_labels']:
            raise BlockDeviceSetupException(
                "File system label [%s] used more than once" % self.label)
        self.state['fs_labels'].append(self.label)
    else:
        self.state['fs_labels'] = [self.label]

    if self.type in file_system_max_label_length:
        if file_system_max_label_length[self.type] < len(self.label):
            # bug fix: the message used to hard-code "33,820" instead
            # of showing the actual maximum for this filesystem type
            raise BlockDeviceSetupException(
                "Label [{label}] too long for filesystem [{type}]: "
                "{len} > {max_len}".format(
                    label=self.label,
                    type=self.type,
                    len=len(self.label),
                    max_len=file_system_max_label_length[self.type]))
    else:
        logger.warning("Length of label [%s] cannot be checked for "
                       "filesystem [%s]: unknown max length",
                       self.label, self.type)
        logger.warning("Continue - but this might lead to an error")

    if self.opts is not None:
        self.opts = self.opts.strip().split(' ')

    if self.uuid is None:
        self.uuid = str(uuid.uuid4())

    logger.debug("Filesystem created [%s]", self)
def __init__(self, config, state):
    """Create a filesystem node from its config entry.

    :param config: dict with mandatory 'name', 'base', 'type' and
        optional 'label', 'opts', 'uuid'
    :param state: global state dictionary (passed to NodeBase)
    :raises BlockDeviceSetupException: on missing mandatory keys,
        duplicate filesystem labels, or a label exceeding the
        filesystem's maximum label length
    """
    logger.debug("Create filesystem object; config [%s]", config)
    super(FilesystemNode, self).__init__(config['name'], state)

    # Parameter check (mandatory)
    for pname in ['base', 'type']:
        if pname not in config:
            raise BlockDeviceSetupException(
                "Mkfs config needs [%s]" % pname)
        setattr(self, pname, config[pname])

    # Parameter check (optional)
    for pname in ['label', 'opts', 'uuid']:
        setattr(self, pname, config[pname] if pname in config else None)

    if self.label is None:
        self.label = self.name

    # for fat/vfat, we use the label as an identifier for the disk
    # so we need that the label is converted to upper case
    if self.type in ('vfat', 'fat'):
        self.label = self.label.upper()

    # ensure we don't already have a fs with this label ... they
    # all must be unique.
    if 'fs_labels' in self.state:
        if self.label in self.state['fs_labels']:
            raise BlockDeviceSetupException(
                "File system label [%s] used more than once" % self.label)
        self.state['fs_labels'].append(self.label)
    else:
        self.state['fs_labels'] = [self.label]

    if self.type in file_system_max_label_length:
        if file_system_max_label_length[self.type] < len(self.label):
            # bug fix: the message used to hard-code "33,820" instead
            # of showing the actual maximum for this filesystem type
            raise BlockDeviceSetupException(
                "Label [{label}] too long for filesystem [{type}]: "
                "{len} > {max_len}".format(
                    label=self.label,
                    type=self.type,
                    len=len(self.label),
                    max_len=file_system_max_label_length[self.type]))
    else:
        logger.warning("Length of label [%s] cannot be checked for "
                       "filesystem [%s]: unknown max length",
                       self.label, self.type)
        logger.warning("Continue - but this might lead to an error")

    if self.opts is not None:
        self.opts = self.opts.strip().split(' ')

    if self.uuid is None:
        self.uuid = str(uuid.uuid4())

    logger.debug("Filesystem created [%s]", self)
def __init__(self, config, default_config, state):
    """Create the partitioning plugin from its config entry.

    :param config: partitioning config; needs 'base', 'partitions'
        and 'label' (only 'mbr' is accepted), optional 'align'
    :param default_config: default parameters (unused here)
    :param state: global state dictionary; kept as a reference
        because partition nodes call back into this object
    :raises BlockDeviceSetupException: on missing keys or an
        unsupported label
    """
    logger.debug("Creating Partitioning object; config [%s]", config)
    super(Partitioning, self).__init__()

    # Unlike other PluginBase we are somewhat persistent, as the
    # partition nodes call back to us (see create() below). We
    # need to keep this reference.
    self.state = state

    # Multiple partitions of one base are handled within this one
    # object, so remember whether create/cleanup already ran.
    self.already_created = False
    self.already_cleaned = False

    # Parameter check; order matters so the same error fires first
    # as before.
    for required in ('base', 'partitions', 'label'):
        if required not in config:
            raise BlockDeviceSetupException(
                "Partitioning config needs '%s'" % required)
    self.base = config['base']
    self.label = config['label']
    if self.label not in ("mbr", ):
        raise BlockDeviceSetupException("Label must be 'mbr'")

    # It is VERY important to get the alignment correct. If this
    # is not correct, the disk performance might be very poor.
    # Example: In some tests a 'off by one' leads to a write
    # performance of 30% compared to a correctly aligned
    # partition.
    # The problem for DIB is, that it cannot assume that the host
    # system uses the same IO sizes as the target system,
    # therefore here a fixed approach (as used in all modern
    # systems with large disks) is used. The partitions are
    # aligned to 1MiB (which are about 2048 times 512 bytes
    # blocks)
    self.align = 1024 * 1024  # 1MiB as default
    if 'align' in config:
        self.align = parse_abs_size_spec(config['align'])

    # Build one PartitionNode per entry, chaining each to its
    # predecessor on the same base device.
    self.partitions = []
    previous = None
    for part_cfg in config['partitions']:
        node = PartitionNode(part_cfg, state, self, previous)
        self.partitions.append(node)
        previous = node
def exec_sudo(cmd):
    """Run a command under sudo

    Run command under sudo, with debug trace of output.  This is
    like subprocess.check_call() but sudo wrapped and with output
    tracing at debug levels.

    Arguments:
    :param cmd: str command list; for Popen()
    :return: the stdout+stderror of the called command
    :raises BlockDeviceSetupException: if return code != 0.

    Exception values similar to ``subprocess.CalledProcessError``

    * ``returncode`` : returncode of child
    * ``cmd`` : the command run
    * ``output`` : stdout+stderr output
    """
    assert isinstance(cmd, list)
    sudo_cmd = ["sudo"] + cmd

    try:
        logger.info("Calling [%s]", " ".join(sudo_cmd))
    except TypeError:
        # Popen actually doesn't care, but we've managed to get mixed
        # str and bytes in argument lists which causes errors logging
        # commands.  Give a clue as to what's going on.
        logger.exception("Ensure all arguments are str type!")
        raise

    proc = subprocess.Popen(sudo_cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)

    # decode each line as it arrives so it can be traced, collecting
    # the pieces for the final return value
    encoding = locale.getpreferredencoding(False)
    pieces = []
    with proc.stdout:
        for raw in iter(proc.stdout.readline, b''):
            text = raw.decode(encoding=encoding, errors='backslashreplace')
            pieces.append(text)
            logger.debug("exec_sudo: %s", text.rstrip())
    proc.wait()

    out = "".join(pieces)
    if proc.returncode:
        e = BlockDeviceSetupException("exec_sudo failed")
        e.returncode = proc.returncode
        e.cmd = ' '.join(sudo_cmd)
        e.output = out
        raise e
    return out
def get_node(self):
    """Register this mount point in the global table.

    :return: self, after insertion into ``mount_points``
    :raises BlockDeviceSetupException: if the mount point was
        already registered
    """
    global mount_points
    if self.mount_point not in mount_points:
        logger.debug("Insert node [%s]", self)
        mount_points[self.mount_point] = self
        return self
    raise BlockDeviceSetupException(
        "Mount point [%s] specified more than once" % self.mount_point)
def __init__(self, config, defaults):
    """Create a Mount plugin instance and order its mount point.

    Builds the MountPointNode and inserts it into the global
    ``sorted_mount_points`` list, re-sorting so the list stays in
    mount order.

    :param config: mount config entry
    :param defaults: default parameters; must contain 'mount-base'
    :raises BlockDeviceSetupException: on missing 'mount-base' or a
        duplicated mount point
    """
    super(Mount, self).__init__()

    if 'mount-base' not in defaults:
        raise BlockDeviceSetupException(
            "Mount default config needs 'mount-base'")

    self.node = MountPointNode(defaults['mount-base'], config)

    # save this new node to the global mount-point list and
    # re-order it.
    global sorted_mount_points
    if any(existing.mount_point == self.node.mount_point
           for existing in sorted_mount_points):
        raise BlockDeviceSetupException(
            "Mount point [%s] specified more than once" %
            self.node.mount_point)
    sorted_mount_points.append(self.node)
    sorted_mount_points.sort()
    logger.debug("Ordered mounts now: %s", sorted_mount_points)
def __init__(self, mount_base, config, state):
    """Create a mount point node.

    :param mount_base: base directory under which mounts happen
    :param config: dict with mandatory 'name', 'base', 'mount_point'
    :param state: global state dictionary (passed to NodeBase)
    :raises BlockDeviceSetupException: on missing mandatory keys
    """
    super(MountPointNode, self).__init__(config['name'], state)
    self.mount_base = mount_base

    # mandatory parameters become attributes
    for required in ('base', 'mount_point'):
        if required not in config:
            raise BlockDeviceSetupException(
                "MountPoint config needs [%s]" % required)
        setattr(self, required, config[required])
    logger.debug("MountPoint created [%s]", self)
def __init__(self, config, defaults):
    """Create a Mount plugin instance.

    Builds one MountPointNode from the config and stores it in the
    per-instance ``mount_points`` dict keyed by node name.

    :param config: mount config entry
    :param defaults: default parameters; must contain 'mount-base'
    :raises BlockDeviceSetupException: when 'mount-base' is missing
    """
    super(Mount, self).__init__()
    self.mount_points = {}

    if 'mount-base' not in defaults:
        raise BlockDeviceSetupException(
            "Mount default config needs 'mount-base'")
    self.mount_base = defaults['mount-base']

    node = MountPointNode(self.mount_base, config)
    self.mount_points[node.get_name()] = node
def __init__(self, config, parent, prev_partition):
    """Initialise a partition node from its config entry.

    :param config: dict for this partition ('name', 'base', 'size',
        optional 'flags' and 'type')
    :param parent: owning Partitioning object
    :param prev_partition: PartitionNode preceding this one on the
        same base device, or None for the first partition
    :raises BlockDeviceSetupException: on invalid or missing config
    """
    super(PartitionNode, self).__init__(config['name'])

    self.base = config['base']
    self.partitioning = parent
    self.prev_partition = prev_partition

    self.flags = set()
    if 'flags' in config:
        for f in config['flags']:
            if f == 'boot':
                self.flags.add(self.flag_boot)
            elif f == 'primary':
                self.flags.add(self.flag_primary)
            else:
                raise BlockDeviceSetupException("Unknown flag: %s" % f)

    if 'size' not in config:
        # bug fix: the message previously lacked a %s placeholder,
        # so "..." % self.name raised TypeError instead of this
        # exception
        raise BlockDeviceSetupException(
            "No size in partition [%s]" % self.name)
    self.size = config['size']

    # MBR partition type id; default 0x83 = Linux
    self.ptype = int(config['type'], 16) if 'type' in config else 0x83
def _loopdev_attach(filename):
    """Attach a loop device to an image file.

    Runs ``sudo losetup --show -f`` and returns the name of the
    block device losetup prints (e.g. ``/dev/loop0``).

    :param filename: image file to attach
    :return: path of the new loop block device
    :raises BlockDeviceSetupException: if losetup fails
    """
    logger.info("loopdev attach")
    logger.debug("Calling [sudo losetup --show -f %s]", filename)
    subp = subprocess.Popen(["sudo", "losetup", "--show", "-f", filename],
                            stdout=subprocess.PIPE)
    # communicate() drains the pipe before waiting; the previous
    # wait()-then-read pattern can deadlock if the child fills the
    # pipe buffer (see subprocess documentation)
    stdout, _ = subp.communicate()
    if subp.returncode == 0:
        # [:-1]: Cut of the newline
        block_device = stdout[:-1].decode("utf-8")
        logger.info("New block device [%s]", block_device)
        return block_device
    else:
        logger.error("losetup failed")
        raise BlockDeviceSetupException("losetup failed")
def __init__(self, filename=None):
    """Initialise state

    :param filename: if given and the file exists, it is loaded as
        the state; if given but missing, an exception is raised.
        If not given, the state starts as an empty dictionary.
    :raises BlockDeviceSetupException: when ``filename`` is given
        but does not exist
    """
    # no dump file: start fresh
    if not filename:
        self.state = {}
        return
    if not os.path.exists(filename):
        raise BlockDeviceSetupException("State dump not found")
    self.state = _load_json(filename)
    assert self.state is not None
def cmd_delete(self):
    """Cleanup all remaining relicts - in case of an error

    Loads the pickled node call order, calls delete() on each node
    in reverse creation order, then removes the temporary state
    directory.

    :return: 0 on success
    :raises BlockDeviceSetupException: if the pickle file is missing
    """
    # Deleting must be done in reverse order
    try:
        # with-block closes the file even if unpickling fails
        # (previously the open file handle was leaked)
        with open(self.node_pickle_file_name, 'rb') as node_file:
            call_order = pickle.load(node_file)
    except IOError:
        raise BlockDeviceSetupException("Pickle file not found")

    for node in reversed(call_order):
        node.delete()

    logger.info("Removing temporary state dir [%s]", self.state_dir)
    shutil.rmtree(self.state_dir)
    return 0
def _config_error(self, msg):
    # Convenience helper: report a configuration problem by raising
    # the standard exception with the given message.
    raise BlockDeviceSetupException(msg)
def create_graph(config, default_config, state):
    """Generate configuration digraph

    Generate the configuration digraph from the config

    :param config: graph configuration file
    :param default_config: default parameters (from --params)
    :param state: reference to global state dictionary.
        Passed to :func:`PluginBase.__init__`
    :return: tuple with the graph object (a :class:`nx.Digraph`),
        ordered list of :class:`NodeBase` objects
    """
    # This is the directed graph of nodes: each parse method must
    # add the appropriate nodes and edges.
    dg = nx.DiGraph()

    # check about dg.nodes, to support different networkx versions
    # (newer networkx exposes an iterable .nodes view; older versions
    # use the .node dict)
    if hasattr(dg.nodes, '__iter__'):
        dg_nodes = dg.nodes
    else:
        dg_nodes = dg.node

    for config_entry in config:
        # this should have been checked by generate_config
        assert len(config_entry) == 1

        logger.debug("Config entry [%s]", config_entry)
        cfg_obj_name = list(config_entry.keys())[0]
        cfg_obj_val = config_entry[cfg_obj_name]

        # Instantiate a "plugin" object, passing it the
        # configuration entry
        # XXX : would a "factory" pattern for plugins, where we
        # make a method call on an object stevedore has instantiated
        # be better here?
        if not is_a_plugin(cfg_obj_name):
            raise BlockDeviceSetupException(
                ("Config element [%s] is not implemented" % cfg_obj_name))
        plugin = _extensions[cfg_obj_name].plugin
        assert issubclass(plugin, PluginBase)
        cfg_obj = plugin(cfg_obj_val, default_config, state)

        # Ask the plugin for the nodes it would like to insert
        # into the graph.  Some plugins, such as partitioning,
        # return multiple nodes from one config entry.
        nodes = cfg_obj.get_nodes()
        assert isinstance(nodes, list)
        for node in nodes:
            # plugins should return nodes...
            assert isinstance(node, NodeBase)
            # ensure node names are unique.  networkx by default
            # just appends the attribute to the node dict for
            # existing nodes, which is not what we want.
            if node.name in dg_nodes:
                raise BlockDeviceSetupException("Duplicate node name: %s" %
                                                (node.name))
            logger.debug("Adding %s : %s", node.name, node)
            dg.add_node(node.name, obj=node)

    # Now find edges
    for name, attr in dg.nodes(data=True):
        obj = attr['obj']
        # Unfortunately, we can not determine node edges just from
        # the configuration file.  It's not always simply the
        # "base:" pointer.  So ask nodes for a list of nodes they
        # want to point to.  *mostly* it's just base: ... but
        # mounting is different.
        #   edges_from are the nodes that point to us
        #   edges_to are the nodes we point to
        edges_from, edges_to = obj.get_edges()
        logger.debug("Edges for %s: f:%s t:%s", name,
                     edges_from, edges_to)
        for edge_from in edges_from:
            if edge_from not in dg_nodes:
                raise BlockDeviceSetupException("Edge not defined: %s->%s" %
                                                (edge_from, name))
            dg.add_edge(edge_from, name)
        for edge_to in edges_to:
            if edge_to not in dg_nodes:
                raise BlockDeviceSetupException("Edge not defined: %s->%s" %
                                                (name, edge_to))
            dg.add_edge(name, edge_to)

    # this can be quite helpful debugging but needs pydotplus which
    # isn't in requirements.  for debugging, do
    #   .tox/py27/bin/pip install pydotplus
    #   DUMP_CONFIG_GRAPH=1 tox -e py27 -- specific_test
    #   dotty /tmp/graph_dump.dot
    # to see helpful output
    if 'DUMP_CONFIG_GRAPH' in os.environ:
        nx.nx_pydot.write_dot(dg, '/tmp/graph_dump.dot')

    # Topological sort (i.e. create a linear array that satisfies
    # dependencies) and return the object list
    call_order_nodes = list(nx.topological_sort(dg))
    logger.debug("Call order: %s", call_order_nodes)
    call_order = [dg_nodes[n]['obj'] for n in call_order_nodes]

    return dg, call_order