def _dumpPartitionTable(targetDir, backupUser, backupGroup, compress=True):
    """
    Dumps information about the partition table via ``fdisk``.

    Args:
       targetDir: Directory to write output file into
       backupUser: User which should own the resulting file
       backupGroup: Group which should own the resulting file
       compress: Indicates whether to compress the output file

    Raises:
       IOError: If the dump fails for some reason
    """
    # This dump is optional: silently skip when fdisk is missing or not executable.
    if not os.path.exists(FDISK_PATH):
        logger.info("Not executing partition table dump since %s doesn't seem to exist.", FDISK_PATH)
        return
    if not os.access(FDISK_PATH, os.X_OK):
        logger.info("Not executing partition table dump since %s cannot be executed.", FDISK_PATH)
        return
    outputFile, filename = _getOutputFile(targetDir, "fdisk-l", compress)
    with outputFile:
        command = resolveCommand(FDISK_COMMAND)
        status = executeCommand(command, [], returnOutput=False, ignoreStderr=True, outputFile=outputFile)[0]
        if status != 0:
            raise IOError("Error [%d] executing partition table dump." % status)
    if not os.path.exists(filename):
        raise IOError("File [%s] does not seem to exist after partition table dump finished." % filename)
    changeOwnership(filename, backupUser, backupGroup)
def _encryptFileWithGpg(sourcePath, recipient):
    """
    Encrypts the indicated source file using GPG.

    The encrypted file will be in GPG's binary output format and will have the
    same name as the source file plus a ``".gpg"`` extension.  The source file
    itself is neither modified nor removed by this call.

    Args:
       sourcePath: Absolute path of file to be encrypted
       recipient: Recipient name to be passed to GPG's ``"-r"`` option

    Returns:
        Path to the newly-created encrypted file

    Raises:
       IOError: If there is a problem encrypting the file
    """
    encryptedPath = "%s.gpg" % sourcePath
    gpgCommand = resolveCommand(GPG_COMMAND)
    gpgArgs = ["--batch", "--yes", "-e", "-r", recipient, "-o", encryptedPath, sourcePath]
    status = executeCommand(gpgCommand, gpgArgs)[0]
    if status != 0:
        raise IOError("Error [%d] calling gpg to encrypt [%s]." % (status, sourcePath))
    # gpg can exit zero in odd situations; double-check that the output exists.
    if not os.path.exists(encryptedPath):
        raise IOError("After call to [%s], encrypted file [%s] does not exist." % (gpgCommand, encryptedPath))
    logger.debug("Completed encrypting file [%s] to [%s].", sourcePath, encryptedPath)
    return encryptedPath
def _dumpFilesystemContents(targetDir, backupUser, backupGroup, compress=True):
    """
    Dumps complete listing of filesystem contents via ``ls -laR``.

    Args:
       targetDir: Directory to write output file into
       backupUser: User which should own the resulting file
       backupGroup: Group which should own the resulting file
       compress: Indicates whether to compress the output file

    Raises:
       IOError: If the dump fails for some reason
    """
    outputFile, filename = _getOutputFile(targetDir, "ls-laR", compress)
    with outputFile:
        lsCommand = resolveCommand(LS_COMMAND)
        # The return status from 'ls' is unreliable here, so it is deliberately ignored;
        # success is judged by whether the output file exists afterwards.
        executeCommand(lsCommand, [], returnOutput=False, ignoreStderr=True, doNotLog=True, outputFile=outputFile)
    if not os.path.exists(filename):
        raise IOError("File [%s] does not seem to exist after filesystem contents dump finished." % filename)
    changeOwnership(filename, backupUser, backupGroup)
def _getEstimatedSize(self, entries):
    """
    Returns the estimated size (in bytes) for the passed-in entries dictionary.

    Args:
       entries: Entries dictionary to estimate the size of (as accepted by ``_buildSizeArgs``)

    Returns:
        Estimated size of the image, in bytes

    Raises:
       IOError: If there is a problem calling ``mkisofs``
    """
    args = self._buildSizeArgs(entries)
    command = resolveCommand(MKISOFS_COMMAND)
    (result, output) = executeCommand(command, args, returnOutput=True, ignoreStderr=True)
    if result != 0:
        raise IOError("Error (%d) executing mkisofs command to estimate size." % result)
    # mkisofs -print-size emits exactly one line containing a sector count
    if len(output) != 1:
        raise IOError("Unable to parse mkisofs output.")
    try:
        sectors = float(output[0])
        return convertSize(sectors, UNIT_SECTORS, UNIT_BYTES)
    except ValueError:
        # Narrowed from a bare except: only a malformed number should be
        # translated into a parse error; never swallow SystemExit/KeyboardInterrupt.
        raise IOError("Unable to parse mkisofs output.")
def _writeImage(self, newDisc, imagePath, entries, mediaLabel=None):
    """
    Writes an image to disc using either an entries list or an ISO image on disk.

    Callers are assumed to have validated paths, etc. before calling this method.

    Args:
       newDisc: Indicates whether the disc should be re-initialized
       imagePath: Path to an ISO image on disk, or ``None`` to use ``entries``
       entries: Mapping from path to graft point, or ``None`` to use ``imagePath``
       mediaLabel: Media label to apply, if any

    Raises:
       IOError: If the media could not be written to for some reason
    """
    writeArgs = DvdWriter._buildWriteArgs(newDisc, self.hardwareId, self._driveSpeed,
                                          imagePath, entries, mediaLabel, dryRun=False)
    growisofs = resolveCommand(GROWISOFS_COMMAND)
    (status, output) = executeCommand(growisofs, writeArgs, returnOutput=True)
    if status != 0:
        # _searchForOverburn raises its own exception if an overburn condition is found
        DvdWriter._searchForOverburn(output)
        raise IOError("Error (%d) executing command to write disc." % status)
    self.refreshMedia()
def backupDatabase(user, backupFile, database=None):
    """
    Backs up an individual PostgreSQL database, or all databases.

    This function backs up either a named local PostgreSQL database or all local
    PostgreSQL databases, using the passed-in user for connectivity.  This is
    *always* a full backup; there is no facility for incremental backups.

    The backup data will be written into the passed-in backup file.  Normally
    this would be an object as returned from ``open``, but it is possible to use
    something like a ``GzipFile`` to write compressed output.  The caller is
    responsible for closing the passed-in backup file.

    *Note:* Typically, you would use the ``root`` user to back up all databases.

    Args:
       user (String representing PostgreSQL username): User to use for connecting to the database
       backupFile (Python file object as from ``open`` or ``file``): File to use for writing backup
       database (String representing database name, or ``None`` for all databases): Name of the database to be backed up

    Raises:
       ValueError: If some value is missing or invalid
       IOError: If there is a problem executing the PostgreSQL dump
    """
    args = [] if user is None else ["-U", user]
    if database is None:
        command = resolveCommand(POSTGRESQLDUMPALL_COMMAND)
    else:
        command = resolveCommand(POSTGRESQLDUMP_COMMAND)
        args.append(database)
    status = executeCommand(command, args, returnOutput=False, ignoreStderr=True,
                            doNotLog=True, outputFile=backupFile)[0]
    if status != 0:
        if database is None:
            raise IOError("Error [%d] executing PostgreSQL database dump for all databases." % status)
        raise IOError("Error [%d] executing PostgreSQL database dump for database [%s]." % (status, database))
def unlockTray(self):
    """
    Unlocks the device's tray via 'eject -i off'.

    Raises:
       IOError: If there is an error talking to the device
    """
    eject = resolveCommand(EJECT_COMMAND)
    status = executeCommand(eject, ["-i", "off", self.device])[0]
    if status != 0:
        raise IOError("Error (%d) executing eject command to unlock tray." % status)
def _confirmGpgRecipient(recipient):
    """
    Confirms that a recipient's public key is known to GPG.

    Returns normally when the key is found; otherwise raises an exception.

    Args:
       recipient: Recipient name

    Raises:
       IOError: If the recipient's public key is not known to GPG
    """
    gpgCommand = resolveCommand(GPG_COMMAND)
    # NOTE: --with-colons would be more appropriate if the output were ever parsed
    gpgArgs = ["--batch", "-k", recipient]
    status = executeCommand(gpgCommand, gpgArgs)[0]
    if status != 0:
        raise IOError("GPG unable to find public key for [%s]." % recipient)
def openTray(self):
    """
    Opens the device's tray and leaves it open.

    This only works if the device has a tray and supports ejecting its media.
    We have no way to know if the tray is currently open or closed, so we just
    send the appropriate command and hope for the best.  If the device does not
    have a tray or does not support ejecting its media, then we do nothing.

    Starting with Debian wheezy on my backup hardware, I started seeing
    consistent problems with the eject command.  I couldn't tell whether these
    problems were due to the device management system or to the new kernel
    (3.2.0).  Initially, I saw simple eject failures, possibly because I was
    opening and closing the tray too quickly.  I worked around that behavior
    with the new ejectDelay flag.

    Later, I sometimes ran into issues after writing an image to a disc: eject
    would give errors like "unable to eject, last error: Inappropriate ioctl
    for device".  Various sources online (like Ubuntu bug #875543) suggested
    that the drive was being locked somehow, and that the workaround was to run
    'eject -i off' to unlock it.  Sure enough, that fixed the problem for me,
    so now it's a normal error-handling strategy.

    Raises:
       IOError: If there is an error talking to the device
    """
    if not (self._deviceHasTray and self._deviceCanEject):
        return
    command = resolveCommand(EJECT_COMMAND)
    args = [self.device]
    if executeCommand(command, args)[0] != 0:
        # First attempt failed: unlock the tray (see docstring) and retry once.
        logger.debug("Eject failed; attempting kludge of unlocking the tray before retrying.")
        self.unlockTray()
        result = executeCommand(command, args)[0]
        if result != 0:
            raise IOError("Error (%d) executing eject command to open tray (failed even after unlocking tray)." % result)
        logger.debug("Kludge was apparently successful.")
    if self.ejectDelay is not None:
        logger.debug("Per configuration, sleeping %d seconds after opening tray.", self.ejectDelay)
        time.sleep(self.ejectDelay)
def _retrieveSectorsUsed(self):
    """
    Retrieves the number of sectors used on the current media.

    This is a little ugly.  We need to call growisofs in "dry-run" mode and
    parse some information from its output.  However, to do that, we need to
    create a dummy file that we can pass to the command -- and we have to make
    sure to remove it later.

    Once growisofs has been run, then we call ``_parseSectorsUsed`` to parse
    the output and calculate the number of sectors used on the media.

    Returns:
        Number of sectors used on the media (0.0 if the disc cannot be read)
    """
    tempdir = tempfile.mkdtemp()
    try:
        entries = {tempdir: None}
        args = DvdWriter._buildWriteArgs(False, self.hardwareId, self.driveSpeed, None, entries, None, dryRun=True)
        command = resolveCommand(GROWISOFS_COMMAND)
        (result, output) = executeCommand(command, args, returnOutput=True)
        if result != 0:
            # A failure here usually means a blank/uninitialized disc, not a fatal error
            logger.debug("Error (%d) calling growisofs to read sectors used.", result)
            logger.warning("Unable to read disc (might not be initialized); returning zero sectors used.")
            return 0.0
        sectorsUsed = DvdWriter._parseSectorsUsed(output)
        logger.debug("Determined sectors used as %s", sectorsUsed)
        return sectorsUsed
    finally:
        if os.path.exists(tempdir):
            try:
                os.rmdir(tempdir)
            except OSError:
                # Narrowed from a bare except: cleanup is best-effort and must
                # never mask the real return value or exception, but it should
                # only swallow filesystem errors.
                pass
def readMediaLabel(devicePath):
    """
    Reads the media label (volume name) from the indicated device.

    The volume name is read using the ``volname`` command.

    Args:
       devicePath: Device path to read from

    Returns:
        Media label as a string, or None if there is no name or it could not be read
    """
    volnameCommand = resolveCommand(VOLNAME_COMMAND)
    (status, lines) = executeCommand(volnameCommand, [devicePath], returnOutput=True, ignoreStderr=True)
    # Any failure (non-zero status, no output) is treated as "no label" rather than an error
    if status != 0 or not lines:
        return None
    return lines[0].rstrip()
def closeTray(self):
    """
    Closes the device's tray.

    This only works if the device has a tray and supports ejecting its media.
    We have no way to know if the tray is currently open or closed, so we just
    send the appropriate command and hope for the best.  If the device does not
    have a tray or does not support ejecting its media, then we do nothing.

    Raises:
       IOError: If there is an error talking to the device
    """
    if not (self._deviceHasTray and self._deviceCanEject):
        return
    eject = resolveCommand(EJECT_COMMAND)
    status = executeCommand(eject, ["-t", self.device])[0]
    if status != 0:
        raise IOError("Error (%d) executing eject command to close tray." % status)
def writeImage(self, imagePath):
    """
    Writes this image to disk using the image path.

    Args:
       imagePath (String representing a path on disk): Path to write image out as

    Raises:
       IOError: If there is an error writing the image to disk
       ValueError: If there are no filesystem entries in the image
       ValueError: If a path cannot be encoded properly
    """
    imagePath = encodePath(imagePath)
    if not self.entries:
        raise ValueError("Image does not contain any entries.")
    args = self._buildWriteArgs(self.entries, imagePath)
    command = resolveCommand(MKISOFS_COMMAND)
    # returnOutput=False yields no meaningful output, so only the status is kept
    # (the original unpacked an unused ``output`` variable here)
    result = executeCommand(command, args, returnOutput=False)[0]
    if result != 0:
        raise IOError("Error (%d) executing mkisofs command to build image." % result)
def _dumpDebianPackages(targetDir, backupUser, backupGroup, compress=True):
    """
    Dumps a list of currently installed Debian packages via ``dpkg``.

    Args:
       targetDir: Directory to write output file into
       backupUser: User which should own the resulting file
       backupGroup: Group which should own the resulting file
       compress: Indicates whether to compress the output file

    Raises:
       IOError: If the dump fails for some reason
    """
    # This dump is optional: silently skip on non-Debian systems.
    if not os.path.exists(DPKG_PATH):
        logger.info("Not executing Debian package dump since %s doesn't seem to exist.", DPKG_PATH)
        return
    if not os.access(DPKG_PATH, os.X_OK):
        logger.info("Not executing Debian package dump since %s cannot be executed.", DPKG_PATH)
        return
    outputFile, filename = _getOutputFile(targetDir, "dpkg-selections", compress)
    with outputFile:
        command = resolveCommand(DPKG_COMMAND)
        status = executeCommand(command, [], returnOutput=False, ignoreStderr=True,
                                doNotLog=True, outputFile=outputFile)[0]
        if status != 0:
            raise IOError("Error [%d] executing Debian package dump." % status)
    if not os.path.exists(filename):
        raise IOError("File [%s] does not seem to exist after Debian package dump finished." % filename)
    changeOwnership(filename, backupUser, backupGroup)
def _splitFile(sourcePath, splitSize, backupUser, backupGroup, removeSource=False):
    """
    Splits the source file into chunks of the indicated size.

    The split files will be owned by the indicated backup user and group.  If
    ``removeSource`` is ``True``, then the source file will be removed after it
    is successfully split.

    Args:
       sourcePath: Absolute path of the source file to split
       splitSize: Size to split the file into (an object exposing a ``bytes`` attribute)
       backupUser: User that target files should be owned by
       backupGroup: Group that target files should be owned by
       removeSource: Indicates whether to remove the source file

    Raises:
       ValueError: If the source path does not exist
       IOError: If there is a problem accessing, splitting or removing the source file
    """
    cwd = os.getcwd()
    try:
        if not os.path.exists(sourcePath):
            raise ValueError("Source path [%s] does not exist." % sourcePath)
        dirname = os.path.dirname(sourcePath)
        filename = os.path.basename(sourcePath)
        prefix = "%s_" % filename
        chunkBytes = int(splitSize.bytes)  # renamed so the 'bytes' builtin is not shadowed
        os.chdir(dirname)  # need to operate from directory that we want files written to
        command = resolveCommand(SPLIT_COMMAND)
        args = [
            "--verbose", "--numeric-suffixes", "--suffix-length=5",
            "--bytes=%d" % chunkBytes, filename, prefix,
        ]
        (result, output) = executeCommand(command, args, returnOutput=True, ignoreStderr=False)
        if result != 0:
            raise IOError("Error [%d] calling split for [%s]." % (result, sourcePath))
        # The last verbose line names the last chunk created; group(3) is its numeric suffix.
        pattern = re.compile(r"(creating file [`'])(%s)(.*)(')" % prefix)
        match = pattern.search(output[-1:][0])
        if match is None:
            raise IOError("Unable to parse output from split command.")
        value = int(match.group(3).strip())
        # Suffixes run 0..value inclusive, so iterate value+1 chunks; the original
        # range(0, value) skipped the final chunk, leaving it unverified and unchowned.
        for index in range(0, value + 1):
            path = "%s%05d" % (prefix, index)
            if not os.path.exists(path):
                raise IOError("After call to split, expected file [%s] does not exist." % path)
            changeOwnership(path, backupUser, backupGroup)
        if removeSource and os.path.exists(sourcePath):
            try:
                os.remove(sourcePath)
                logger.debug("Completed removing old file [%s].", sourcePath)
            except OSError:
                # Narrowed from a bare except: only filesystem errors are translated
                raise IOError("Failed to remove file [%s] after splitting it." % (sourcePath))
    finally:
        os.chdir(cwd)
def backupDatabase(user, password, backupFile, database=None):
    """
    Backs up an individual MySQL database, or all databases.

    This function backs up either a named local MySQL database or all local
    MySQL databases, using the passed-in user and password (if provided) for
    connectivity.  This function call *always* results in a full backup; there
    is no facility for incremental backups.

    The backup data will be written into the passed-in backup file.  Normally
    this would be an object as returned from ``open``, but it is possible to
    use something like a ``GzipFile`` to write compressed output.  The caller
    is responsible for closing the passed-in backup file.

    Often, the "root" database user will be used when backing up all databases.
    An alternative is to create a separate MySQL "backup" user and grant that
    user rights to read (but not write) all of the databases that will be
    backed up.

    This function accepts a username and password.  However, you probably do
    not want to pass those values in.  This is because they will be provided to
    ``mysqldump`` via the command-line ``--user`` and ``--password`` switches,
    which will be visible to other users in the process listing.

    Instead, you should configure the username and password in one of MySQL's
    configuration files.  Typically, this would be done by putting a stanza
    like this in ``/root/.my.cnf``, to provide ``mysqldump`` with the root
    database username and its password::

        [mysqldump]
        user     = root
        password = <secret>

    If you are executing this function as some system user other than root,
    then the ``.my.cnf`` file would be placed in the home directory of that
    user.  In either case, make sure to set restrictive permissions (typically,
    mode ``0600``) on ``.my.cnf`` to make sure that other users cannot read
    the file.

    Args:
       user (String representing MySQL username, or ``None``): User to use for connecting to the database (if any)
       password (String representing MySQL password, or ``None``): Password associated with user (if any)
       backupFile (Python file object as from ``open`` or ``file``): File to use for writing backup
       database (String representing database name, or ``None`` for all databases): Name of the database to be backed up

    Raises:
       ValueError: If some value is missing or invalid
       IOError: If there is a problem executing the MySQL dump
    """
    # Build the argument list in final order: scope switch first, fixed options,
    # optional credentials, then (for a single database) the database name last.
    args = ["--all-databases"] if database is None else ["--databases"]
    args.extend(["-all", "--flush-logs", "--opt"])
    if user is not None:
        logger.warning("Warning: MySQL username will be visible in process listing (consider using ~/.my.cnf).")
        args.append("--user=%s" % user)
    if password is not None:
        logger.warning("Warning: MySQL password will be visible in process listing (consider using ~/.my.cnf).")
        args.append("--password=%s" % password)
    if database is not None:
        args.append(database)
    command = resolveCommand(MYSQLDUMP_COMMAND)
    status = executeCommand(command, args, returnOutput=False, ignoreStderr=True,
                            doNotLog=True, outputFile=backupFile)[0]
    if status != 0:
        if database is None:
            raise IOError("Error [%d] executing MySQL database dump for all databases." % status)
        raise IOError("Error [%d] executing MySQL database dump for database [%s]." % (status, database))