Example #1
    def writeImage(self, imagePath=None, newDisc=False, writeMulti=True):
        """
        Writes an ISO image to the media in the device.

        If ``newDisc`` is passed in as ``True``, we assume that the entire disc
        will be re-created from scratch.  Note that unlike ``CdWriter``,
        ``DvdWriter`` does not blank rewritable media before reusing it; however,
        ``growisofs`` is called such that the media will be re-initialized as
        needed.

        If ``imagePath`` is passed in as ``None``, then the existing image
        configured with ``initializeImage()`` will be used.  Under these
        circumstances, the passed-in ``newDisc`` flag will be ignored and the
        value passed in to ``initializeImage()`` will apply instead.

        The ``writeMulti`` argument is ignored.  It exists for compatibility with
        the Cedar Backup image writer interface.

        *Note:* The image size indicated in the log ("Image size will be...") is
        an estimate.  The estimate is conservative and is probably larger than
        the actual space that ``dvdwriter`` will use.

        Args:
           imagePath (String representing a path on disk): Path to an ISO image on disk, or ``None`` to use writer's image
           newDisc (Boolean true/false): Indicates whether the disc should be re-initialized
           writeMulti (Boolean true/false): Unused
        Raises:
           ValueError: If the image path is not absolute
           ValueError: If some path cannot be encoded properly
           IOError: If the media could not be written to for some reason
           ValueError: If no image is passed in and initializeImage() was not previously called
        """
        if not writeMulti:
            logger.warning("writeMulti value of [%s] ignored.", writeMulti)
        if imagePath is None:
            if self._image is None:
                raise ValueError(
                    "Must call initializeImage() before using this method with no image path."
                )
            size = self.getEstimatedImageSize()
            logger.info("Image size will be %s (estimated).",
                        displayBytes(size))
            available = self.retrieveCapacity(
                entireDisc=self._image.newDisc).bytesAvailable
            if size > available:
                logger.error(
                    "Image [%s] does not fit in available capacity [%s].",
                    displayBytes(size), displayBytes(available))
                raise IOError(
                    "Media does not contain enough capacity to store image.")
            self._writeImage(self._image.newDisc, None, self._image.entries,
                             self._image.mediaLabel)
        else:
            if not os.path.isabs(imagePath):
                raise ValueError("Image path must be absolute.")
            imagePath = encodePath(imagePath)
            self._writeImage(newDisc, imagePath, None)
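
A minimal usage sketch for the two calling modes described in the docstring above. The import path, device path, temporary directory, and staging entry are illustrative assumptions; the exact constructor and helper signatures should be checked against the DvdWriter interface:

from CedarBackup3.writers.dvdwriter import DvdWriter

writer = DvdWriter("/dev/dvd")                 # hypothetical device path
writer.initializeImage(True, "/tmp")           # newDisc=True, scratch directory (assumed signature)
writer.addImageEntry("/backup/staging", None)  # hypothetical entry to include in the image
writer.writeImage()                            # imagePath=None: write the previously staged image

# Or write a pre-built ISO image directly:
writer.writeImage(imagePath="/tmp/backup.iso", newDisc=True)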
Example #2
def _getNewDisc(writer, rebuildMedia, todayIsStart, blankBehavior):
    """
    Gets a value for the newDisc flag based on blanking factor rules.

    The blanking factor rules are described above by :any:`writeImageBlankSafe`.

    Args:
       writer: Previously configured image writer containing image entries
       rebuildMedia: Indicates whether media should be rebuilt
       todayIsStart: Indicates whether today is the starting day of the week
       blankBehavior: Blank behavior from configuration, or ``None`` to use default behavior

    Returns:
        newDisc flag to be set on writer
    """
    newDisc = False
    if rebuildMedia:
        newDisc = True
        logger.debug("Setting new disc flag based on rebuildMedia flag.")
    else:
        if blankBehavior is None:
            logger.debug("Default media blanking behavior is in effect.")
            if todayIsStart:
                newDisc = True
                logger.debug("Setting new disc flag based on todayIsStart.")
        else:
            # note: validation says we can assume that behavior is fully filled in if it exists at all
            logger.debug(
                "Optimized media blanking behavior is in effect based on configuration."
            )
            if blankBehavior.blankMode == "daily" or (
                    blankBehavior.blankMode == "weekly" and todayIsStart):
                logger.debug(
                    "New disc flag will be set based on blank factor calculation."
                )
                blankFactor = float(blankBehavior.blankFactor)
                logger.debug("Configured blanking factor: %.2f", blankFactor)
                available = writer.retrieveCapacity().bytesAvailable
                logger.debug("Bytes available: %s", displayBytes(available))
                required = writer.getEstimatedImageSize()
                logger.debug("Bytes required: %s", displayBytes(required))
                ratio = available / (1.0 * required)
                logger.debug("Calculated ratio: %.2f", ratio)
                newDisc = ratio <= blankFactor
                logger.debug("%.2f <= %.2f ? %s", ratio, blankFactor, newDisc)
            else:
                logger.debug(
                    "No blank factor calculation is required based on configuration."
                )
    logger.debug("New disc flag [%s].", newDisc)
    return newDisc
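
A worked example of the blank-factor decision above, using illustrative numbers only (no writer or configuration is consulted):

available = 1.2 * 1024 ** 3      # bytes still free on the media (illustrative)
required = 900 * 1024 ** 2       # estimated image size in bytes (illustrative)
blankFactor = 1.3                # hypothetical configured blanking factor

ratio = available / (1.0 * required)   # about 1.37
newDisc = ratio <= blankFactor         # False: 1.37 > 1.3, so the media is reused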
Example #3
def executeAction(configPath, options, config):
    """
    Executes the capacity action.

    Args:
       configPath (String representing a path on disk): Path to configuration file on disk
       options (Options object): Program command-line options
       config (Config object): Program configuration
    Raises:
       ValueError: Under many generic error conditions
       IOError: If there are I/O problems reading or writing files
    """
    logger.debug("Executing capacity extended action.")
    if config.options is None or config.store is None:
        raise ValueError("Cedar Backup configuration is not properly filled in.")
    local = LocalConfig(xmlPath=configPath)
    if config.store.checkMedia:
        checkMediaState(config.store)  # raises exception if media is not initialized
    capacity = createWriter(config).retrieveCapacity()
    logger.debug("Media capacity: %s", capacity)
    if local.capacity.maxPercentage is not None:
        if capacity.utilized > local.capacity.maxPercentage.percentage:
            logger.error(
                "Media has reached capacity limit of %s%%: %.2f%% utilized",
                local.capacity.maxPercentage.quantity,
                capacity.utilized,
            )
    else:
        if capacity.bytesAvailable < local.capacity.minBytes:
            logger.error(
                "Media has reached capacity limit of %s: only %s available",
                local.capacity.minBytes,
                displayBytes(capacity.bytesAvailable),
            )
    logger.info("Executed the capacity extended action successfully.")
Example #4
    def _searchForOverburn(output):
        """
        Search for an "overburn" error message in ``growisofs`` output.

        The ``growisofs`` command returns a non-zero exit code and puts a message
        into the output -- even on a dry run -- if there is not enough space on
        the media.  This is called an "overburn" condition.

        The error message looks like this::

           :-( /dev/cdrom: 894048 blocks are free, 2033746 to be written!

        This method looks for the overburn error message anywhere in the output.
        If a matching error message is found, an ``IOError`` exception is raised
        containing relevant information about the problem.  Otherwise, the method
        call returns normally.

        Args:
           output: List of output lines to search, as from ``executeCommand``

        Raises:
           IOError: If an overburn condition is found
        """
        if output is None:
            return
        pattern = re.compile(
            r"(^)(:-[(])(\s*.*:\s*)(.* )(blocks are free, )(.* )(to be written!)"
        )
        for line in output:
            match = pattern.search(line)
            if match is not None:
                try:
                    available = convertSize(float(match.group(4).strip()),
                                            UNIT_SECTORS, UNIT_BYTES)
                    size = convertSize(float(match.group(6).strip()),
                                       UNIT_SECTORS, UNIT_BYTES)
                    logger.error(
                        "Image [%s] does not fit in available capacity [%s].",
                        displayBytes(size), displayBytes(available))
                except ValueError:
                    logger.error(
                        "Image does not fit in available capacity (no useful capacity info available)."
                    )
                raise IOError(
                    "Media does not contain enough capacity to store image.")
Example #5
def _executeBackup(config, backupList, absolutePath, tarfilePath, collectMode,
                   archiveMode, resetDigest, digestPath):
    """
    Execute the backup process for the indicated backup list.

    This function exists mainly to consolidate functionality between the
    :any:`_collectFile` and :any:`_collectDirectory` functions.  Those functions build
    the backup list; this function causes the backup to execute properly and
    also manages usage of the digest file on disk as explained in their
    comments.

    For collect files, the digest file will always just contain the single file
    that is being backed up.  This might be a little wasteful in terms of the
    number of files that we keep around, but it's consistent and easy to understand.

    Args:
       config: Config object
       backupList: List to execute backup for
       absolutePath: Absolute path of directory or file to collect
       tarfilePath: Path to tarfile that should be created
       collectMode: Collect mode to use
       archiveMode: Archive mode to use
       resetDigest: Reset digest flag
       digestPath: Path to digest file on disk, if needed
    """
    if collectMode != "incr":
        logger.debug("Collect mode is [%s]; no digest will be used.",
                     collectMode)
        if len(backupList) == 1 and backupList[0] == absolutePath:  # special case for individual file
            logger.info("Backing up file [%s] (%s).", absolutePath,
                        displayBytes(backupList.totalSize()))
        else:
            logger.info("Backing up %d files in [%s] (%s).", len(backupList),
                        absolutePath, displayBytes(backupList.totalSize()))
        if len(backupList) > 0:
            backupList.generateTarfile(tarfilePath, archiveMode, True)
            changeOwnership(tarfilePath, config.options.backupUser,
                            config.options.backupGroup)
    else:
        if resetDigest:
            logger.debug("Based on resetDigest flag, digest will be cleared.")
            oldDigest = {}
        else:
            logger.debug(
                "Based on resetDigest flag, digest will be loaded from disk.")
            oldDigest = _loadDigest(digestPath)
        (removed, newDigest) = backupList.removeUnchanged(oldDigest,
                                                          captureDigest=True)
        logger.debug("Removed %d unchanged files based on digest values.",
                     removed)
        if len(backupList) == 1 and backupList[0] == absolutePath:  # special case for individual file
            logger.info("Backing up file [%s] (%s).", absolutePath,
                        displayBytes(backupList.totalSize()))
        else:
            logger.info("Backing up %d files in [%s] (%s).", len(backupList),
                        absolutePath, displayBytes(backupList.totalSize()))
        if len(backupList) > 0:
            backupList.generateTarfile(tarfilePath, archiveMode, True)
            changeOwnership(tarfilePath, config.options.backupUser,
                            config.options.backupGroup)
        _writeDigest(config, newDigest, digestPath)
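
The digest handling above relies on library helpers (_loadDigest, removeUnchanged, _writeDigest). A rough sketch of the underlying idea, not the library's own implementation, with the hash algorithm chosen arbitrarily for illustration:

import hashlib

def _sha256(path):
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()

def removeUnchangedSketch(paths, oldDigest):
    """Drop files whose content hash matches the previous run's digest."""
    newDigest = {path: _sha256(path) for path in paths}
    kept = [path for path in paths if oldDigest.get(path) != newDigest[path]]
    removed = len(paths) - len(kept)
    return kept, removed, newDigest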
Example #6
    def __str__(self):
        """
        Informal string representation for class instance.
        """
        return "utilized %s of %s (%.2f%%)" % (
            displayBytes(self.bytesUsed),
            displayBytes(self.totalCapacity),
            self.utilized,
        )
Example #7
def _executeAction(options, config):
    """
    Implements the guts of the cback3-span tool.

    Args:
       options (SpanOptions object): Program command-line options
       config (Config object): Program configuration
    Raises:
       Exception: Under many generic error conditions
    """
    print("")
    print("================================================")
    print("           Cedar Backup 'span' tool")
    print("================================================")
    print("")
    print("This the Cedar Backup span tool.  It is used to split up staging")
    print("data when that staging data does not fit onto a single disc.")
    print("")
    print(
        "This utility operates using Cedar Backup configuration.  Configuration"
    )
    print(
        "specifies which staging directory to look at and which writer device")
    print("and media type to use.")
    print("")
    if not _getYesNoAnswer("Continue?", default="Y"):
        return
    print("===")

    print("")
    print("Cedar Backup store configuration looks like this:")
    print("")
    print("   Source Directory...: %s" % config.store.sourceDir)
    print("   Media Type.........: %s" % config.store.mediaType)
    print("   Device Type........: %s" % config.store.deviceType)
    print("   Device Path........: %s" % config.store.devicePath)
    print("   Device SCSI ID.....: %s" % config.store.deviceScsiId)
    print("   Drive Speed........: %s" % config.store.driveSpeed)
    print("   Check Data Flag....: %s" % config.store.checkData)
    print("   No Eject Flag......: %s" % config.store.noEject)
    print("")
    if not _getYesNoAnswer("Is this OK?", default="Y"):
        return
    print("===")

    (writer, mediaCapacity) = _getWriter(config)

    print("")
    print(
        "Please wait, indexing the source directory (this may take a while)..."
    )
    (dailyDirs, fileList) = _findDailyDirs(config.store.sourceDir)
    print("===")

    print("")
    print(
        "The following daily staging directories have not yet been written to disc:"
    )
    print("")
    for dailyDir in dailyDirs:
        print("   %s" % dailyDir)

    totalSize = fileList.totalSize()
    print("")
    print("The total size of the data in these directories is %s." %
          displayBytes(totalSize))
    print("")
    if not _getYesNoAnswer("Continue?", default="Y"):
        return
    print("===")

    print("")
    print("Based on configuration, the capacity of your media is %s." %
          displayBytes(mediaCapacity))

    print("")
    print("Since estimates are not perfect and there is some uncertainly in")
    print('media capacity calculations, it is good to have a "cushion",')
    print("a percentage of capacity to set aside.  The cushion reduces the")
    print("capacity of your media, so a 1.5% cushion leaves 98.5% remaining.")
    print("")
    cushion = _getFloat("What cushion percentage?", default=4.5)
    print("===")

    realCapacity = ((100.0 - cushion) / 100.0) * mediaCapacity
    minimumDiscs = (totalSize / realCapacity) + 1
    print("")
    print("The real capacity, taking into account the %.2f%% cushion, is %s." %
          (cushion, displayBytes(realCapacity)))
    print("It will take at least %d disc(s) to store your %s of data." %
          (minimumDiscs, displayBytes(totalSize)))
    print("")
    if not _getYesNoAnswer("Continue?", default="Y"):
        return
    print("===")

    happy = False
    while not happy:
        print("")
        print("Which algorithm do you want to use to span your data across")
        print("multiple discs?")
        print("")
        print("The following algorithms are available:")
        print("")
        print('   first....: The "first-fit" algorithm')
        print('   best.....: The "best-fit" algorithm')
        print('   worst....: The "worst-fit" algorithm')
        print('   alternate: The "alternate-fit" algorithm')
        print("")
        print("If you don't like the results you will have a chance to try a")
        print("different one later.")
        print("")
        algorithm = _getChoiceAnswer("Which algorithm?", "worst",
                                     ["first", "best", "worst", "alternate"])
        print("===")

        print("")
        print("Please wait, generating file lists (this may take a while)...")
        spanSet = fileList.generateSpan(capacity=realCapacity,
                                        algorithm="%s_fit" % algorithm)
        print("===")

        print("")
        print(
            'Using the "%s-fit" algorithm, Cedar Backup can split your data' %
            algorithm)
        print("into %d discs." % len(spanSet))
        print("")
        counter = 0
        for item in spanSet:
            counter += 1
            print("Disc %d: %d files, %s, %.2f%% utilization" %
                  (counter, len(item.fileList), displayBytes(
                      item.size), item.utilization))
        print("")
        if _getYesNoAnswer("Accept this solution?", default="Y"):
            happy = True
        print("===")

    counter = 0
    for spanItem in spanSet:
        counter += 1
        if counter == 1:
            print("")
            _getReturn(
                "Please place the first disc in your backup device.\nPress return when ready."
            )
            print("===")
        else:
            print("")
            _getReturn(
                "Please replace the disc in your backup device.\nPress return when ready."
            )
            print("===")
        _writeDisc(config, writer, spanItem)

    _writeStoreIndicator(config, dailyDirs)

    print("")
    print("Completed writing all discs.")