Example #1
def _executeBackup(config, backupList, absolutePath, tarfilePath, collectMode, archiveMode, resetDigest, digestPath):
   """
   Execute the backup process for the indicated backup list.

   This function exists mainly to consolidate functionality between the
   L{_collectFile} and L{_collectDirectory} functions.  Those functions build
   the backup list; this function causes the backup to execute properly and
   also manages usage of the digest file on disk as explained in their
   comments.

   For collect files, the digest file will always just contain the single file
   that is being backed up.  This might be a little wasteful in terms of the
   number of files that we keep around, but it's consistent and easy to
   understand.

   @param config: Config object.
   @param backupList: List to execute backup for
   @param absolutePath: Absolute path of directory or file to collect.
   @param tarfilePath: Path to tarfile that should be created.
   @param collectMode: Collect mode to use.
   @param archiveMode: Archive mode to use.
   @param resetDigest: Reset digest flag.
   @param digestPath: Path to digest file on disk, if needed.
   """
   if collectMode != 'incr':
      # Non-incremental modes back up everything; no digest bookkeeping needed.
      logger.debug("Collect mode is [%s]; no digest will be used.", collectMode)
      _generateBackupTarfile(config, backupList, absolutePath, tarfilePath, archiveMode)
   else:
      if resetDigest:
         logger.debug("Based on resetDigest flag, digest will be cleared.")
         oldDigest = {}
      else:
         logger.debug("Based on resetDigest flag, digest will be loaded from disk.")
         oldDigest = _loadDigest(digestPath)
      # Prune files whose digests match the previous run, capturing the new digests.
      (removed, newDigest) = backupList.removeUnchanged(oldDigest, captureDigest=True)
      logger.debug("Removed %d unchanged files based on digest values.", removed)
      _generateBackupTarfile(config, backupList, absolutePath, tarfilePath, archiveMode)
      # Persist the new digest so the next incremental run can diff against it.
      _writeDigest(config, newDigest, digestPath)

def _generateBackupTarfile(config, backupList, absolutePath, tarfilePath, archiveMode):
   """
   Log the pending backup, generate the tarfile, and fix its ownership.

   Private helper consolidating code that was duplicated in both branches of
   L{_executeBackup}.  Does nothing beyond logging when the backup list is empty.

   @param config: Config object (provides backup user/group for ownership).
   @param backupList: List to execute backup for
   @param absolutePath: Absolute path of directory or file to collect.
   @param tarfilePath: Path to tarfile that should be created.
   @param archiveMode: Archive mode to use.
   """
   if len(backupList) == 1 and backupList[0] == absolutePath:  # special case for individual file
      logger.info("Backing up file [%s] (%s).", absolutePath, displayBytes(backupList.totalSize()))
   else:
      logger.info("Backing up %d files in [%s] (%s).", len(backupList), absolutePath, displayBytes(backupList.totalSize()))
   if len(backupList) > 0:
      backupList.generateTarfile(tarfilePath, archiveMode, True)
      changeOwnership(tarfilePath, config.options.backupUser, config.options.backupGroup)
Example #2
def executeAction(configPath, options, config):
   """
   Executes the capacity action.

   Verifies that the program configuration is usable, loads the extension's
   local configuration, and compares the writer's reported media capacity
   against the configured limit (either a maximum utilization percentage or a
   minimum number of free bytes), logging an error when the limit is reached.

   @param configPath: Path to configuration file on disk.
   @type configPath: String representing a path on disk.

   @param options: Program command-line options.
   @type options: Options object.

   @param config: Program configuration.
   @type config: Config object.

   @raise ValueError: Under many generic error conditions
   @raise IOError: If there are I/O problems reading or writing files
   """
   logger.debug("Executing capacity extended action.")
   if config.options is None or config.store is None:
      raise ValueError("Cedar Backup configuration is not properly filled in.")
   localConfig = LocalConfig(xmlPath=configPath)
   if config.store.checkMedia:
      checkMediaState(config.store)  # raises exception if media is not initialized
   capacity = createWriter(config).retrieveCapacity()
   logger.debug("Media capacity: %s", capacity)
   maxPercentage = localConfig.capacity.maxPercentage
   if maxPercentage is not None:
      # Limit is expressed as a maximum utilization percentage.
      if capacity.utilized > maxPercentage.percentage:
         logger.error("Media has reached capacity limit of %s%%: %.2f%% utilized",
                      maxPercentage.quantity, capacity.utilized)
   elif capacity.bytesAvailable < localConfig.capacity.minBytes:
      # Limit is expressed as a minimum number of free bytes.
      logger.error("Media has reached capacity limit of %s: only %s available",
                   localConfig.capacity.minBytes, displayBytes(capacity.bytesAvailable))
   logger.info("Executed the capacity extended action successfully.")
Example #3
    def _searchForOverburn(output):
        """
        Search for an "overburn" error message in C{growisofs} output.

        The C{growisofs} command returns a non-zero exit code and puts a message
        into the output -- even on a dry run -- if there is not enough space on
        the media.  This is called an "overburn" condition.

        The error message looks like this::

           :-( /dev/cdrom: 894048 blocks are free, 2033746 to be written!

        This method looks for the overburn error message anywhere in the output.
        If a matching error message is found, an C{IOError} exception is raised
        containing relevant information about the problem.  Otherwise, the method
        call returns normally.

        @param output: List of output lines to search, as from C{executeCommand}

        @raise IOError: If an overburn condition is found.
        """
        if output is None:
            return
        overburnPattern = re.compile(
            r"(^)(:-[(])(\s*.*:\s*)(.* )(blocks are free, )(.* )(to be written!)"
        )
        for outputLine in output:
            found = overburnPattern.search(outputLine)
            if found is None:
                continue
            try:
                # Groups 4 and 6 hold the free and required sector counts.
                freeSectors = float(found.group(4).strip())
                neededSectors = float(found.group(6).strip())
                available = convertSize(freeSectors, UNIT_SECTORS, UNIT_BYTES)
                size = convertSize(neededSectors, UNIT_SECTORS, UNIT_BYTES)
                logger.error(
                    "Image [%s] does not fit in available capacity [%s].",
                    displayBytes(size), displayBytes(available))
            except ValueError:
                # Sector counts could not be parsed; still an overburn condition.
                logger.error(
                    "Image does not fit in available capacity (no useful capacity info available)."
                )
            raise IOError(
                "Media does not contain enough capacity to store image.")
Example #4
def _getNewDisc(writer, rebuildMedia, todayIsStart, blankBehavior):
   """
   Gets a value for the newDisc flag based on blanking factor rules.

   The blanking factor rules are described above by L{writeImageBlankSafe}.

   The flag is set when media is being rebuilt, when default behavior is in
   effect and today starts the week, or when optimized blanking behavior is
   configured and the available/required capacity ratio falls at or below the
   configured blanking factor.

   @param writer: Previously configured image writer containing image entries
   @param rebuildMedia: Indicates whether media should be rebuilt
   @param todayIsStart: Indicates whether today is the starting day of the week
   @param blankBehavior: Blank behavior from configuration, or C{None} to use default behavior

   @return: newDisc flag to be set on writer.
   """
   newDisc = False
   if rebuildMedia:
      newDisc = True
      logger.debug("Setting new disc flag based on rebuildMedia flag.")
   else:
      if blankBehavior is None:
         logger.debug("Default media blanking behavior is in effect.")
         if todayIsStart:
            newDisc = True
            logger.debug("Setting new disc flag based on todayIsStart.")
      else:
         # note: validation says we can assume that behavior is fully filled in if it exists at all
         logger.debug("Optimized media blanking behavior is in effect based on configuration.")
         if blankBehavior.blankMode == "daily" or (blankBehavior.blankMode == "weekly" and todayIsStart):
            logger.debug("New disc flag will be set based on blank factor calculation.")
            blankFactor = float(blankBehavior.blankFactor)
            logger.debug("Configured blanking factor: %.2f", blankFactor)
            available = writer.retrieveCapacity().bytesAvailable
            logger.debug("Bytes available: %s", displayBytes(available))
            required = writer.getEstimatedImageSize()
            logger.debug("Bytes required: %s", displayBytes(required))
            # Bug fix: the ratio is available/required; multiplying by 1.0
            # merely forces float division under Python 2.  The previous
            # "1.0 + required" inflated the denominator and skewed every
            # blank-factor decision.
            ratio = available / (1.0 * required)
            logger.debug("Calculated ratio: %.2f", ratio)
            newDisc = (ratio <= blankFactor)
            logger.debug("%.2f <= %.2f ? %s", ratio, blankFactor, newDisc)
         else:
            logger.debug("No blank factor calculation is required based on configuration.")
   logger.debug("New disc flag [%s].", newDisc)
   return newDisc
Example #5
def _applySizeLimits(options, config, local, stagingDirs):
    """
    Apply size limits, throwing an exception if any limits are exceeded.

    Size limits are optional.  If a limit is set to None, it does not apply.
    The full size limit applies if the full option is set or if today is the
    start of the week.  The incremental size limit applies otherwise.  Limits
    are applied to the total size of all the relevant staging directories.

    @param options: Options object.
    @param config: Config object.
    @param local: Local config object.
    @param stagingDirs: Dictionary mapping directory path to date suffix.

    @raise ValueError: Under many generic error conditions
    @raise ValueError: If a size limit has been exceeded
    """
    # Pick the limit that applies: full backups use the full-backup limit.
    if options.full or isStartOfWeek(config.options.startingDay):
        logger.debug("Using Amazon S3 size limit for full backups.")
        sizeLimit = local.amazons3.fullBackupSizeLimit
    else:
        logger.debug("Using Amazon S3 size limit for incremental backups.")
        sizeLimit = local.amazons3.incrementalBackupSizeLimit
    if sizeLimit is None:
        logger.debug("No Amazon S3 size limit will be applied.")
        return
    logger.debug("Amazon S3 size limit is: %s", sizeLimit)
    # Total up the contents of every relevant staging directory.
    stagedFiles = BackupFileList()
    for directory in stagingDirs:
        stagedFiles.addDirContents(directory)
    totalBytes = stagedFiles.totalSize()
    logger.debug("Amazon S3 backup size is: %s", displayBytes(totalBytes))
    if totalBytes > sizeLimit.bytes:
        logger.error("Amazon S3 size limit exceeded: %s > %s",
                     displayBytes(totalBytes), sizeLimit)
        raise ValueError("Amazon S3 size limit exceeded: %s > %s" %
                         (displayBytes(totalBytes), sizeLimit))
    logger.info(
        "Total size does not exceed Amazon S3 size limit, so backup can continue."
    )
Example #6
    def writeImage(self, imagePath=None, newDisc=False, writeMulti=True):
        """
        Writes an ISO image to the media in the device.

        If C{newDisc} is passed in as C{True}, we assume that the entire disc
        will be re-created from scratch.  Note that unlike C{CdWriter},
        C{DvdWriter} does not blank rewritable media before reusing it; however,
        C{growisofs} is called such that the media will be re-initialized as
        needed.

        If C{imagePath} is passed in as C{None}, then the existing image
        configured with C{initializeImage()} will be used.  Under these
        circumstances, the passed-in C{newDisc} flag will be ignored and the
        value passed in to C{initializeImage()} will apply instead.

        The C{writeMulti} argument is ignored.  It exists for compatibility with
        the Cedar Backup image writer interface.

        @note: The image size indicated in the log ("Image size will be...") is
        an estimate.  The estimate is conservative and is probably larger than
        the actual space that C{dvdwriter} will use.

        @param imagePath: Path to an ISO image on disk, or C{None} to use writer's image
        @type imagePath: String representing a path on disk

        @param newDisc: Indicates whether the disc should be re-initialized
        @type newDisc: Boolean true/false.

        @param writeMulti: Unused
        @type writeMulti: Boolean true/false

        @raise ValueError: If the image path is not absolute.
        @raise ValueError: If some path cannot be encoded properly.
        @raise IOError: If the media could not be written to for some reason.
        @raise ValueError: If no image is passed in and initializeImage() was not previously called
        """
        if not writeMulti:
            logger.warn("writeMulti value of [%s] ignored.", writeMulti)
        if imagePath is not None:
            # Explicit image on disk: validate the path and write it directly.
            if not os.path.isabs(imagePath):
                raise ValueError("Image path must be absolute.")
            self._writeImage(newDisc, encodePath(imagePath), None)
            return
        # No explicit image: use the one set up via initializeImage().
        if self._image is None:
            raise ValueError(
                "Must call initializeImage() before using this method with no image path."
            )
        estimatedSize = self.getEstimatedImageSize()
        logger.info("Image size will be %s (estimated).",
                    displayBytes(estimatedSize))
        availableBytes = self.retrieveCapacity(
            entireDisc=self._image.newDisc).bytesAvailable
        if estimatedSize > availableBytes:
            logger.error(
                "Image [%s] does not fit in available capacity [%s].",
                displayBytes(estimatedSize), displayBytes(availableBytes))
            raise IOError(
                "Media does not contain enough capacity to store image.")
        self._writeImage(self._image.newDisc, None, self._image.entries,
                         self._image.mediaLabel)
Example #7
 def __str__(self):
     """
     Informal string representation for class instance.
     """
     used = displayBytes(self.bytesUsed)
     total = displayBytes(self.totalCapacity)
     return "utilized %s of %s (%.2f%%)" % (used, total, self.utilized)
Example #8
def _executeAction(options, config):
    """
    Implements the guts of the cback-span tool.

    Interactively guides the user through splitting staged backup data that
    does not fit on a single disc: confirms store configuration, indexes the
    staging directory, lets the user choose a fit algorithm, and writes one
    disc per span item, marking the directories as stored afterwards.

    @param options: Program command-line options.
    @type options: SpanOptions object.

    @param config: Program configuration.
    @type config: Config object.

    @raise Exception: Under many generic error conditions
    """
    # Introductory banner; the user may bail out before any work is done.
    print ""
    print "================================================"
    print "           Cedar Backup 'span' tool"
    print "================================================"
    print ""
    print "This the Cedar Backup span tool.  It is used to split up staging"
    print "data when that staging data does not fit onto a single disc."
    print ""
    print "This utility operates using Cedar Backup configuration.  Configuration"
    print "specifies which staging directory to look at and which writer device"
    print "and media type to use."
    print ""
    if not _getYesNoAnswer("Continue?", default="Y"):
        return
    print "==="

    # Echo the store configuration so the user can sanity-check it.
    print ""
    print "Cedar Backup store configuration looks like this:"
    print ""
    print "   Source Directory...: %s" % config.store.sourceDir
    print "   Media Type.........: %s" % config.store.mediaType
    print "   Device Type........: %s" % config.store.deviceType
    print "   Device Path........: %s" % config.store.devicePath
    print "   Device SCSI ID.....: %s" % config.store.deviceScsiId
    print "   Drive Speed........: %s" % config.store.driveSpeed
    print "   Check Data Flag....: %s" % config.store.checkData
    print "   No Eject Flag......: %s" % config.store.noEject
    print ""
    if not _getYesNoAnswer("Is this OK?", default="Y"):
        return
    print "==="

    # Build the configured writer and learn the per-disc media capacity.
    (writer, mediaCapacity) = _getWriter(config)

    # Index the daily staging directories that have not been written yet.
    print ""
    print "Please wait, indexing the source directory (this may take a while)..."
    (dailyDirs, fileList) = _findDailyDirs(config.store.sourceDir)
    print "==="

    print ""
    print "The following daily staging directories have not yet been written to disc:"
    print ""
    for dailyDir in dailyDirs:
        print "   %s" % dailyDir

    totalSize = fileList.totalSize()
    print ""
    print "The total size of the data in these directories is %s." % displayBytes(
        totalSize)
    print ""
    if not _getYesNoAnswer("Continue?", default="Y"):
        return
    print "==="

    print ""
    print "Based on configuration, the capacity of your media is %s." % displayBytes(
        mediaCapacity)

    # Ask for a safety cushion percentage to set aside on each disc.
    print ""
    print "Since estimates are not perfect and there is some uncertainly in"
    print "media capacity calculations, it is good to have a \"cushion\","
    print "a percentage of capacity to set aside.  The cushion reduces the"
    print "capacity of your media, so a 1.5% cushion leaves 98.5% remaining."
    print ""
    cushion = _getFloat("What cushion percentage?", default=4.5)
    print "==="

    # Usable per-disc capacity after subtracting the cushion.
    realCapacity = ((100.0 - cushion) / 100.0) * mediaCapacity
    # NOTE(review): truncation plus 1 overestimates by one disc when totalSize
    # divides realCapacity evenly; the "at least" phrasing below covers this.
    minimumDiscs = (totalSize / realCapacity) + 1
    print ""
    print "The real capacity, taking into account the %.2f%% cushion, is %s." % (
        cushion, displayBytes(realCapacity))
    print "It will take at least %d disc(s) to store your %s of data." % (
        minimumDiscs, displayBytes(totalSize))
    print ""
    if not _getYesNoAnswer("Continue?", default="Y"):
        return
    print "==="

    # Let the user try fit algorithms until a span solution is accepted.
    happy = False
    while not happy:
        print ""
        print "Which algorithm do you want to use to span your data across"
        print "multiple discs?"
        print ""
        print "The following algorithms are available:"
        print ""
        print "   first....: The \"first-fit\" algorithm"
        print "   best.....: The \"best-fit\" algorithm"
        print "   worst....: The \"worst-fit\" algorithm"
        print "   alternate: The \"alternate-fit\" algorithm"
        print ""
        print "If you don't like the results you will have a chance to try a"
        print "different one later."
        print ""
        algorithm = _getChoiceAnswer("Which algorithm?", "worst", [
            "first",
            "best",
            "worst",
            "alternate",
        ])
        print "==="

        print ""
        print "Please wait, generating file lists (this may take a while)..."
        # generateSpan expects algorithm identifiers of the form "worst_fit".
        spanSet = fileList.generateSpan(capacity=realCapacity,
                                        algorithm="%s_fit" % algorithm)
        print "==="

        print ""
        print "Using the \"%s-fit\" algorithm, Cedar Backup can split your data" % algorithm
        print "into %d discs." % len(spanSet)
        print ""
        counter = 0
        for item in spanSet:
            counter += 1
            print "Disc %d: %d files, %s, %.2f%% utilization" % (
                counter, len(item.fileList), displayBytes(
                    item.size), item.utilization)
        print ""
        if _getYesNoAnswer("Accept this solution?", default="Y"):
            happy = True
        print "==="

    # Write each span item to its own disc, prompting for media changes.
    counter = 0
    for spanItem in spanSet:
        counter += 1
        if counter == 1:
            print ""
            _getReturn(
                "Please place the first disc in your backup device.\nPress return when ready."
            )
            print "==="
        else:
            print ""
            _getReturn(
                "Please replace the disc in your backup device.\nPress return when ready."
            )
            print "==="
        _writeDisc(config, writer, spanItem)

    # Mark the staged directories as stored so they are not offered again.
    _writeStoreIndicator(config, dailyDirs)

    print ""
    print "Completed writing all discs."