    def __init__(self, repo, dirPath):
        # Var assignment and validation
        self.repo = repo
        self.dirPath = dirPath
        self.completePath = joinPaths(repo, dirPath)
        dataPath = joinPaths(repo, rdiffDataDirName)

        # cache dir listings
        self.entries = []
        # The directory may not exist if it has been deleted.
        if os.access(self.completePath, os.F_OK):
            self.entries = os.listdir(self.completePath)
        self.dataDirEntries = os.listdir(dataPath)
        incrementsDir = joinPaths(repo, rdiffIncrementsDirName, dirPath)
        self.incrementEntries = []
        # The increments directory may not exist if the folder has always
        # existed and never changed.
        if os.access(incrementsDir, os.F_OK):
            self.incrementEntries = os.listdir(incrementsDir)

        self.groupedIncrementEntries = rdw_helpers.groupby(
            self.incrementEntries, lambda x: incrementEntry(x).getFilename())
        self.backupTimes = [
            incrementEntry(x).getDate() for x in filter(
                lambda x: x.startswith("mirror_metadata"), self.dataDirEntries)
        ]
        self.backupTimes.sort()
Example #2
    def __init__(self, repo, dirPath):
        # Var assignment and validation
        self.pathQuoter = RdiffQuotedPath(repo)

        self.repo = repo
        self.dirPath = dirPath
        self.completePath = joinPaths(repo, dirPath)
        dataPath = joinPaths(repo, RDIFF_BACKUP_DATA)

        # cache dir listings
        self.entries = []
        # The directory may not exist if it has been deleted.
        if os.access(self.completePath, os.F_OK):
            self.entries = os.listdir(self.completePath)
        self.dataDirEntries = os.listdir(dataPath)
        incrementsDir = joinPaths(repo, INCREMENTS, dirPath)
        self.incrementEntries = []
        # The increments directory may not exist if the folder has always
        # existed and never changed.
        if os.access(incrementsDir, os.F_OK):
            # Ignore directories; only increment files are relevant here.
            self.incrementEntries = filter(
                lambda x: not os.path.isdir(joinPaths(incrementsDir, x)),
                os.listdir(incrementsDir))

        self.groupedIncrementEntries = rdw_helpers.groupby(
            self.incrementEntries,
            lambda x: IncrementEntry(repo, x).getFilename())
        self.backupTimes = [
            IncrementEntry(repo, x).getDate() for x in filter(
                lambda x: x.startswith("mirror_metadata"), self.dataDirEntries)
        ]
        self.backupTimes.sort()
Example #4
    def dir_entries(self):
        """Get directory entries for the current path. It is similar to
        listdir() but for rdiff-backup."""

        logger.debug("get directory entries for [%s]" %
                     self._decode(self.full_path))

        # Group increments by filename
        grouped_increment_entries = rdw_helpers.groupby(
            self._increment_entries, lambda x: x.filename)

        # Process each increment entry and combine it with the existing
        # entries
        entriesDict = {}
        for filename, increments in grouped_increment_entries.iteritems():
            # Check if filename exists
            exists = filename in self.existing_entries
            # Create DirEntry to represent the item
            new_entry = DirEntry(
                self,
                filename,
                exists,
                increments)
            entriesDict[filename] = new_entry

        # Then add existing entries
        for filename in self.existing_entries:
            # Skip the entry if it was already created from an increment entry
            if filename in entriesDict:
                continue
            # The entry has no increments (most likely because it never
            # changed), so create a DirEntry to represent it
            new_entry = DirEntry(
                self,
                filename,
                True,
                [])
            entriesDict[filename] = new_entry

        # Return the values (the DirEntry objects)
        return entriesDict.values()
Example #6
                    "repo":
                    repo,
                    "error":
                    error.getErrorString(),
                    "repoLink":
                    self.buildBrowseUrl(repo, "/", False)
                })

        allBackups.sort(lambda x, y: cmp(y["date"], x["date"]))
        failedBackups = filter(lambda x: x["errors"], allBackups)

        # group successful backups by day
        successfulBackups = filter(lambda x: not x["errors"], allBackups)
        if successfulBackups:
            lastSuccessDate = successfulBackups[0]["date"]
        successfulBackups = rdw_helpers.groupby(
            successfulBackups, lambda x: x["date"].getLocalDaysSinceEpoch())

        userMessages = []

        # generate failure messages
        if includeFailure:
            for job in failedBackups:
                date = job["date"]
                job.update({
                    "isSuccess": False,
                    "date": date,
                    "dateString": date.getDisplayString(),
                    "pubDate": date.getRSSPubDateString(),
                    "link": self._buildStatusEntryUrl(job["repo"], date),
                    "repoErrors": [],
                    "backups": [],
Example #7
class rdiffStatusPage(page_main.rdiffPage):
   def index(self):
      userMessages = self._getUserMessages()
      page = self.startPage("Backup Status", rssUrl=self._buildStatusFeedUrl(), rssTitle="Backup status for " + self.getUsername())
      page = page + self.writeTopLinks()
      page = page + self.compileTemplate("status.html", messages=userMessages, feedLink=self._buildStatusFeedUrl())
      page = page + self.endPage()
      return page
   index.exposed = True

   def feed(self):
      cherrypy.response.headerMap["Content-Type"] = "text/xml"
      userMessages = self._getUserMessages()
      statusUrl = self._buildAbsoluteStatusUrl()
      return self.compileTemplate("status.xml", username=self.getUsername(), link=statusUrl, messages=userMessages)
   feed.exposed = True

   def _buildAbsoluteStatusUrl(self):
      return cherrypy.request.base + "/status/"

   def _buildStatusFeedUrl(self):
      return "/status/feed"

   def _getUserMessages(self):
      userRoot = self.userDB.getUserRoot(self.getUsername())
      userRepos = self.userDB.getUserRepoPaths(self.getUsername())

      asOfDate = rdw_helpers.rdwTime()
      asOfDate.initFromMidnightUTC(-5)

      # build list of all backups
      allBackups = []
      repoErrors = []
      for repo in userRepos:
         try:
            backups = librdiff.getBackupHistorySinceDate(rdw_helpers.joinPaths(userRoot, repo), asOfDate)
            allBackups += [{"repo": repo, "date": backup.date, "displayDate": backup.date.getDisplayString(),
               "size": rdw_helpers.formatFileSizeStr(backup.size), "errors": backup.errors} for backup in backups]
         except librdiff.FileError, error:
            repoErrors.append({"repo": repo, "error": error.getErrorString()})

      allBackups.sort(lambda x, y: cmp(y["date"], x["date"]))
      failedBackups = filter(lambda x: x["errors"], allBackups)

      # group successful backups by day
      successfulBackups = filter(lambda x: not x["errors"], allBackups)
      if successfulBackups:
         lastSuccessDate = successfulBackups[0]["date"]
      successfulBackups = rdw_helpers.groupby(successfulBackups, lambda x: x["date"].getLocalDaysSinceEpoch())

      userMessages = []

      # generate failure messages
      for job in failedBackups:
         date = job["date"]
         title = "Backup Failed: " + job["repo"]
         job.update({"isSuccess": False, "date": date, "pubDate": date.getRSSPubDateString(),
            "link": self._buildAbsoluteStatusUrl(), "title": title, "repoErrors": [], "backups": []})
         userMessages.append(job)

      # generate success messages (publish date is most recent backup date)
      for day in successfulBackups.keys():
         date = successfulBackups[day][0]["date"]
         title = "Successful Backups for " + date.getDateDisplayString()

         # include repository errors in most recent entry
         if date == lastSuccessDate:
            repoErrorsForMsg = repoErrors
         else:
            repoErrorsForMsg = []

         userMessages.append({"isSuccess": 1, "date": date, "pubDate": date.getRSSPubDateString(),
            "link": self._buildAbsoluteStatusUrl(), "title": title, "repoErrors": repoErrorsForMsg, "backups":successfulBackups[day]})

      # sort messages by date
      userMessages.sort(lambda x, y: cmp(y["date"], x["date"]))
      return userMessages
Example #8
    def _getUserMessages(self,
                         repos,
                         includeSuccess,
                         includeFailure,
                         earliest_date,
                         latest_date):

        user_root = self.app.userdb.get_user_root(self.app.currentuser.username)
        user_root_b = encode_s(user_root)

        repoErrors = []
        allBackups = []
        for repo in repos:
            # Get binary representation of the repo
            repo_b = encode_s(repo) if isinstance(repo, unicode) else repo
            repo_b = repo_b.lstrip(b"/")
            try:
                repo_obj = librdiff.RdiffRepo(user_root_b, repo_b)
                backups = repo_obj.get_history_entries(-1, earliest_date,
                                                       latest_date)
                allBackups += [{"repo_path": repo_obj.path,
                                "repo_name": repo_obj.display_name,
                                "date": backup.date,
                                "size": backup.size,
                                "errors": backup.errors} for backup in backups]
            except librdiff.FileError as e:
                repoErrors.append(
                    {"repo_path": repo_b,
                     "repo_name": decode_s(repo_b, 'replace'),
                     "error": unicode(e)})

        allBackups.sort(lambda x, y: cmp(y["date"], x["date"]))
        failedBackups = filter(lambda x: x["errors"], allBackups)

        # group successful backups by day
        successfulBackups = filter(lambda x: not x["errors"], allBackups)
        if successfulBackups:
            lastSuccessDate = successfulBackups[0]["date"]
        successfulBackups = rdw_helpers.groupby(
            successfulBackups, lambda x: x["date"].getLocalDaysSinceEpoch())

        userMessages = []

        # generate failure messages
        if includeFailure:
            for job in failedBackups:
                date = job["date"]
                job.update(
                    {"is_success": False,
                     "date": date,
                     "repoErrors": [],
                     "backups": [],
                     "repo_path": job["repo_path"],
                     "repo_name": job["repo_name"]})
                userMessages.append(job)

        # generate success messages (publish date is most recent backup date)
        if includeSuccess:
            for day in successfulBackups.keys():
                date = successfulBackups[day][0]["date"]

                # include repository errors in most recent entry
                if date == lastSuccessDate:
                    repoErrorsForMsg = repoErrors
                else:
                    repoErrorsForMsg = []

                userMessages.append(
                    {"is_success": True,
                     "date": date,
                     "repoErrors": repoErrorsForMsg,
                     "backups": successfulBackups[day]})

        # sort messages by date
        userMessages.sort(lambda x, y: cmp(y["date"], x["date"]))
        return userMessages
      for repo in repos:
         try:
            backups = librdiff.getBackupHistoryForDateRange(rdw_helpers.joinPaths(userRoot, repo), earliestDate, latestDate)
            allBackups += [{"repo": repo, "date": backup.date, "displayDate": backup.date.getDisplayString(),
               "size": rdw_helpers.formatFileSizeStr(backup.size), "errors": backup.errors} for backup in backups]
         except librdiff.FileError, error:
            repoErrors.append({"repo": repo, "error": error.getErrorString()})

      allBackups.sort(lambda x, y: cmp(y["date"], x["date"]))
      failedBackups = filter(lambda x: x["errors"], allBackups)

      # group successful backups by day
      successfulBackups = filter(lambda x: not x["errors"], allBackups)
      if successfulBackups:
         lastSuccessDate = successfulBackups[0]["date"]
      successfulBackups = rdw_helpers.groupby(successfulBackups, lambda x: x["date"].getLocalDaysSinceEpoch())

      userMessages = []

      # generate failure messages
      if includeFailure:
         for job in failedBackups:
            date = job["date"]
            title = "Backup Failed: " + job["repo"]
            job.update({"isSuccess": False, "date": date, "pubDate": date.getRSSPubDateString(),
               "link": self._buildStatusEntryUrl(job["repo"], date), "title": title, "repoErrors": [], "backups": []})
            userMessages.append(job)

      # generate success messages (publish date is most recent backup date)
      if includeSuccess:
         for day in successfulBackups.keys():