Example #1
0
 def stats_line(self):
     """
     Log progress statistics for the replication pass in flight.

     When at least one partition has been replicated, emits overall
     progress (count, percentage, rate, ETA), suffix hash/sync ratios,
     and partition-time extremes; otherwise logs how long the pass has
     been idle.
     """
     if not self.replication_count:
         self.logger.info(_("Nothing replicated for %s seconds."),
             (time.time() - self.start))
         return
     # 'or 0.000001' keeps the rate division safe if elapsed is zero.
     duration = (time.time() - self.start) or 0.000001
     eta = compute_eta(self.start, self.replication_count, self.job_count)
     self.logger.info(_("%(replicated)d/%(total)d (%(percentage).2f%%)"
         " partitions replicated in %(time).2fs (%(rate).2f/sec, "
         "%(remaining)s remaining)"),
         dict(replicated=self.replication_count,
              total=self.job_count,
              percentage=self.replication_count * 100.0 / self.job_count,
              time=time.time() - self.start,
              rate=self.replication_count / duration,
              remaining='%d%s' % eta))
     if self.suffix_count:
         self.logger.info(_("%(checked)d suffixes checked - "
             "%(hashed).2f%% hashed, %(synced).2f%% synced"),
             dict(checked=self.suffix_count,
                  hashed=(self.suffix_hash * 100.0) / self.suffix_count,
                  synced=(self.suffix_sync * 100.0) / self.suffix_count))
         self.partition_times.sort()
         times = self.partition_times
         self.logger.info(_("Partition times: max %(max).4fs, "
             "min %(min).4fs, med %(med).4fs"),
             dict(max=times[-1],
                  min=times[0],
                  med=times[len(times) // 2]))
Example #2
0
 def stats_line(self):
     """
     Logs various stats for the currently running replication pass.

     Emits overall progress (count, percentage, rate, ETA), then suffix
     hash/sync ratios and partition-time extremes when available; when
     nothing has been replicated yet, logs the idle duration instead.
     """
     if self.replication_count:
         # (time.time() - self.start) can be exactly 0 when the pass
         # completes within the clock resolution; clamp it so the rate
         # division below cannot raise ZeroDivisionError (matches the
         # guard used by the other stats_line variants in this file).
         elapsed = (time.time() - self.start) or 0.000001
         rate = self.replication_count / elapsed
         self.logger.info(_("%(replicated)d/%(total)d (%(percentage).2f%%)"
             " partitions replicated in %(time).2fs (%(rate).2f/sec, "
             "%(remaining)s remaining)"),
             {'replicated': self.replication_count, 'total': self.job_count,
              'percentage': self.replication_count * 100.0 / self.job_count,
              'time': time.time() - self.start, 'rate': rate,
              'remaining': '%d%s' % compute_eta(self.start,
                        self.replication_count, self.job_count)})
         if self.suffix_count:
             self.logger.info(_("%(checked)d suffixes checked - "
                 "%(hashed).2f%% hashed, %(synced).2f%% synced"),
                 {'checked': self.suffix_count,
                  'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
                  'synced': (self.suffix_sync * 100.0) / self.suffix_count})
             # Sort in place so min/median/max can be read by index.
             self.partition_times.sort()
             self.logger.info(_("Partition times: max %(max).4fs, "
                 "min %(min).4fs, med %(med).4fs"),
                 {'max': self.partition_times[-1],
                  'min': self.partition_times[0],
                  'med': self.partition_times[
                             len(self.partition_times) // 2]})
     else:
         self.logger.info(_("Nothing replicated for %s seconds."),
             (time.time() - self.start))
Example #3
0
    def stats_line(self):
        """
        Logs various stats for the currently running replication pass.

        Only logs when the attempted count has advanced since the last
        call (to avoid repeating identical progress lines); otherwise
        reports how long the pass has been idle.
        """
        stats = self.total_stats
        replication_count = stats.attempted
        if replication_count > self.last_replication_count:
            self.last_replication_count = replication_count
            # 'or 0.000001' keeps the rate division safe when elapsed
            # time is zero (fast pass / coarse clock resolution).
            elapsed = (time.time() - self.start) or 0.000001
            rate = replication_count / elapsed
            self.logger.info(
                _("%(replicated)d/%(total)d (%(percentage).2f%%)"
                  " partitions replicated in %(time).2fs (%(rate).2f/sec, "
                  "%(remaining)s remaining)"),
                {'replicated': replication_count,
                 'total': self.job_count,
                 'percentage': replication_count * 100.0 / self.job_count,
                 'time': time.time() - self.start,
                 'rate': rate,
                 'remaining': '%d%s' % compute_eta(
                     self.start, replication_count, self.job_count)})
            # Pass the mapping as a lazy logging argument, consistent
            # with every other logger call here, instead of eagerly
            # %-formatting the message string.
            self.logger.info(
                _('%(success)s successes, %(failure)s failures'),
                {'success': stats.success, 'failure': stats.failure})

            if stats.suffix_count:
                self.logger.info(
                    _("%(checked)d suffixes checked - "
                      "%(hashed).2f%% hashed, %(synced).2f%% synced"),
                    {'checked': stats.suffix_count,
                     'hashed':
                     (stats.suffix_hash * 100.0) / stats.suffix_count,
                     'synced':
                     (stats.suffix_sync * 100.0) / stats.suffix_count})
                # Sort in place so min/median/max can be read by index.
                self.partition_times.sort()
                self.logger.info(
                    _("Partition times: max %(max).4fs, "
                      "min %(min).4fs, med %(med).4fs"),
                    {'max': self.partition_times[-1],
                     'min': self.partition_times[0],
                     'med': self.partition_times[
                         len(self.partition_times) // 2]})
        else:
            self.logger.info(_("Nothing replicated for %s seconds."),
                             (time.time() - self.start))
    def stats_line(self):
        """
        Logs various stats for the currently running reconstruction pass.
        """
        # Only report progress once device/partition totals are known and
        # at least one device has been processed; otherwise fall through
        # to the "nothing reconstructed" idle message.
        if (self.device_count and self.part_count and
                self.reconstruction_device_count):
            # 'or 0.000001' keeps the rate division safe when elapsed
            # time is zero (fast pass / coarse clock resolution).
            elapsed = (time.time() - self.start) or 0.000001
            rate = self.reconstruction_part_count / elapsed
            # Extrapolate the grand total from the per-device average of
            # the devices seen so far.
            # NOTE(review): true division makes this a float; presumably
            # compute_eta tolerates that -- confirm against its definition.
            total_part_count = (self.part_count *
                                self.device_count /
                                self.reconstruction_device_count)
            self.logger.info(
                _("%(reconstructed)d/%(total)d (%(percentage).2f%%)"
                  " partitions of %(device)d/%(dtotal)d "
                  "(%(dpercentage).2f%%) devices"
                  " reconstructed in %(time).2fs "
                  "(%(rate).2f/sec, %(remaining)s remaining)"),
                {'reconstructed': self.reconstruction_part_count,
                 'total': self.part_count,
                 'percentage':
                 self.reconstruction_part_count * 100.0 / self.part_count,
                 'device': self.reconstruction_device_count,
                 'dtotal': self.device_count,
                 'dpercentage':
                 self.reconstruction_device_count * 100.0 / self.device_count,
                 'time': time.time() - self.start, 'rate': rate,
                 'remaining': '%d%s' %
                 compute_eta(self.start,
                             self.reconstruction_part_count,
                             total_part_count)})

            if self.suffix_count and self.partition_times:
                self.logger.info(
                    _("%(checked)d suffixes checked - "
                      "%(hashed).2f%% hashed, %(synced).2f%% synced"),
                    {'checked': self.suffix_count,
                     'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
                     'synced': (self.suffix_sync * 100.0) / self.suffix_count})
                # Sort in place so min/median/max can be read by index.
                self.partition_times.sort()
                self.logger.info(
                    _("Partition times: max %(max).4fs, "
                      "min %(min).4fs, med %(med).4fs"),
                    {'max': self.partition_times[-1],
                     'min': self.partition_times[0],
                     'med': self.partition_times[
                         len(self.partition_times) // 2]})
        else:
            self.logger.info(
                _("Nothing reconstructed for %s seconds."),
                (time.time() - self.start))
Example #5
0
 def direct(obj, part, nodes):
     """
     HEAD one object on each of its nodes and tally how many copies
     were found, updating the shared progress counters and printing a
     periodic status line.
     """
     found_count = 0
     for node in nodes:
         error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
         try:
             attempts, _junk = direct_client.retry(
                 direct_client.direct_head_object, node, part, account,
                 container, obj, error_log=error_log, retries=retries,
                 headers=headers)
         except ClientException as err:
             # 404/507 just mean the copy is absent/unmounted here;
             # anything else is worth reporting.
             if err.http_status not in (404, 507):
                 error_log('Giving up on /%s/%s/%s/%s: %s' %
                           (part, account, container, obj, err))
         except (Exception, Timeout) as err:
             error_log('Giving up on /%s/%s/%s/%s: %s' %
                       (part, account, container, obj, err))
         else:
             retries_done[0] += attempts - 1
             found_count += 1
     missing = len(nodes) - found_count
     if output_missing_partitions and missing > 0:
         # Clear the in-progress status line before reporting.
         print('\r\x1B[K', end='')
         stdout.flush()
         print('# Object partition %s missing %s cop%s' %
               (part, missing, 'y' if missing == 1 else 'ies'),
               file=stderr)
     object_copies_found[0] += found_count
     object_copies_missing[missing] += 1
     objects_queried[0] += 1
     # Refresh the status line at most once every five seconds.
     if time() >= next_report[0]:
         next_report[0] = time() + 5
         eta, eta_unit = compute_eta(begun, objects_queried[0],
                                     objects_listed)
         if not json_output:
             print('\r\x1B[KQuerying objects: %d of %d, %d%s left, %d '
                   'retries' % (objects_queried[0], objects_listed,
                                round(eta), eta_unit, retries_done[0]),
                   end='')
         stdout.flush()
Example #6
0
 def stats_line(self):
     """
     Logs various stats for the currently running replication pass.

     When at least one partition has been replicated, emits overall
     progress (count, percentage, rate, ETA), suffix hash/sync ratios,
     and partition-time extremes; otherwise logs the idle duration.
     """
     if self.replication_count:
         # 'or 0.000001' keeps the rate division safe when elapsed time
         # is zero (fast pass / coarse clock resolution).
         elapsed = (time.time() - self.start) or 0.000001
         rate = self.replication_count / elapsed
         self.logger.info(
             _(
                 "%(replicated)d/%(total)d (%(percentage).2f%%)"
                 " partitions replicated in %(time).2fs (%(rate).2f/sec, "
                 "%(remaining)s remaining)"
             ),
             {
                 "replicated": self.replication_count,
                 "total": self.job_count,
                 "percentage": self.replication_count * 100.0 / self.job_count,
                 "time": time.time() - self.start,
                 "rate": rate,
                 "remaining": "%d%s" % compute_eta(self.start, self.replication_count, self.job_count),
             },
         )
         if self.suffix_count:
             self.logger.info(
                 _("%(checked)d suffixes checked - " "%(hashed).2f%% hashed, %(synced).2f%% synced"),
                 {
                     "checked": self.suffix_count,
                     "hashed": (self.suffix_hash * 100.0) / self.suffix_count,
                     "synced": (self.suffix_sync * 100.0) / self.suffix_count,
                 },
             )
             # Sort in place so min/median/max can be read by index.
             self.partition_times.sort()
             self.logger.info(
                 _("Partition times: max %(max).4fs, " "min %(min).4fs, med %(med).4fs"),
                 {
                     "max": self.partition_times[-1],
                     "min": self.partition_times[0],
                     "med": self.partition_times[len(self.partition_times) // 2],
                 },
             )
     else:
         self.logger.info(_("Nothing replicated for %s seconds."), (time.time() - self.start))
 def direct(obj, part, nodes):
     """
     HEAD one object on each of its nodes and tally how many copies
     were found, updating the shared progress counters and printing a
     periodic status line.

     NOTE(review): relies on closure/module names (account, container,
     retries, headers, retries_done, object_copies_found, ...) defined
     elsewhere in this file -- presumably set up by the enclosing scan
     routine; verify against the caller.
     """
     found_count = 0
     for node in nodes:
         error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
         try:
             attempts, _junk = direct_client.retry(
                 direct_client.direct_head_object, node, part, account,
                 container, obj, error_log=error_log, retries=retries,
                 headers=headers)
             retries_done[0] += attempts - 1
             found_count += 1
         except ClientException as err:
             # 404/507 just mean the copy is absent/unmounted here;
             # anything else is worth reporting.
             if err.http_status not in (404, 507):
                 error_log('Giving up on /%s/%s/%s/%s: %s' % (part, account,
                           container, obj, err))
         except (Exception, Timeout) as err:
             error_log('Giving up on /%s/%s/%s/%s: %s' % (part, account,
                       container, obj, err))
     if output_missing_partitions and \
             found_count < len(nodes):
         missing = len(nodes) - found_count
         # Clear the in-progress status line before reporting.
         print('\r\x1B[K', end='')
         stdout.flush()
         print('# Object partition %s missing %s cop%s' % (
             part, missing, 'y' if missing == 1 else 'ies'), file=stderr)
     object_copies_found[0] += found_count
     object_copies_missing[len(nodes) - found_count] += 1
     objects_queried[0] += 1
     # Refresh the status line at most once every five seconds.
     if time() >= next_report[0]:
         next_report[0] = time() + 5
         eta, eta_unit = compute_eta(begun, objects_queried[0],
                                     objects_listed)
         if not json_output:
             print('\r\x1B[KQuerying objects: %d of %d, %d%s left, %d '
                   'retries' % (objects_queried[0], objects_listed,
                                round(eta), eta_unit, retries_done[0]),
                   end='')
         stdout.flush()