Code example #1
File: replicator.py Project: remixtj/swift
    def get_worker_args(self, once=False, **kwargs):
        if self.replicator_workers < 1:
            return []

        override_opts = parse_override_options(once=once, **kwargs)
        have_overrides = bool(override_opts.devices or override_opts.partitions
                              or override_opts.policies)

        # save this off for ring-change detection later in is_healthy()
        self.all_local_devices = self.get_local_devices()

        if override_opts.devices:
            devices_to_replicate = [
                d for d in override_opts.devices if d in self.all_local_devices
            ]
        else:
            # The sort isn't strictly necessary since we're just trying to
            # spread devices around evenly, but it makes testing easier.
            devices_to_replicate = sorted(self.all_local_devices)

        # Distribute devices among workers as evenly as possible
        self.replicator_workers = min(self.replicator_workers,
                                      len(devices_to_replicate))
        return [{
            'override_devices': devs,
            'override_partitions': override_opts.partitions,
            'override_policies': override_opts.policies,
            'have_overrides': have_overrides,
            'multiprocess_worker_index': index
        } for index, devs in enumerate(
            distribute_evenly(devices_to_replicate, self.replicator_workers))]
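
The method above returns one kwargs dict per worker process. As a rough illustration (assuming a configured replicator instance named replicator, three local devices 'sda', 'sdb', 'sdc', and replicator_workers = 2; the instance and device names are made up), the call and its result would look roughly like this:

# Hypothetical call; the 'replicator' instance and device names are
# illustrative only.
worker_args = replicator.get_worker_args(once=True)
# distribute_evenly() splits the sorted device list across the two workers:
# [{'override_devices': ['sda', 'sdc'],
#   'override_partitions': [], 'override_policies': [],
#   'have_overrides': False, 'multiprocess_worker_index': 0},
#  {'override_devices': ['sdb'],
#   'override_partitions': [], 'override_policies': [],
#   'have_overrides': False, 'multiprocess_worker_index': 1}]
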
Code example #2
File: replicator.py Project: mahak/swift
    def get_worker_args(self, once=False, **kwargs):
        if self.replicator_workers < 1:
            return []

        override_opts = parse_override_options(once=once, **kwargs)
        have_overrides = bool(override_opts.devices or override_opts.partitions
                              or override_opts.policies)

        # save this off for ring-change detection later in is_healthy()
        self.all_local_devices = self.get_local_devices()

        if override_opts.devices:
            devices_to_replicate = [
                d for d in override_opts.devices
                if d in self.all_local_devices]
        else:
            # The sort isn't strictly necessary since we're just trying to
            # spread devices around evenly, but it makes testing easier.
            devices_to_replicate = sorted(self.all_local_devices)

        # Distribute devices among workers as evenly as possible
        self.replicator_workers = min(self.replicator_workers,
                                      len(devices_to_replicate))
        return [{'override_devices': devs,
                 'override_partitions': override_opts.partitions,
                 'override_policies': override_opts.policies,
                 'have_overrides': have_overrides,
                 'multiprocess_worker_index': index}
                for index, devs in enumerate(
                    distribute_evenly(devices_to_replicate,
                                      self.replicator_workers))]
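
Both copies lean on the distribute_evenly helper from swift.common.utils to split devices across workers. A minimal sketch of the round-robin behaviour the code relies on (the real implementation may differ in detail):

def distribute_evenly(items, num_buckets):
    # Round-robin items into num_buckets lists of near-equal size.
    buckets = [[] for _ in range(num_buckets)]
    for index, item in enumerate(items):
        buckets[index % num_buckets].append(item)
    return buckets

# e.g. distribute_evenly(['sda', 'sdb', 'sdc'], 2)
#   -> [['sda', 'sdc'], ['sdb']]
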
Code example #3
File: replicator.py Project: remixtj/swift
    def run_once(self,
                 multiprocess_worker_index=None,
                 have_overrides=False,
                 *args,
                 **kwargs):
        if multiprocess_worker_index is not None:
            self.is_multiprocess_worker = True
            self._emplace_log_prefix(multiprocess_worker_index)

        rsync_reaper = eventlet.spawn(self._child_process_reaper)
        self._zero_stats()
        self.logger.info(_("Running object replicator in script mode."))

        override_opts = parse_override_options(once=True, **kwargs)
        devices = override_opts.devices or None
        partitions = override_opts.partitions or None
        policies = override_opts.policies or None

        start_time = time.time()
        self.replicate(override_devices=devices,
                       override_partitions=partitions,
                       override_policies=policies,
                       start_time=start_time)
        end_time = time.time()
        total = (end_time - start_time) / 60
        self.logger.info(
            _("Object replication complete (once). (%.02f minutes)"), total)

        # If we've been manually run on a subset of
        # policies/devices/partitions, then our recon stats are not
        # representative of how replication is doing, so we don't publish
        # them.
        if self.is_multiprocess_worker:
            # The main process checked for overrides and determined that
            # there were none
            should_update_recon = not have_overrides
        else:
            # We are single-process, so update recon only if we worked on
            # everything
            should_update_recon = not (partitions or devices or policies)
        if should_update_recon:
            self.update_recon(total, end_time, devices)

        # Give rsync processes one last chance to exit, then bail out and
        # let them be init's problem
        self._child_process_reaper_queue.put(None)
        rsync_reaper.wait()
Code example #4
File: replicator.py Project: mahak/swift
    def run_once(self, multiprocess_worker_index=None,
                 have_overrides=False, *args, **kwargs):
        if multiprocess_worker_index is not None:
            self.is_multiprocess_worker = True
            self._emplace_log_prefix(multiprocess_worker_index)

        rsync_reaper = eventlet.spawn(self._child_process_reaper)
        self._zero_stats()
        self.logger.info(_("Running object replicator in script mode."))

        override_opts = parse_override_options(once=True, **kwargs)
        devices = override_opts.devices or None
        partitions = override_opts.partitions or None
        policies = override_opts.policies or None

        start_time = time.time()
        self.replicate(
            override_devices=devices,
            override_partitions=partitions,
            override_policies=policies,
            start_time=start_time)
        end_time = time.time()
        total = (end_time - start_time) / 60
        self.logger.info(
            _("Object replication complete (once). (%.02f minutes)"), total)

        # If we've been manually run on a subset of
        # policies/devices/partitions, then our recon stats are not
        # representative of how replication is doing, so we don't publish
        # them.
        if self.is_multiprocess_worker:
            # The main process checked for overrides and determined that
            # there were none
            should_update_recon = not have_overrides
        else:
            # We are single-process, so update recon only if we worked on
            # everything
            should_update_recon = not (partitions or devices or policies)
        if should_update_recon:
            self.update_recon(total, end_time, devices)

        # Give rsync processes one last chance to exit, then bail out and
        # let them be init's problem
        self._child_process_reaper_queue.put(None)
        rsync_reaper.wait()
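
In both run_once variants the recon-publication rule boils down to a small predicate: publish stats only when the pass covered everything the node is responsible for. A standalone restatement for clarity (hypothetical helper, not part of Swift):

def should_publish_recon(is_multiprocess_worker, have_overrides,
                         devices, partitions, policies):
    # Hypothetical helper restating the logic above, for illustration only.
    if is_multiprocess_worker:
        # The parent process already inspected the overrides for the run.
        return not have_overrides
    # Single-process: publish only if no subset was requested.
    return not (devices or partitions or policies)
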
Code example #5
File: db_replicator.py Project: Igorlcr/swift-2
    def run_once(self, *args, **kwargs):
        """Run a replication pass once."""
        override_options = parse_override_options(once=True, **kwargs)

        devices_to_replicate = override_options.devices or Everything()
        partitions_to_replicate = override_options.partitions or Everything()

        self._zero_stats()
        dirs = []
        ips = whataremyips(self.bind_ip)
        if not ips:
            self.logger.error(_('ERROR Failed to get my own IPs?'))
            return

        if self.handoffs_only:
            self.logger.warning(
                'Starting replication pass with handoffs_only enabled. '
                'This mode is not intended for normal '
                'operation; use handoffs_only with care.')

        self._local_device_ids = set()
        found_local = False
        for node in self.ring.devs:
            if node and is_local_device(ips, self.port, node['replication_ip'],
                                        node['replication_port']):
                found_local = True
                try:
                    dev_path = check_drive(self.root, node['device'],
                                           self.mount_check)
                except ValueError as err:
                    self._add_failure_stats([
                        (failure_dev['replication_ip'], failure_dev['device'])
                        for failure_dev in self.ring.devs if failure_dev
                    ])
                    self.logger.warning('Skipping: %s', err)
                    continue
                if node['device'] not in devices_to_replicate:
                    self.logger.debug(
                        'Skipping device %s due to given arguments',
                        node['device'])
                    continue
                unlink_older_than(os.path.join(dev_path, 'tmp'),
                                  time.time() - self.reclaim_age)
                datadir = os.path.join(self.root, node['device'], self.datadir)
                if os.path.isdir(datadir):
                    self._local_device_ids.add(node['id'])
                    part_filt = self._partition_dir_filter(
                        node['id'], partitions_to_replicate)
                    dirs.append((datadir, node['id'], part_filt))
        if not found_local:
            self.logger.error(
                "Can't find itself %s with port %s in ring "
                "file, not replicating", ", ".join(ips), self.port)
        self.logger.info(_('Beginning replication run'))
        for part, object_file, node_id in self.roundrobin_datadirs(dirs):
            self.cpool.spawn_n(self._replicate_object, part, object_file,
                               node_id)
        self.cpool.waitall()
        self.logger.info(_('Replication run OVER'))
        if self.handoffs_only:
            self.logger.warning(
                'Finished replication pass with handoffs_only enabled. '
                'If handoffs_only is no longer required, disable it.')
        self._report_stats()
Code example #6
File: db_replicator.py Project: jgmerritt/swift
    def run_once(self, *args, **kwargs):
        """Run a replication pass once."""
        override_options = parse_override_options(once=True, **kwargs)

        devices_to_replicate = override_options.devices or Everything()
        partitions_to_replicate = override_options.partitions or Everything()

        self._zero_stats()
        dirs = []
        ips = whataremyips(self.bind_ip)
        if not ips:
            self.logger.error(_('ERROR Failed to get my own IPs?'))
            return

        if self.handoffs_only:
            self.logger.warning(
                'Starting replication pass with handoffs_only enabled. '
                'This mode is not intended for normal '
                'operation; use handoffs_only with care.')

        self._local_device_ids = set()
        found_local = False
        for node in self.ring.devs:
            if node and is_local_device(ips, self.port,
                                        node['replication_ip'],
                                        node['replication_port']):
                found_local = True
                try:
                    dev_path = check_drive(self.root, node['device'],
                                           self.mount_check)
                except ValueError as err:
                    self._add_failure_stats(
                        [(failure_dev['replication_ip'],
                          failure_dev['device'])
                         for failure_dev in self.ring.devs if failure_dev])
                    self.logger.warning('Skipping: %s', err)
                    continue
                if node['device'] not in devices_to_replicate:
                    self.logger.debug(
                        'Skipping device %s due to given arguments',
                        node['device'])
                    continue
                unlink_older_than(
                    os.path.join(dev_path, 'tmp'),
                    time.time() - self.reclaim_age)
                datadir = os.path.join(self.root, node['device'], self.datadir)
                if os.path.isdir(datadir):
                    self._local_device_ids.add(node['id'])
                    part_filt = self._partition_dir_filter(
                        node['id'], partitions_to_replicate)
                    dirs.append((datadir, node['id'], part_filt))
        if not found_local:
            self.logger.error("Can't find itself %s with port %s in ring "
                              "file, not replicating",
                              ", ".join(ips), self.port)
        self.logger.info(_('Beginning replication run'))
        for part, object_file, node_id in self.roundrobin_datadirs(dirs):
            self.cpool.spawn_n(
                self._replicate_object, part, object_file, node_id)
        self.cpool.waitall()
        self.logger.info(_('Replication run OVER'))
        if self.handoffs_only:
            self.logger.warning(
                'Finished replication pass with handoffs_only enabled. '
                'If handoffs_only is no longer required, disable it.')
        self._report_stats()
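
The db_replicator variants use the Everything() sentinel from swift.common.utils so that, with no device or partition override, the later membership checks (such as node['device'] not in devices_to_replicate) accept every candidate. A minimal sketch of how such a sentinel behaves (the real class may differ in detail):

class Everything(object):
    # A truthy container that claims to contain any element, so
    # "x in Everything()" is always True.
    def __contains__(self, element):
        return True

    def __bool__(self):
        return True
    __nonzero__ = __bool__  # Python 2 spelling of __bool__

# With no override given, every device qualifies:
devices_to_replicate = None or Everything()
assert 'sdb' in devices_to_replicate
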