Example #1
 def replicate(self, override_devices=[], override_partitions=[]):
     """Run a replication pass"""
     self.start = time.time()
     self.suffix_count = 0
     self.suffix_sync = 0
     self.suffix_hash = 0
     self.replication_count = 0
     self.last_replication_count = -1
     self.partition_times = []
     stats = eventlet.spawn(self.heartbeat)
     lockup_detector = eventlet.spawn(self.detect_lockups)
     eventlet.sleep()  # Give spawns a cycle
     try:
         self.run_pool = GreenPool(size=self.concurrency)
         jobs = self.collect_jobs()
         for job in jobs:
             if override_devices and job['device'] not in override_devices:
                 continue
             if override_partitions and \
                     job['partition'] not in override_partitions:
                 continue
             dev_path = join(self.devices_dir, job['device'])
             if self.mount_check and not os.path.ismount(dev_path):
                 self.logger.warn(_('%s is not mounted'), job['device'])
                 continue
             if not self.check_ring():
                 self.logger.info(_("Ring change detected. Aborting "
                                    "current replication pass."))
                 return
             if job['delete']:
                 self.run_pool.spawn(self.update_deleted, job)
             else:
                 self.run_pool.spawn(self.update, job)
         with Timeout(self.lockup_timeout):
             self.run_pool.waitall()
     except (Exception, Timeout):
         self.logger.exception(_("Exception in top-level replication loop"))
         self.kill_coros()
     finally:
         stats.kill()
         lockup_detector.kill()
         self.stats_line()
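
This skeleton recurs in several of the examples below: size a GreenPool to the desired concurrency, spawn one green thread per job, then block in waitall() under a Timeout so a hung coroutine cannot stall the pass forever. A minimal, self-contained sketch of that pattern follows; the handle function and the job range are hypothetical stand-ins:

import eventlet
from eventlet import GreenPool, Timeout

def handle(job):
    eventlet.sleep(0.01)  # stand-in for real per-job I/O

pool = GreenPool(size=8)
for job in range(100):
    pool.spawn(handle, job)
try:
    with Timeout(30):
        pool.waitall()  # returns once every spawned coroutine has finished
except Timeout:
    print('pass timed out; some jobs never finished')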
Example #2
 def __init__(self, conf, logger=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='replicator')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.bind_ip = conf.get('bind_ip', '0.0.0.0')
     self.port = int(conf.get('bind_port', self.default_port))
     concurrency = int(conf.get('concurrency', 8))
     self.cpool = GreenPool(size=concurrency)
     swift_dir = conf.get('swift_dir', '/etc/swift')
     self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
     self._local_device_ids = set()
     self.per_diff = int(conf.get('per_diff', 1000))
     self.max_diffs = int(conf.get('max_diffs') or 100)
     self.interval = int(
         conf.get('interval') or conf.get('run_pause') or 30)
     if 'run_pause' in conf and 'interval' not in conf:
         self.logger.warning('Option %(type)s-replicator/run_pause '
                             'is deprecated and will be removed in a '
                             'future version. Update your configuration'
                             ' to use option %(type)s-replicator/'
                             'interval.' % {'type': self.server_type})
     self.databases_per_second = int(conf.get('databases_per_second', 50))
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.rsync_compress = config_true_value(
         conf.get('rsync_compress', 'no'))
     self.rsync_module = conf.get('rsync_module', '').rstrip('/')
     if not self.rsync_module:
         self.rsync_module = '{replication_ip}::%s' % self.server_type
     self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self._zero_stats()
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.recon_replicator = '%s.recon' % self.server_type
     self.rcache = os.path.join(self.recon_cache_path,
                                self.recon_replicator)
     self.extract_device_re = re.compile(
         '%s%s([^%s]+)' % (self.root, os.path.sep, os.path.sep))
     self.handoffs_only = config_true_value(conf.get('handoffs_only', 'no'))
Example #3
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='replicator')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.port = int(conf.get('bind_port', self.default_port))
     concurrency = int(conf.get('concurrency', 8))
     self.cpool = GreenPool(size=concurrency)
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.ring = ring.Ring(os.path.join(chase_dir, self.ring_file))
     self.per_diff = int(conf.get('per_diff', 1000))
     self.run_pause = int(conf.get('run_pause', 30))
     self.vm_test_mode = conf.get('vm_test_mode',
                                  'no').lower() in ('yes', 'true', 'on',
                                                    '1')
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
     self._zero_stats()
Example #4
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.interval = int(conf.get('interval', 3600))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips()
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.delay_reaping = int(conf.get('delay_reaping') or 0)
Example #5
    def reconstruct(self, **kwargs):
        """Run a reconstruction pass"""
        self._reset_stats()
        self.partition_times = []

        stats = spawn(self.heartbeat)
        lockup_detector = spawn(self.detect_lockups)
        sleep()  # Give spawns a cycle

        try:
            self.run_pool = GreenPool(size=self.concurrency)
            for part_info in self.collect_parts(**kwargs):
                if not self.check_ring(part_info['policy'].object_ring):
                    self.logger.info(
                        _("Ring change detected. Aborting "
                          "current reconstruction pass."))
                    return
                jobs = self.build_reconstruction_jobs(part_info)
                if not jobs:
                    # If this part belongs on this node, _get_part_jobs
                    # will *always* build a sync_job - even if there are
                    # no suffixes in the partition that need to sync.
                    # If there are any suffixes in the partition then our
                    # job list would have *at least* one revert job.
                    # Therefore we know this part a) doesn't belong on
                    # this node and b) doesn't have any suffixes in it.
                    self.run_pool.spawn(self.delete_partition,
                                        part_info['part_path'])
                for job in jobs:
                    self.run_pool.spawn(self.process_job, job)
            with Timeout(self.lockup_timeout):
                self.run_pool.waitall()
        except (Exception, Timeout):
            self.logger.exception(
                _("Exception in top-level"
                  "reconstruction loop"))
            self.kill_coros()
        finally:
            stats.kill()
            lockup_detector.kill()
            self.stats_line()
Example #6
    def run_server():
        wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        # Turn off logging requests by the underlying WSGI software.
        wsgi.HttpProtocol.log_request = lambda *a: None
        # Redirect logging other messages by the underlying WSGI software.
        wsgi.HttpProtocol.log_message = \
            lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
        wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

        eventlet.hubs.use_hub(get_hub())
        eventlet.patcher.monkey_patch(all=False, socket=True)
        eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
        eventlet.debug.hub_exceptions(eventlet_debug)
        app = loadapp('config:%s' % conf_file,
                      global_conf={'log_name': log_name})
        pool = GreenPool(size=1024)
        try:
            wsgi.server(sock, app, NullLogger(), custom_pool=pool)
        except socket.error, err:
            if err[0] != errno.EINVAL:
                raise
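
Example #6 hands its GreenPool to eventlet.wsgi.server via custom_pool, so the server spawns one coroutine per client connection from that pool and concurrent requests are capped at the pool size. A minimal sketch of the same wiring, with a hypothetical hello-world WSGI app:

import eventlet
from eventlet import GreenPool, wsgi

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

sock = eventlet.listen(('127.0.0.1', 8080))
pool = GreenPool(size=1024)  # at most 1024 in-flight client handlers
wsgi.server(sock, app, custom_pool=pool)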
Example #7
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf)
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 3600))
     swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
     self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips()
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
Example #8
    def _autocontainer_loop(self,
                            account,
                            marker=None,
                            limit=None,
                            concurrency=1,
                            **kwargs):
        from functools import partial
        container_marker = self.flatns_manager(marker) if marker else None
        count = 0
        kwargs['pool_manager'] = get_pool_manager(pool_maxsize=concurrency * 2)
        # Start to list contents at 'marker' inside the last visited container
        if container_marker:
            for element in depaginate(
                    self.app.client_manager.storage.object_list,
                    listing_key=lambda x: x['objects'],
                    marker_key=lambda x: x.get('next_marker'),
                    truncated_key=lambda x: x['truncated'],
                    account=account,
                    container=container_marker,
                    marker=marker,
                    **kwargs):
                count += 1
                yield element
                if limit and count >= limit:
                    return

        pool = GreenPool(concurrency)
        for object_list in pool.imap(
                partial(self._list_autocontainer_objects,
                        account=account,
                        **kwargs),
                depaginate(self.app.client_manager.storage.container_list,
                           item_key=lambda x: x[0],
                           account=account,
                           marker=container_marker)):
            for element in object_list:
                count += 1
                yield element
                if limit and count >= limit:
                    return
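
GreenPool.imap, used in the second loop of Example #8, behaves like the builtin map except that up to pool-size calls run concurrently while results are still yielded in input order, which is why it composes cleanly with generators such as depaginate. A tiny sketch under that reading; fetch is a hypothetical worker:

import eventlet
from eventlet import GreenPool

def fetch(n):
    eventlet.sleep(0.01)  # stand-in for a network round trip
    return n * n

pool = GreenPool(4)  # at most four fetches in flight at a time
for result in pool.imap(fetch, range(10)):
    print(result)    # 0, 1, 4, ... -- input order is preserved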
Example #9
def downloadCutouts(coord_list): 
	'''Downloads cutouts from a list of RA, DEC pairs

	Args:
		coord_list (list): Nx2 list where N is the number
		of objects; first column being RA, second column
		being DEC
	'''
	print "Generating URLs"
	raw_params = (getData(RA, DEC) for RA, DEC in coord_list)
	params = (param for param in raw_params if param is not None)
	print "Downloading"

	if GEVENT: #Did we successfully import the concurrent downloading library?
		P = GreenPool(GREEN_POOL_SIZE)
		for status, name in P.starmap(getCutout, params):
			print '{:<30}'.format(name), status
	else: #Use the slower default functions instead
		for status, name in itertools.starmap(getCutout, params):
			print '{:<30}'.format(name), status

	print "Done"	
Example #10
    def test_locked_container_dbs(self):
        def run_test(num_locks, catch_503):
            container = 'container-%s' % uuid4()
            client.put_container(self.url, self.token, container)
            # Get the container info into memcache (so no stray
            # get_container_info calls muck up our timings)
            client.get_container(self.url, self.token, container)
            db_files = self.get_container_db_files(container)
            db_conns = []
            for i in range(num_locks):
                db_conn = connect(db_files[i])
                db_conn.execute('begin exclusive transaction')
                db_conns.append(db_conn)
            if catch_503:
                try:
                    client.delete_container(self.url, self.token, container)
                except client.ClientException as err:
                    self.assertEqual(err.http_status, 503)
                else:
                    self.fail("Expected ClientException but didn't get it")
            else:
                client.delete_container(self.url, self.token, container)

        proxy_conf = readconf(self.configs['proxy-server'],
                              section_name='app:proxy-server')
        node_timeout = int(proxy_conf.get('node_timeout', 10))
        pool = GreenPool()
        try:
            with Timeout(node_timeout + 5):
                pool.spawn(run_test, 1, False)
                pool.spawn(run_test, 2, True)
                pool.spawn(run_test, 3, True)
                pool.waitall()
        except Timeout as err:
            raise Exception(
                "The server did not return a 503 on container db locks, "
                "it just hangs: %s" % err)
Example #11
 def __init__(self, conf, logger=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.interval = int(conf.get('interval', 3600))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
     self.bind_port = int(conf.get('bind_port', 6002))
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.delay_reaping = int(conf.get('delay_reaping') or 0)
     reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30)
     self.reap_not_done_after = reap_warn_after + self.delay_reaping
     self.start_time = time()
Example #12
    def run(self, *args, **kwargs):
        try:
            self.logger.info('conscience agent: starting')

            pool = GreenPool(len(self.watchers))
            for watcher in self.watchers:
                pool.spawn(watcher.start)

            while True:
                sleep(1)
                # iterate over a snapshot: the list is mutated in the loop body
                for w in list(self.watchers):
                    if w.failed:
                        self.watchers.remove(w)
                        self.logger.warn('restart watcher "%s"', w.name)
                        new_w = ServiceWatcher(self.conf, w.service)
                        self.watchers.append(new_w)
                        pool.spawn(new_w.start)

        except Exception as e:
            self.logger.error('ERROR in main loop %s', e)
            raise e
        finally:
            self.logger.warn('conscience agent: stopping')
            self.stop_watchers()
Example #13
def direct_delete_container_entry(container_ring,
                                  account_name,
                                  container_name,
                                  object_name,
                                  headers=None):
    """
    Talk directly to the primary container servers to delete a particular
    object listing. Does not talk to object servers; use this only when a
    container entry does not actually have a corresponding object.
    """
    pool = GreenPool()
    part, nodes = container_ring.get_nodes(account_name, container_name)
    for node in nodes:
        pool.spawn_n(direct_delete_container_object,
                     node,
                     part,
                     account_name,
                     container_name,
                     object_name,
                     headers=headers)

    # This either worked or it didn't; if it didn't, we'll retry on the next
    # reconciler loop when we see the queue entry again.
    pool.waitall()
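
Note spawn_n rather than spawn here: spawn_n returns no GreenThread, so return values are discarded and exceptions cannot be collected afterwards, which fits a helper that deliberately ignores per-node outcomes and leaves retries to the next reconciler pass. A minimal sketch of the same fire-and-forget fan-out; delete_entry and the node names are hypothetical:

from eventlet import GreenPool

def delete_entry(node):
    pass  # stand-in for one direct request to a container server

pool = GreenPool()
for node in ('node1', 'node2', 'node3'):
    pool.spawn_n(delete_entry, node)  # no handle to wait on comes back
pool.waitall()  # only guarantees the pool has drained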
Example #14
File: replicator.py Project: remixtj/swift
    def replicate(self,
                  override_devices=None,
                  override_partitions=None,
                  override_policies=None,
                  start_time=None):
        """Run a replication pass"""
        if start_time is None:
            start_time = time.time()
        self.start = start_time
        self.last_replication_count = 0
        self.replication_cycle = (self.replication_cycle + 1) % 10
        self.partition_times = []
        self.my_replication_ips = self._get_my_replication_ips()
        self.all_devs_info = set()
        self.handoffs_remaining = 0

        stats = eventlet.spawn(self.heartbeat)
        eventlet.sleep()  # Give spawns a cycle

        current_nodes = None
        dev_stats = None
        num_jobs = 0
        try:
            self.run_pool = GreenPool(size=self.concurrency)
            jobs = self.collect_jobs(override_devices=override_devices,
                                     override_partitions=override_partitions,
                                     override_policies=override_policies)
            for job in jobs:
                dev_stats = self.stats_for_dev[job['device']]
                num_jobs += 1
                current_nodes = job['nodes']
                dev_path = check_drive(self.devices_dir, job['device'],
                                       self.mount_check)
                if not dev_path:
                    dev_stats.add_failure_stats([
                        (failure_dev['replication_ip'], failure_dev['device'])
                        for failure_dev in job['nodes']
                    ])
                    self.logger.warning(_('%s is not mounted'), job['device'])
                    continue
                if self.handoffs_first and not job['delete']:
                    # in handoffs first mode, we won't process primary
                    # partitions until rebalance was successful!
                    if self.handoffs_remaining:
                        self.logger.warning(
                            _("Handoffs first mode still has handoffs "
                              "remaining.  Aborting current "
                              "replication pass."))
                        break
                if not self.check_ring(job['policy'].object_ring):
                    self.logger.info(
                        _("Ring change detected. Aborting "
                          "current replication pass."))
                    return

                try:
                    if isfile(job['path']):
                        # Clean up any (probably zero-byte) files where a
                        # partition should be.
                        self.logger.warning(
                            'Removing partition directory '
                            'which was a file: %s', job['path'])
                        os.remove(job['path'])
                        continue
                except OSError:
                    continue
                if job['delete']:
                    self.run_pool.spawn(self.update_deleted, job)
                else:
                    self.run_pool.spawn(self.update, job)
            current_nodes = None
            self.run_pool.waitall()
        except (Exception, Timeout) as err:
            if dev_stats:
                if current_nodes:
                    dev_stats.add_failure_stats([
                        (failure_dev['replication_ip'], failure_dev['device'])
                        for failure_dev in current_nodes
                    ])
                else:
                    dev_stats.add_failure_stats(self.all_devs_info)
            self.logger.exception(
                _("Exception in top-level replication loop: %s"), err)
        finally:
            stats.kill()
            self.stats_line()
Example #15
File: agent.py Project: ldenel/oio-sds
    def run(self, *args, **kwargs):
        try:
            self.logger.info('event agent: starting')

            pool = GreenPool(len(self.workers))

            for worker in self.workers:
                pool.spawn(worker.start)

            def front(server, backend):
                while True:
                    msg = server.recv_multipart()
                    if validate_msg(msg):
                        try:
                            event_id = sqlite3.Binary(msg[2])
                            data = msg[3]
                            self.queue.put(event_id, data)
                            event = ['', msg[2], msg[3]]
                            backend.send_multipart(event)
                        except Exception:
                            pass
                        finally:
                            ack = msg[0:3]
                            server.send_multipart(ack)

            def back(backend):
                while True:
                    msg = backend.recv_multipart()
                    event_id = msg[1]
                    success = msg[2]
                    event_id = sqlite3.Binary(event_id)
                    if not success:
                        self.queue.failed(event_id)
                    else:
                        self.queue.delete(event_id)

            boss_pool = GreenPool(2)
            boss_pool.spawn_n(front, self.server, self.backend)
            boss_pool.spawn_n(back, self.backend)
            while True:
                sleep(1)

                now = time.time()
                if now - self.last_retry > self.retry_interval:
                    self.retry()
                    self.last_retry = now

                # iterate over a snapshot: the list is mutated in the loop body
                for w in list(self.workers):
                    if w.failed:
                        self.workers.remove(w)
                        self.logger.warn('restart worker "%s"', w.name)
                        new_w = EventWorker(self.conf, w.name, self.context)
                        self.workers.append(new_w)
                        pool.spawn(new_w.start)

        except Exception as e:
            self.logger.error('ERROR in main loop %s', e)
            raise e
        finally:
            self.logger.warn('event agent: stopping')
            self.stop_workers()
Example #16
import eventlet
from eventlet import GreenPool
from funcs_for_test import last_number_of_factorial, make_3_dim_list
gp = GreenPool()

for i in gp.imap(make_3_dim_list, [500]*10):
    print(i)


Example #17
 def _get_pool(self):
     from eventlet import GreenPool
     return GreenPool()
Example #18
 def __init__(self, socket_name):
     self.server_address = socket_name
     self.zerovm_exename = ['zerovm']
     self.pool = GreenPool()
     self.jobs = set()
     self.stats_dir = '/tmp'
Example #19
def generate_report(conf, policy_name=None):
    global json_output
    json_output = config_true_value(conf.get('dump_json', 'no'))
    if policy_name is None:
        policy = POLICIES.default
    else:
        policy = POLICIES.get_by_name(policy_name)
        if policy is None:
            exit('Unable to find policy: %s' % policy_name)
    if not json_output:
        print('Using storage policy: %s ' % policy.name)

    swift_dir = conf.get('swift_dir', '/etc/swift')
    retries = int(conf.get('retries', 5))
    concurrency = int(conf.get('concurrency', 25))
    endpoint_type = str(conf.get('endpoint_type', 'publicURL'))
    region_name = str(conf.get('region_name', ''))
    container_report = config_true_value(conf.get('container_report', 'yes'))
    object_report = config_true_value(conf.get('object_report', 'yes'))
    if not (object_report or container_report):
        exit("Neither container or object report is set to run")
    user_domain_name = str(conf.get('user_domain_name', ''))
    project_domain_name = str(conf.get('project_domain_name', ''))
    project_name = str(conf.get('project_name', ''))
    insecure = config_true_value(conf.get('keystone_api_insecure', 'no'))

    coropool = GreenPool(size=concurrency)

    os_options = {'endpoint_type': endpoint_type}
    if user_domain_name:
        os_options['user_domain_name'] = user_domain_name
    if project_domain_name:
        os_options['project_domain_name'] = project_domain_name
    if project_name:
        os_options['project_name'] = project_name
    if region_name:
        os_options['region_name'] = region_name

    url, token = get_auth(conf['auth_url'],
                          conf['auth_user'],
                          conf['auth_key'],
                          auth_version=conf.get('auth_version', '1.0'),
                          os_options=os_options,
                          insecure=insecure)
    account = url.rsplit('/', 1)[1]
    connpool = Pool(max_size=concurrency)
    connpool.create = lambda: SimpleClient(
        url=url, token=token, retries=retries)

    container_ring = Ring(swift_dir, ring_name='container')
    object_ring = Ring(swift_dir, ring_name=policy.ring_name)

    output = {}
    if container_report:
        output['container'] = container_dispersion_report(
            coropool, connpool, account, container_ring, retries,
            conf.get('partitions'), policy)
    if object_report:
        output['object'] = object_dispersion_report(coropool, connpool,
                                                    account, object_ring,
                                                    retries,
                                                    conf.get('partitions'),
                                                    policy)

    return output
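
Example #19 pairs two different pool types: the GreenPool bounds concurrent coroutines, while eventlet.pools.Pool bounds reusable resources (here, SimpleClient connections), lazily creating at most max_size of them through its create hook. A minimal sketch of that pairing; ClientPool and task are hypothetical, and where #19 assigns create as a lambda on the instance, the sketch subclasses instead:

from eventlet import GreenPool
from eventlet.pools import Pool

class ClientPool(Pool):
    def create(self):    # called only when no idle item is available
        return object()  # stand-in for a real API client

clients = ClientPool(max_size=4)  # never more than four clients exist
work = GreenPool(size=25)         # up to 25 coroutines share them

def task(i):
    with clients.item() as client:  # borrow a client; auto-returned on exit
        pass                        # stand-in for one request

for i in range(100):
    work.spawn(task, i)
work.waitall()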
Example #20
 def __init__(self, settings):
     super(ActiveTasks, self).__init__(settings)
     self.pool = GreenPool()
     self.pointers = list()
     self.update_metadata_enabled = settings.update_metadata
Example #21
 def __init__(self, settings):
     super(NSServer, self).__init__(settings)
     self.pool = GreenPool()
Example #22
    def test_connection(self):
        """
        conn = Connection(
            auth_endpoint="https://identity.api.rackspacecloud.com/v2.0",
            client_id=str(uuid.uuid4()),
            endpoint="http://localhost:8888/v1/12345",
            user="", key="")

        """

        conn = Connection(
            auth_endpoint="https://identity.api.rackspacecloud.com/v2.0",
            client_id=str(uuid.uuid4()),
            endpoint="http://166.78.143.130/v1/12345",
            user="",
            key="")

        conn.connect(token='blah')

        def create_worker(queue_name):
            return conn.create_queue(queue_name)

        def post_worker(queue):
            return queue.post_message('test_message', 10)

        def delete_worker(queue_name):
            conn.delete_queue(queue_name)
            return queue_name

        pool = GreenPool(100)

        def on_message_posted(greenthread):
            msg = greenthread.wait()
            print msg._href

        def on_queue_created(greenthread):
            queue = greenthread.wait()
            print queue.name

            for x in range(0, 10):
                gt = pool.spawn(post_worker, queue)
                gt.link(on_message_posted)

        queue_names = ["queue-" + str(x) for x in xrange(0, 5)]

        for queue_name in queue_names:
            gt = pool.spawn(create_worker, queue_name)
            gt.link(on_queue_created)

        pool.waitall()

        def delete_worker(queue_name):
            conn.delete_queue(queue_name)
            print "Queue:", queue_name, " deleted"

        for queue in conn.get_queues():
            gt = pool.spawn_n(delete_worker, queue.name)

        print "Waiting for everything to finish"
        pool.waitall()
        print "Done"
Example #23
File: proc.py Project: webknjaz/detox
 def __init__(self, providerbase):
     self._providerbase = providerbase
     self._spec2thread = {}
     self._pool = GreenPool()
     self._resources = {}
Example #24
    def reap_container(self, account, account_partition, account_nodes,
                       container):
        """
        Deletes the data and the container itself for the given container. This
        will call :func:`reap_object` up to sqrt(self.concurrency) times
        concurrently for the objects in the container.

        If there is any exception while deleting a single object, the process
        will continue for any other objects in the container and the failed
        objects will be tried again the next time this function is called with
        the same parameters.

        If there is any exception while listing the objects for deletion, the
        process will stop (but will obviously be tried again the next time this
        function is called with the same parameters). This is a possibility
        since the listing comes from querying just the primary remote container
        server.

        Once deletion has been attempted for every object, deletion of the
        container itself is attempted by sending a delete request to all
        container nodes. The format of the delete request is such that each
        container server will update a corresponding account server, removing
        the container from the account's listing.

        This function returns nothing and should raise no exception but only
        update various self.stats_* values for what occurs.

        :param account: The name of the account for the container.
        :param account_partition: The partition for the account on the account
                                  ring.
        :param account_nodes: The primary node dicts for the account.
        :param container: The name of the container to delete.

        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the account node dicts.
        """
        account_nodes = list(account_nodes)
        part, nodes = self.get_container_ring().get_nodes(account, container)
        node = nodes[-1]
        pool = GreenPool(size=self.object_concurrency)
        marker = ''
        while True:
            objects = None
            try:
                headers, objects = direct_get_container(
                    node, part, account, container,
                    marker=marker,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout)
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
                self.logger.increment('return_codes.2')
            except ClientException as err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
                self.logger.increment(
                    'return_codes.%d' % (err.http_status / 100,))
            if not objects:
                break
            try:
                policy_index = headers.get('X-Backend-Storage-Policy-Index', 0)
                for obj in objects:
                    if isinstance(obj['name'], unicode):
                        obj['name'] = obj['name'].encode('utf8')
                    pool.spawn(self.reap_object, account, container, part,
                               nodes, obj['name'], policy_index)
                pool.waitall()
            except (Exception, Timeout):
                self.logger.exception(_('Exception with objects for container '
                                        '%(container)s for account %(account)s'
                                        ),
                                      {'container': container,
                                       'account': account})
            marker = objects[-1]['name']
            if marker == '':
                break
        successes = 0
        failures = 0
        for node in nodes:
            anode = account_nodes.pop()
            try:
                direct_delete_container(
                    node, part, account, container,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout,
                    headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
                             'X-Account-Partition': str(account_partition),
                             'X-Account-Device': anode['device'],
                             'X-Account-Override-Deleted': 'yes'})
                successes += 1
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
                self.logger.increment('return_codes.2')
            except ClientException as err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                failures += 1
                self.logger.increment('containers_failures')
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
                self.logger.increment(
                    'return_codes.%d' % (err.http_status / 100,))
        if successes > failures:
            self.stats_containers_deleted += 1
            self.logger.increment('containers_deleted')
        elif not successes:
            self.stats_containers_remaining += 1
            self.logger.increment('containers_remaining')
        else:
            self.stats_containers_possibly_remaining += 1
            self.logger.increment('containers_possibly_remaining')
Example #25
    "run a function, print results"
    print msg, '<request>'
    res = func(*args)
    print msg, '<response>', res


if __name__ == '__main__':
    #from netcall import setup_logger
    #setup_logger()

    # Custom serializer/deserializer functions can be passed in. The server
    # side ones must match.
    echo = GreenRPCClient(green_env='eventlet', serializer=JSONSerializer())
    echo.connect('tcp://127.0.0.1:5555')

    tasks = GreenPool()
    spawn = tasks.spawn

    spawn(printer, "[echo] Echoing \"Hi there\"", echo.echo, "Hi there")

    try:
        print "Testing a remote exception...",
        echo.error()
        print "FAIL, no remote exception!"
    except RemoteRPCError, e:
        print "OK, got an expected remote exception:"
        #print e.ename
        print e.evalue
        print e.traceback

    try:
Example #26
File: replicator.py Project: niekun/swift
    def replicate(self,
                  override_devices=None,
                  override_partitions=None,
                  override_policies=None):
        """Run a replication pass"""
        self.start = time.time()
        self.suffix_count = 0
        self.suffix_sync = 0
        self.suffix_hash = 0
        self.replication_count = 0
        self.last_replication_count = -1
        self.replication_cycle = (self.replication_cycle + 1) % 10
        self.partition_times = []
        self.my_replication_ips = self._get_my_replication_ips()
        self.all_devs_info = set()
        self.handoffs_remaining = 0

        stats = eventlet.spawn(self.heartbeat)
        lockup_detector = eventlet.spawn(self.detect_lockups)
        eventlet.sleep()  # Give spawns a cycle

        current_nodes = None
        try:
            self.run_pool = GreenPool(size=self.concurrency)
            jobs = self.collect_jobs(override_devices=override_devices,
                                     override_partitions=override_partitions,
                                     override_policies=override_policies)
            for job in jobs:
                current_nodes = job['nodes']
                if override_devices and job['device'] not in override_devices:
                    continue
                if override_partitions and \
                        job['partition'] not in override_partitions:
                    continue
                dev_path = join(self.devices_dir, job['device'])
                if self.mount_check and not ismount(dev_path):
                    self._add_failure_stats([(failure_dev['replication_ip'],
                                              failure_dev['device'])
                                             for failure_dev in job['nodes']])
                    self.logger.warning(_('%s is not mounted'), job['device'])
                    continue
                if self.handoffs_first and not job['delete']:
                    # in handoffs first mode, we won't process primary
                    # partitions until rebalance was successful!
                    if self.handoffs_remaining:
                        self.logger.warning(
                            _("Handoffs first mode still has handoffs "
                              "remaining.  Aborting current "
                              "replication pass."))
                        break
                if not self.check_ring(job['policy'].object_ring):
                    self.logger.info(
                        _("Ring change detected. Aborting "
                          "current replication pass."))
                    return
                try:
                    if isfile(job['path']):
                        # Clean up any (probably zero-byte) files where a
                        # partition should be.
                        self.logger.warning(
                            'Removing partition directory '
                            'which was a file: %s', job['path'])
                        os.remove(job['path'])
                        continue
                except OSError:
                    continue
                if job['delete']:
                    self.run_pool.spawn(self.update_deleted, job)
                else:
                    self.run_pool.spawn(self.update, job)
            current_nodes = None
            with Timeout(self.lockup_timeout):
                self.run_pool.waitall()
        except (Exception, Timeout):
            if current_nodes:
                self._add_failure_stats([(failure_dev['replication_ip'],
                                          failure_dev['device'])
                                         for failure_dev in current_nodes])
            else:
                self._add_failure_stats(self.all_devs_info)
            self.logger.exception(_("Exception in top-level replication loop"))
            self.kill_coros()
        finally:
            stats.kill()
            lockup_detector.kill()
            self.stats_line()
            self.stats['attempted'] = self.replication_count
Example #27
'''


class CountThread(threading.Thread):
    def run(self):
        count()


print "running count() as two threads"
c1 = CountThread()
c2 = CountThread()
start_time = datetime.datetime.now()
c1.start()
c2.start()
c1.join()
c2.join()
end_time = datetime.datetime.now()
print end_time - start_time
'''
running count() in a green threading manner
'''
print "running count() as two green threads"

start_time = datetime.datetime.now()

pool = GreenPool()
pool.spawn(count)  # pass the function itself; count() would run it inline first
pool.spawn(count)
pool.waitall()     # wait for both green threads before stopping the clock

end_time = datetime.datetime.now()
print end_time - start_time
Example #28
    def test_connection_pooling(self):
        with patch('swift.common.memcached.socket') as mock_module:

            def mock_getaddrinfo(host,
                                 port,
                                 family=socket.AF_INET,
                                 socktype=socket.SOCK_STREAM,
                                 proto=0,
                                 flags=0):
                return [(family, socktype, proto, '', (host, port))]

            mock_module.getaddrinfo = mock_getaddrinfo

            # patch socket, stub socket.socket, mock sock
            mock_sock = mock_module.socket.return_value

            # track clients waiting for connections
            connected = []
            connections = Queue()
            errors = []

            def wait_connect(addr):
                connected.append(addr)
                sleep(0.1)  # yield
                val = connections.get()
                if val is not None:
                    errors.append(val)

            mock_sock.connect = wait_connect

            memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                                     connect_timeout=10)
            # sanity
            self.assertEqual(1, len(memcache_client._client_cache))
            for server, pool in memcache_client._client_cache.items():
                self.assertEqual(2, pool.max_size)

            # make 10 requests "at the same time"
            p = GreenPool()
            for i in range(10):
                p.spawn(memcache_client.set, 'key', 'value')
            for i in range(3):
                sleep(0.1)
                self.assertEqual(2, len(connected))

            # give out a connection
            connections.put(None)

            # at this point, only one connection should have actually been
            # created, the other is in the creation step, and the rest of the
            # clients are not attempting to connect. we let this play out a
            # bit to verify.
            for i in range(3):
                sleep(0.1)
                self.assertEqual(2, len(connected))

            # finish up, this allows the final connection to be created, so
            # that all the other clients can use the two existing connections
            # and no others will be created.
            connections.put(None)
            connections.put('nono')
            self.assertEqual(2, len(connected))
            p.waitall()
            self.assertEqual(2, len(connected))
            self.assertEqual(0, len(errors),
                             "A client was allowed a third connection")
            connections.get_nowait()
            self.assertTrue(connections.empty())
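
The test above throttles its mock sockets with an eventlet Queue: every client that tries to connect blocks in get() until the test put()s a token, letting the assertions control exactly when each connection is allowed to complete. A stripped-down sketch of that gating technique; worker and the assertions are illustrative:

from eventlet import GreenPool, sleep
from eventlet.queue import Queue

gate = Queue()
finished = []

def worker(i):
    gate.get()  # park here until the main green thread releases a token
    finished.append(i)

pool = GreenPool()
for i in range(4):
    pool.spawn(worker, i)
sleep(0)               # let every worker run up to its gate.get()
assert finished == []  # all four are parked
gate.put(None)         # release exactly one worker
sleep(0)
assert len(finished) == 1
for _ in range(3):
    gate.put(None)     # release the rest
pool.waitall()
assert len(finished) == 4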
Example #29
    def reap_container(self, account, account_partition, account_nodes,
                       container):
        """
        Deletes the data and the container itself for the given container. This
        will call :func:`reap_object` up to sqrt(self.concurrency) times
        concurrently for the objects in the container.

        If there is any exception while deleting a single object, the process
        will continue for any other objects in the container and the failed
        objects will be tried again the next time this function is called with
        the same parameters.

        If there is any exception while listing the objects for deletion, the
        process will stop (but will obviously be tried again the next time this
        function is called with the same parameters). This is a possibility
        since the listing comes from querying just the primary remote container
        server.

        Once deletion has been attempted for every object, deletion of the
        container itself is attempted by sending a delete request to all
        container nodes. The format of the delete request is such that each
        container server will update a corresponding account server, removing
        the container from the account's listing.

        This function returns nothing and should raise no exception but only
        update various self.stats_* values for what occurs.

        :param account: The name of the account for the container.
        :param account_partition: The partition for the account on the account
                                  ring.
        :param account_nodes: The primary node dicts for the account.
        :param container: The name of the container to delete.

        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the account node dicts.
        """
        account_nodes = list(account_nodes)
        part, nodes = self.get_container_ring().get_nodes(account, container)
        node = nodes[-1]
        pool = GreenPool(size=self.object_concurrency)
        marker = ''
        while True:
            objects = None
            try:
                objects = direct_get_container(
                    node,
                    part,
                    account,
                    container,
                    marker=marker,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout)[1]
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
            if not objects:
                break
            try:
                for obj in objects:
                    if isinstance(obj['name'], unicode):
                        obj['name'] = obj['name'].encode('utf8')
                    pool.spawn(self.reap_object, account, container, part,
                               nodes, obj['name'])
                pool.waitall()
            except Exception:
                self.logger.exception(
                    _('Exception with objects for container '
                      '%(container)s for account %(account)s'), {
                          'container': container,
                          'account': account
                      })
            marker = objects[-1]['name']