示例#1
0
    def run(self, args, manage_args):
        """Print a per-queue summary of the environment's celery configuration.

        Aggregates worker count, total concurrency, pooling modes and worker
        hosts for every queue across all machines, then renders the result
        via print_table (optionally as CSV, per args.csv).
        """
        environment = get_environment(args.env_name)
        celery_processes = environment.app_processes_config.celery_processes

        def _fresh_stats():
            return {'num_workers': 0, 'concurrency': 0, 'pooling': set(), 'worker_hosts': set()}

        aggregated = defaultdict(_fresh_stats)
        for machine, queue_config in celery_processes.items():
            for name, opts in queue_config.items():
                entry = aggregated[name]
                entry['num_workers'] += opts.num_workers
                # total concurrency across all workers of this queue on this host
                entry['concurrency'] += opts.num_workers * opts.concurrency
                entry['pooling'].add(opts.pooling)
                entry['worker_hosts'].add(machine)

        headers = ['Pooling', 'Worker Queues', 'Processes', 'Concurrency', 'Avg Concurrency per worker']
        if args.show_workers:
            headers.append('Worker Hosts')

        rows = []
        for name in sorted(aggregated):
            entry = aggregated[name]
            worker_count = entry['num_workers']
            total_concurrency = entry['concurrency']
            row = [
                list(entry['pooling'])[0],
                '`{}`'.format(name),
                worker_count,
                total_concurrency,
                total_concurrency // worker_count,
            ]
            if args.show_workers:
                aliases = sorted(get_machine_alias(environment, h) for h in entry['worker_hosts'])
                row.append(','.join(aliases))
            rows.append(row)

        print_table(headers, rows, args.csv)
示例#2
0
def test_get_machine_alias():
    """Every host in the small_cluster environment maps to its demo_serverN alias."""
    env = get_environment('small_cluster')

    all_hosts = set()
    for group_hosts in env.groups.values():
        all_hosts.update(group_hosts)
    assert_equal(all_hosts, {'172.19.3.0', '172.19.3.1', '172.19.3.2', '172.19.3.3'})

    aliases = {get_machine_alias(env, host) for host in all_hosts}
    assert_equal(aliases, {'demo_server0', 'demo_server1', 'demo_server2', 'demo_server3'})
示例#3
0
def get_couch_config(environment, nodes=None, port=15984, local_port=15986):
    """Build a couchdb cluster Config for the given environment.

    :param environment: environment object providing groups and secrets
    :param nodes: explicit node list; falls back to the 'couchdb2' group
    :param port: CouchDB cluster port used for the control node
    :param local_port: CouchDB node-local admin port
    :return: Config with credentials and per-node Erlang-name aliases set
    """
    couch_nodes = nodes if nodes else environment.groups['couchdb2']
    node_aliases = {}
    for node in couch_nodes:
        node_aliases['couchdb@{}'.format(node)] = get_machine_alias(environment, node)

    config = Config(
        control_node_ip=couch_nodes[0],
        control_node_port=port,
        control_node_local_port=local_port,
        username=environment.get_secret('COUCH_USERNAME'),
        aliases=node_aliases,
    )
    config.set_password(environment.get_secret('COUCH_PASSWORD'))
    return config
示例#4
0
def get_couch_config(environment, nodes=None):
    """Build a couchdb cluster Config for the given environment.

    :param environment: environment object providing groups and vault vars
    :param nodes: explicit node list; falls back to the 'couchdb2' group
    :return: Config with credentials and per-node Erlang-name aliases set
    """
    couch_nodes = nodes if nodes else environment.groups['couchdb2']
    node_aliases = {}
    for node in couch_nodes:
        node_aliases['couchdb@{}'.format(node)] = get_machine_alias(environment, node)

    username = environment.get_vault_var('localsettings_private.COUCH_USERNAME')
    config = Config(
        control_node_ip=couch_nodes[0],
        control_node_port=15984,
        control_node_local_port=15986,
        username=username,
        aliases=node_aliases,
    )
    config.set_password(
        environment.get_vault_var('localsettings_private.COUCH_PASSWORD'))
    return config
示例#5
0
def get_couch_config(environment, nodes=None, port=15984, local_port=15986, couchdb_version=None):
    """Build a couchdb cluster Config for the given environment.

    :param environment: environment object providing groups, public vars and secrets
    :param nodes: explicit node list; falls back to the 'couchdb2' group
    :param port: CouchDB cluster port used for the control node
    :param local_port: node-local admin port (ignored for CouchDB >= 3.0.0)
    :param couchdb_version: override; defaults to public_vars['couchdb_version'] or '2.3.1'
    :return: Config with credentials and per-node Erlang-name aliases set
    """
    couch_nodes = nodes if nodes else environment.groups['couchdb2']

    if couchdb_version is None:
        couchdb_version = environment.public_vars.get('couchdb_version', '2.3.1')
    # For 3.x the node-local port collapses onto the main port —
    # presumably because 3.x dropped the separate admin interface; confirm upstream.
    if LooseVersion(couchdb_version) >= LooseVersion('3.0.0'):
        local_port = port

    node_aliases = {}
    for node in couch_nodes:
        node_aliases['couchdb@{}'.format(node)] = get_machine_alias(environment, node)

    config = Config(
        control_node_ip=couch_nodes[0],
        control_node_port=port,
        control_node_local_port=local_port,
        couchdb_version=couchdb_version,
        username=environment.get_secret('COUCH_USERNAME'),
        aliases=node_aliases,
    )
    config.set_password(environment.get_secret('COUCH_PASSWORD'))
    return config
示例#6
0
    def run(self, args, manage_args):
        """Print a plain-text table summarizing celery queue configuration.

        Aggregates per-queue worker counts, total concurrency, pooling modes
        and worker hosts across all machines in the environment, then prints
        one row per queue, column-aligned on the longest queue name.
        """
        environment = get_environment(args.env_name)
        celery_processes = environment.app_processes_config.celery_processes
        by_queue = defaultdict(
            lambda: {
                'num_workers': 0,
                'concurrency': 0,
                'pooling': set(),
                'worker_hosts': set()
            })
        for host, queues in celery_processes.items():
            for queue_name, options in queues.items():
                queue = by_queue[queue_name]
                queue['num_workers'] += options.num_workers
                # total concurrency = per-worker concurrency x worker count
                queue[
                    'concurrency'] += options.concurrency * options.num_workers
                queue['pooling'].add(options.pooling)
                queue['worker_hosts'].add(host)

        # default=0 avoids a ValueError crash when the environment has no
        # celery queues configured; we still print the (empty) table.
        max_name_len = max((len(name) for name in by_queue), default=0)
        template = "{{:<8}} | {{:<{}}} | {{:<12}} | {{:<12}} | {{:<12}} | {{:<12}}".format(
            max_name_len + 2)
        print(
            template.format('Pooling', 'Worker Queues', 'Processes',
                            'Concurrency', 'Avg Concurrency per worker',
                            'Worker Hosts'))
        print(
            template.format('-------', '-------------', '---------',
                            '-----------', '--------------------------',
                            '------------'))
        for queue_name, stats in sorted(by_queue.items(), key=itemgetter(0)):
            workers = stats['num_workers']
            concurrency_ = stats['concurrency']
            worker_hosts = stats['worker_hosts']
            print(
                template.format(
                    list(stats['pooling'])[0], '`{}`'.format(queue_name),
                    workers, concurrency_, concurrency_ // workers, ','.join(
                        sorted([
                            get_machine_alias(environment, worker_host)
                            for worker_host in worker_hosts
                        ]))))