Example #1
def replicate(ctx, args):
    """Make args.slave a replica of args.master."""
    master = Node.from_uri(args.master)
    if not master.is_master():
        ctx.abort('Node {} is not a master.'.format(args.master))
    cluster = Cluster.from_node(master)
    cluster.add_node(args.slave)
    slave = Node.from_uri(args.slave)
    try:
        slave.replicate(master.name)
    except redis.ResponseError as e:
        ctx.abort(str(e))
    cluster.wait()
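The command functions in these examples all receive a ctx object that exposes abort() and an args namespace. As a rough, hypothetical sketch only (the Context class and the CLI wiring below are assumptions, not part of redis-clu; only the attribute names master and slave come from the example), replicate could be driven from argparse like this:

import sys
import argparse


class Context:
    '''Hypothetical stand-in for the ctx object used by the commands.'''

    def abort(self, message):
        # Print the error and stop, mirroring how the examples call ctx.abort().
        print(message, file=sys.stderr)
        sys.exit(1)


def main():
    parser = argparse.ArgumentParser(prog='redis-clu replicate')
    parser.add_argument('master', help='URI of the master node')
    parser.add_argument('slave', help='URI of the node that will replicate the master')
    replicate(Context(), parser.parse_args())


if __name__ == '__main__':
    main()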
Example #2
def add_multi(ctx, args):
    cluster = Cluster.from_node(Node.from_uri(args.cluster))
    if len(args.masters) > len(cluster.nodes):
        ctx.abort('Length of new node list should be less than length of '
                  'cluster master nodes.')
    if not cluster.healthy():
        ctx.abort(
            'Cluster not healthy. Run "redis-clu fix {}" first'.format(
                args.cluster))
    # Materialise the list so len() and append() work under Python 3.
    masters = [n for n in cluster.nodes if n.is_master()]
    residual_count = len(masters) % len(args.masters)
    if residual_count:
        # Pad with placeholder nodes so the masters split evenly
        # across the new masters.
        for i in range(len(args.masters) - residual_count):
            masters.append(MockNode())

    for master in args.masters:
        cluster.add_node(master)

    shard_ratio = len(masters) // len(args.masters)  # integer chunk size for slicing

    shuffle(masters)

    sub_clusters = []
    while masters:
        sub_nodes = masters[:shard_ratio]
        new_master = args.masters.pop()
        # Drop the placeholders; only real nodes take part in resharding.
        nodes = [n for n in sub_nodes if not isinstance(n, MockNode)]
        nodes.append(Node.from_uri(new_master))
        hash_slots = len(list(itertools.chain(
            *[n.slots for n in nodes])))
        sub_cluster = Cluster(nodes, hash_slots=hash_slots,
                              parent_nodes=cluster.nodes)
        sub_cluster.set_key_migration_count(int(args.keyMigrationCount))
        sub_clusters.append(sub_cluster)
        for sn in sub_nodes:
            masters.pop(masters.index(sn))

    future_to_args = dict()
    executor = concurrent.futures.ThreadPoolExecutor(
        max_workers=len(sub_clusters))

    for sub_cluster in sub_clusters:
        f_args = ()  # avoid shadowing the function's args parameter
        future = executor.submit(sub_cluster.reshard)
        future_to_args.setdefault(future, f_args)

    concurrent.futures.wait(future_to_args)
    executor.shutdown(wait=False)
    time.sleep(1)
    cluster.wait()

    for sub_cluster in sub_clusters:
        sub_cluster.print_attempts()
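The core of add_multi is the grouping step: the existing masters are padded with placeholder nodes until they divide evenly among the new masters, shuffled, and then split into equal chunks, one chunk per new master. A self-contained sketch of just that grouping logic, with plain strings standing in for nodes and a trivial Placeholder class as a stand-in for MockNode, is shown below.

from random import shuffle


class Placeholder:
    '''Stand-in for MockNode: pads the list so it splits evenly.'''


def group_masters(existing, new_masters):
    masters = list(existing)
    # Pad until len(masters) is a multiple of len(new_masters).
    residual = len(masters) % len(new_masters)
    if residual:
        masters.extend(Placeholder() for _ in range(len(new_masters) - residual))

    chunk = len(masters) // len(new_masters)  # integer chunk size for slicing
    shuffle(masters)

    groups = []
    while masters:
        sub, masters = masters[:chunk], masters[chunk:]
        # Placeholders are dropped before the chunk is handed to a new master.
        groups.append([m for m in sub if not isinstance(m, Placeholder)])
    return groups


# Five existing masters shared across two new masters.
print(group_masters(['m1', 'm2', 'm3', 'm4', 'm5'], ['new1', 'new2']))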
Example #3
def remove(ctx, args):
    '''
    Remove a node from the cluster.
    '''
    cluster = Cluster.from_node(Node.from_uri(args.cluster))
    if not cluster.healthy():
        ctx.abort('Cluster not healthy. Run "redis-clu fix {}" first'.format(
            args.cluster))
    cluster.set_key_migration_count(int(args.keyMigrationCount))
    cluster.remove_node(Node.from_uri(args.node))
    cluster.wait()
    cluster.print_attempts()
Example #4
def status(args):
    cluster = Cluster.from_node(Node.from_uri(args.cluster))
    dis = []
    for n in cluster.masters:
        slaves = ','.join([s['addr'] for s in n.slaves(n.name)])
        msg = '{} {}:{} {} {}'.format(n.name, n.host, n.port, len(n.slots),
                                      slaves)
        dis.append(msg)
    echo('\n'.join(dis))
    echo('Masters:', len(cluster.masters))
    echo('Slaves:', len(cluster.nodes) - len(cluster.masters))
    covered_slots = sum(len(n.slots) for n in cluster.masters)
    echo('Covered Slots:', covered_slots)
    if covered_slots == cluster.CLUSTER_HASH_SLOTS:
        echo('Cluster is healthy!')
    else:
        echo('!!!Cluster is not healthy!!!')
        echo('Either there is no cluster or the existing cluster is not healthy.')
        echo('If there is no cluster, run "redis-clu create <master_list>";')
        echo('otherwise run "redis-clu fix {}" to repair the existing cluster.'.format(args.cluster))

    echo('\n')

    for master in cluster.masters:
        echo(master)
        echo('===========================')
        echo(master.execute_command('info', 'keyspace'))
        echo('\n')
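The health check in status boils down to one condition: the slots owned by the masters must add up to every hash slot in the keyspace (Redis Cluster uses 16384 slots, which is what cluster.CLUSTER_HASH_SLOTS refers to here). A minimal standalone version of that coverage check, with node objects replaced by plain slot lists, might look like this:

CLUSTER_HASH_SLOTS = 16384  # fixed number of hash slots in Redis Cluster


def slots_covered(slot_lists):
    '''Return True if the per-master slot lists account for every hash slot.'''
    return sum(len(slots) for slots in slot_lists) == CLUSTER_HASH_SLOTS


# Two masters splitting the keyspace roughly in half.
print(slots_covered([range(0, 8192), range(8192, 16384)]))  # True

Like the original, this only sums slot counts; it does not detect overlapping slot assignments.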
Example #5
def __init__(self, masters):
    master_candidates = [Node.from_uri(i) for i in masters]
    self.master_candidates = [
        MasterCandidate(i) for i in master_candidates
    ]
    self.masters = []
    self.cluster = None
Example #6
def fix(args):
    cluster = Cluster.from_node(Node.from_uri(args.cluster))
    cluster.fix_open_slots()
    if int(args.force) == 1:
        cluster.bind_slots_force()
    else:
        cluster.fill_slots()
    cluster.wait()
    cluster.print_attempts()
Example #7
def add(ctx, args):
    """
    Add a master node to the cluster.
    """
    cluster = Cluster.from_node(Node.from_uri(args.cluster))
    if not cluster.healthy():
        ctx.abort('Cluster not healthy. Run "redis-clu fix {}" first'.format(
            args.cluster))
    cluster.set_key_migration_count(int(args.keyMigrationCount))
    cluster.add_node(args.master)
    cluster.reshard()
    cluster.wait()
    cluster.print_attempts()
Example #8
def reset(args):
    cluster = Cluster.from_node(Node.from_uri(args.cluster))
    future_to_args = dict()
    executor = concurrent.futures.ThreadPoolExecutor(
        max_workers=len(cluster.masters))

    for master in cluster.masters:
        f_args = ()
        future = executor.submit(master.flushall)
        future_to_args.setdefault(future, f_args)

    concurrent.futures.wait(future_to_args)
    executor.shutdown(wait=False)

    if int(args.hard) == 1:
        for node in cluster.nodes:
            node.reset(hard=True)
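reset, like add_multi, fans a blocking call out over every master through a ThreadPoolExecutor and then waits on the collected futures. An equivalent way to phrase that fan-out, using the executor as a context manager and as_completed() so that worker exceptions surface, is sketched below; flush_one is a dummy stand-in for master.flushall and the addresses are made up, none of it part of redis-clu.

import time
import concurrent.futures


def flush_one(addr):
    # Dummy stand-in for master.flushall; sleeps to simulate a blocking call.
    time.sleep(0.1)
    return addr


masters = ['127.0.0.1:7000', '127.0.0.1:7001', '127.0.0.1:7002']

with concurrent.futures.ThreadPoolExecutor(max_workers=len(masters)) as executor:
    futures = {executor.submit(flush_one, addr): addr for addr in masters}
    for future in concurrent.futures.as_completed(futures):
        # result() re-raises any exception from the worker thread.
        print(futures[future], future.result())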