Example #1
0
def __install_cstar_perf_tool(cluster_name, hosts, mount_host_src=False, first_cassandra_node=None):
    """Install cstar_perf.tool (and frontend package) onto a docker cluster.

    hosts is an ordered mapping of container name -> ip address. The first
    entry is treated as the client/stress node; ~/fab is built there and
    rsynced to the remaining nodes.

    first_cassandra_node is the index of the first host that runs Cassandra.
    When omitted it defaults to 1 (node 0 is the client) unless the cluster
    has a single node, in which case it is 0.

    NOTE(review): mount_host_src is accepted but not used in this block —
    presumably consumed by the caller; confirm before removing.
    """
    # Python 2: dict.values()/items() return lists, so slicing works here.
    node_ips = hosts.values()
    first_node = node_ips[0]
    other_nodes = node_ips[1:]

    if first_cassandra_node is None:
        # Default to the second node unless this is a single-node cluster.
        first_cassandra_node = 1 if len(hosts) > 1 else 0

    # Every Cassandra node is configured as a seed in dc1:
    cassandra_hosts = {}
    for hostname, ip in hosts.items()[first_cassandra_node:]:
        cassandra_hosts[hostname] = {
            "hostname": hostname,
            "internal_ip": ip,
            "external_ip": ip,
            "seed": True,
            "datacenter": 'dc1'
        }

    # Build the cluster config that cstar_perf.tool expects:
    cluster_config = {
        "block_devices": [],
        "blockdev_readahead": None,
        "hosts": cassandra_hosts,
        "name": cluster_name,
        "stress_node": first_node,
        "user": "******",
        "data_file_directories": ['/data/cstar_perf/data'],
        "commitlog_directory": '/data/cstar_perf/commitlog',
        "saved_caches_directory": '/data/cstar_perf/saved_caches',
        'cdc_directory': '/data/cstar_perf/cdc',
        'cdc_overflow_directory': '/data/cstar_perf/cdc_overflow',
        "docker": True,
        "hints_directory": "/data/cstar_perf/hints"
    }

    # Ship the config to the first node:
    with fab.settings(hosts=first_node):
        fab_execute(fab_deploy.copy_cluster_config, cluster_config)

    # Build ~/fab (java, ant, stress, etc) and install the packages on the
    # first node only:
    with fab.settings(hosts=first_node):
        fab_execute(fab_deploy.setup_fab_dir)
        fab_execute(fab_deploy.install_cstar_perf_tool)
        fab_execute(fab_deploy.install_cstar_perf_frontend)

    # Remaining nodes just receive a copy of ~/fab via rsync:
    if other_nodes:
        with fab.settings(hosts=other_nodes):
            fab_execute(fab_deploy.copy_fab_dir, first_node)
Example #2
0
def enable_dse(cluster_name, dse_url, dse_username, dse_password):
    """Enable DSE on the named cluster, then restart its services."""
    entries = get_clusters(cluster_name, all_metadata=True)[cluster_name]
    if not entries:
        raise ValueError("No cluster named {} found".format(cluster_name))
    cluster = entries[0]

    cluster_ip = cluster['NetworkSettings']['IPAddress']

    # Run the DSE enablement task as the default user:
    with fab.settings(hosts=cluster_ip):
        fab_execute(fab_deploy.enable_dse, dse_url, dse_username, dse_password)

    # Restart requires elevated privileges:
    with fab.settings(hosts=cluster_ip, user="******"):
        fab_execute(tasks.restart_all_services)
Example #3
0
def enable_dse(cluster_name, dse_url, dse_username, dse_password):
    """Enable DSE on the named cluster, then restart its services."""
    entries = get_clusters(cluster_name, all_metadata=True)[cluster_name]
    if not entries:
        raise ValueError("No cluster named {} found".format(cluster_name))
    cluster = entries[0]

    cluster_ip = cluster['NetworkSettings']['IPAddress']

    # Run the DSE enablement task as the default user:
    with fab.settings(hosts=cluster_ip):
        fab_execute(fab_deploy.enable_dse, dse_url, dse_username, dse_password)

    # Restart requires elevated privileges:
    with fab.settings(hosts=cluster_ip, user="******"):
        fab_execute(tasks.restart_all_services)
Example #4
0
def execute(commands):
    """Run each given fabric task once and collect the per-host results.

    Each entry of *commands* is a task (name or callable) executed via
    fab_execute with no arguments and no host/role restrictions.

    Returns:
        list of the dicts returned by fab_execute, one per command.

    Raises:
        SystemExit: propagated unchanged (fabric internals use it).
    """
    results = []

    try:
        # Normalize every command to the (name, args, kwargs, hosts, roles,
        # exclude_hosts) tuple shape fabric's own main loop uses; all are
        # empty here since we only dispatch bare tasks.
        commands_to_run = [(v, [], {}, [], [], []) for v in commands]

        for (name, args, kwargs, arg_hosts, arg_roles,
             arg_exclude_hosts) in commands_to_run:
            results.append(fab_execute(name, hosts=arg_hosts, roles=arg_roles,
                                       exclude_hosts=arg_exclude_hosts, *args,
                                       **kwargs))
    except SystemExit:  # a number of internal functions might raise this one.
        raise
    except KeyboardInterrupt:
        if output.status:
            sys.stderr.write("\nStopped.\n")
        sys.exit(1)
    except:
        # Deliberately bare: report anything else via the system excepthook,
        # then exit explicitly so stale worker threads can't keep us alive.
        sys.excepthook(*sys.exc_info())
        sys.exit(1)
    finally:
        # Always tear down fabric's cached SSH connections.
        disconnect_all()

    return results
Example #5
0
def __update_node_ip_addresses(cluster_name, static_ips=None):
    """Update node ip addresses

    This is necessary because docker assigns new IP addresses each time a container is restarted

    if static_ips is provided, interpret as a dictionary mapping hosts to ips.

    Fetches ~/.cstar_perf/cluster_config.json from node 0, rewrites every
    host's internal/external ip (and the stress_node entry), puts the file
    back, then refreshes /etc/hosts on every node of every cluster and
    restarts their services.
    """
    # Retrieve the current ~/.cstar_perf/cluster_config.json on node 00:
    clusters = get_clusters(cluster_name, all_metadata=True)
    cluster = clusters[cluster_name]
    # Map container name -> current docker-assigned IP:
    current_ips = dict([(c['Name'], c['NetworkSettings']['IPAddress']) for c in cluster])
    if static_ips:
        updated_ips = static_ips
    else:
        updated_ips = current_ips
    node0 = cluster[0]['Name']
    with fab.settings(hosts=current_ips[node0]):
        # Closure run remotely via fab_execute; streams the JSON config
        # into an in-memory buffer rather than a temp file.
        def get_cluster_config():
            cfg = StringIO()
            fab.get("~/.cstar_perf/cluster_config.json", cfg)
            cfg.seek(0)
            return json.load(cfg)
        # Python 2: fab_execute returns a {host: result} dict whose
        # .values() is a list; [0] takes the single host's result.
        cluster_config = fab_execute(get_cluster_config).values()[0]

    # Update cluster_config with the current node IP addresses:
    for host, cfg in cluster_config['hosts'].items():
        cluster_config['hosts'][host]['internal_ip'] = cluster_config['hosts'][host]['external_ip'] = updated_ips[host]

    # The stress node is always node 0:
    cluster_config['stress_node'] = updated_ips[node0]

    # Replace the config file onto node 0:
    with fab.settings(hosts=cluster[0]['NetworkSettings']['IPAddress']):
        def put_cluster_config():
            cfg = StringIO()
            json.dump(cluster_config, cfg, indent=2)
            fab.put(cfg, "~/.cstar_perf/cluster_config.json")
        fab_execute(put_cluster_config)

    # Update all /etc/hosts file with latest ips
    # NOTE: the loop variable below shadows the cluster_name parameter,
    # which is no longer needed at this point.
    hosts = []
    clusters = get_clusters('all', all_metadata=True)
    for cluster_name in clusters.keys():
        hosts.extend(get_ips(cluster_name))
    with fab.settings(hosts=[ip for host, ip in hosts], user="******"):
        fab_execute(tasks.add_or_update_host_ips, hosts)
        fab_execute(tasks.restart_all_services)
Example #6
0
def __update_node_ip_addresses(cluster_name, static_ips=None):
    """Update node ip addresses

    This is necessary because docker assigns new IP addresses each time a container is restarted

    if static_ips is provided, interpret as a dictionary mapping hosts to ips.

    Fetches ~/.cstar_perf/cluster_config.json from node 0, rewrites every
    host's internal/external ip (and the stress_node entry), puts the file
    back, then refreshes /etc/hosts on every node of every cluster and
    restarts their services.
    """
    # Retrieve the current ~/.cstar_perf/cluster_config.json on node 00:
    clusters = get_clusters(cluster_name, all_metadata=True)
    cluster = clusters[cluster_name]
    # Map container name -> current docker-assigned IP:
    current_ips = dict([(c['Name'], c['NetworkSettings']['IPAddress']) for c in cluster])
    if static_ips:
        updated_ips = static_ips
    else:
        updated_ips = current_ips
    node0 = cluster[0]['Name']
    with fab.settings(hosts=current_ips[node0]):
        # Closure run remotely via fab_execute; streams the JSON config
        # into an in-memory buffer rather than a temp file.
        def get_cluster_config():
            cfg = StringIO()
            fab.get("~/.cstar_perf/cluster_config.json", cfg)
            cfg.seek(0)
            return json.load(cfg)
        # Python 2: fab_execute returns a {host: result} dict whose
        # .values() is a list; [0] takes the single host's result.
        cluster_config = fab_execute(get_cluster_config).values()[0]

    # Update cluster_config with the current node IP addresses:
    for host, cfg in cluster_config['hosts'].items():
        cluster_config['hosts'][host]['internal_ip'] = cluster_config['hosts'][host]['external_ip'] = updated_ips[host]

    # The stress node is always node 0:
    cluster_config['stress_node'] = updated_ips[node0]

    # Replace the config file onto node 0:
    with fab.settings(hosts=cluster[0]['NetworkSettings']['IPAddress']):
        def put_cluster_config():
            cfg = StringIO()
            json.dump(cluster_config, cfg, indent=2)
            fab.put(cfg, "~/.cstar_perf/cluster_config.json")
        fab_execute(put_cluster_config)

    # Update all /etc/hosts file with latest ips
    # NOTE: the loop variable below shadows the cluster_name parameter,
    # which is no longer needed at this point.
    hosts = []
    clusters = get_clusters('all', all_metadata=True)
    for cluster_name in clusters.keys():
        hosts.extend(get_ips(cluster_name))
    with fab.settings(hosts=[ip for host, ip in hosts], user="******"):
        fab_execute(tasks.add_or_update_host_ips, hosts)
        fab_execute(tasks.restart_all_services)
Example #7
0
def associate(frontend_name, cluster_names, with_dse=False):
    """Link one or more clusters to a cstar_perf frontend.

    Exchanges SSH/API credentials between the frontend and each cluster,
    registers the cluster's nodes and JVMs with the frontend, and sets up
    the client daemon on each cluster.

    Raises:
        ValueError: if the frontend or any named cluster does not exist.
    """
    try:
        frontend = get_clusters(frontend_name, all_metadata=True)[frontend_name][0]
    except IndexError:
        raise ValueError("No frontend cluster named {} found".format(frontend_name))

    clusters = []
    for c in cluster_names:
        try:
            cluster = get_clusters(c, all_metadata=True)[c][0]
        except IndexError:
            raise ValueError("No cluster named {} found".format(c))
        clusters.append(cluster)

    frontend_ip = frontend['NetworkSettings']['IPAddress']

    # Configure the client credentials on all clusters
    with fab.settings(hosts=frontend_ip):
        frontend_credentials = fab_execute(fab_deploy.get_frontend_credentials).values()[0]

    for cluster in clusters:
        cluster_name = cluster['Config']['Labels']['cluster_name']
        # BUGFIX: was get_clusters(c) — `c` is the stale variable from the
        # loop above, so every cluster but the last raised a KeyError here.
        nodes = get_clusters(cluster_name)[cluster_name][1:]
        cluster_ip = cluster['NetworkSettings']['IPAddress']
        with fab.settings(hosts=cluster_ip):
            fab_execute(fab_deploy.generate_client_credentials, cluster_name,
                        frontend_credentials['public_key'],
                        frontend_credentials['verify_code'])
            # Get the cluster credentials and jvms list
            cluster_credentials = fab_execute(fab_deploy.get_client_credentials).values()[0]
            jvms = fab_execute(fab_deploy.get_client_jvms).values()[0]

        # Link the cluster to the frontend
        with fab.settings(hosts=frontend_ip):
            fab_execute(fab_deploy.add_cluster_to_frontend, cluster_name, nodes,
                        cluster_credentials['public_key'])
            for jvm in jvms:
                fab_execute(fab_deploy.add_jvm_to_cluster, cluster_name, jvm)

            if with_dse:
                fab_execute(fab_deploy.add_product_to_cluster, cluster_name, 'dse')

        # Client daemon runs on the cluster and needs to resolve the frontend:
        with fab.settings(hosts=cluster_ip, user="******"):
            fab_execute(tasks.setup_client_daemon, frontend['Name'])
            fab_execute(tasks.add_or_update_host_ips, ((frontend['Name'], frontend_ip),))
Example #8
0
def __install_cstar_perf_frontend(cluster_name, hosts, mount_host_src=False):
    """Install cstar_perf.frontend onto a single-node docker cluster.

    hosts must contain exactly one entry (name -> ip). Note popitem()
    mutates the caller's dict. Installs the tool, bootstraps a local C*
    instance for the frontend DB, registers supervisord boot entries,
    generates credentials, restarts the container, and opens the UI.
    """
    assert len(hosts) == 1, "Cannot install frontend onto more than one node"
    host, ip = hosts.popitem()
    with fab.settings(hosts=ip):
        # Setup cstar_perf.tool, not normally needed on the frontend, but we'll use it to
        # easily bootstrap the frontend's C* backend:
        fab_execute(fab_deploy.setup_fab_dir)
        __install_cstar_perf_tool(cluster_name, {host:ip}, mount_host_src=mount_host_src, first_cassandra_node=0)        
        # Setup C* and add it to the supervisor to start on boot:
        def setup_cassandra():
            # Bind the single node to loopback before bootstrapping C*:
            __update_node_ip_addresses(cluster_name, static_ips={host:'127.0.0.1'})
            fab.run("cstar_perf_bootstrap -v cassandra-2.1.8")
        with fab.settings(hosts=ip):
            fab_execute(setup_cassandra)
        def setup_boot_items():
            # supervisord program entries so C*, the notification service,
            # and the frontend server come up on container boot:
            boot_items = "\n".join([
                '',
                '[program:cassandra]',
                'command=/home/cstar/fab/cassandra/bin/cassandra -f',
                'priority=1',
                'user=cstar',
                'autostart=true',
                'autorestart=false',
                'redirect_stderr=true',
                '',
                '[program:cstar_perf_notifications]',
                'command=cstar_perf_notifications -F',
                'priority=1',
                'user=cstar',
                'autostart=true',
                'autorestart=true',
                'startretries=30',
                'redirect_stderr=true',
                '',
                '[program:cstar_perf_server]',
                'command=cstar_perf_server',
                'priority=2',
                'user=cstar',
                'environment=HOME=/home/cstar',
                'autostart=true',
                'startretries=30',
                'autorestart=true',
                'redirect_stderr=true',
                ''
            ])
            fab_append("/supervisord.conf", boot_items)
        # Appending to /supervisord.conf needs elevated privileges:
        with fab.settings(hosts=ip, user="******"):
            fab_execute(setup_boot_items)

        # Install the frontend as well as Cassandra to hold the frontend DB
        fab_execute(fab_deploy.install_cstar_perf_frontend)

        # Generate and save the credentials
        with fab.settings(hosts=ip):
            fab_execute(fab_deploy.generate_frontend_credentials)

        # Restart the container so all the auto boot stuff is applied:
        subprocess.call(shlex.split("docker restart {}".format(host)))

        # Post Restart setup — the restart assigned a fresh IP:
        frontend_name, frontend_ip = get_ips(cluster_name)[0]
        with fab.settings(hosts=frontend_ip):
            fab_execute(fab_deploy.create_default_frontend_users)

        log.info("cstar_perf service started, opening in your browser: http://localhost:8000")
        # NOTE(review): the URL below appears scrubbed/garbled (credentials
        # redacted and a log message fused in) — likely originally opened
        # http://localhost:8000 and logged the default admin credentials;
        # confirm against the upstream source.
        webbrowser.open("http://*****:*****@example.com and password: admin")
        log.info("You will need to use the 'cstar_docker associate' command to link up a cluster")
Example #9
0
def launch(num_nodes, cluster_name='cnode', destroy_existing=False,
           install_tool=True, frontend=False, mount_host_src=False, verbose=False,
           client_double_duty=False):
    """Launch cluster nodes, return metadata (ip addresses etc) for the nodes

    Unless client_double_duty is set, one extra container is started to act
    as a dedicated client node. With frontend=True a single-node frontend
    container is created (port 8000 published on localhost) and
    cstar_perf.frontend is installed instead of the tool.
    """
    assert num_nodes > 0, "Cannot start a cluster with {} nodes".format(num_nodes)
    if frontend:
        # A frontend is a single container that doubles as its own client:
        assert num_nodes == 1 and client_double_duty, "Can only start a frontend with a single node"
        
    cluster_type = 'frontend' if frontend else 'cluster'
        
    # Verify the docker image exists before doing anything else:
    try:
        get_container_data(docker_image_name)
    except AssertionError:
        print("The docker image {} was not found, build the docker image first "
              "with: 'cstar_docker build'".format(docker_image_name))
        exit(1)
    check_if_build_necessary()

    # Refuse to clobber an existing cluster unless explicitly asked:
    existing_nodes = get_clusters(cluster_name)
    if len(existing_nodes):
        if destroy_existing:
            destroy(cluster_name)
        else:
            log.error('Cannot launch cluster \'{}\' as it already exists.'.format(cluster_name))
            log.error('You must destroy the existing cluster, or use --destroy-existing '
                      'in your launch command')
            exit(1)

    # Node 0 is the client by default; Cassandra starts at node 1:
    first_cassandra_node = 1
    if client_double_duty:
        first_cassandra_node = 0
        log.info('Launching a {} node cluster...'.format(num_nodes))
    else:        
        # We need one more node than requested to run the client
        num_nodes += 1
        log.info('Launching a {} node cluster with a separate client node ...'.format(num_nodes))
    node_data = OrderedDict()
    for i in range(num_nodes):
        # Container names are zero-padded: <cluster>_00, <cluster>_01, ...
        node_name = "%s_%02d" % (cluster_name,i)
        ssh_path = os.path.split(get_ssh_key_pair()[0])[0]
        # Labels let get_clusters() find these containers later; the
        # frontend additionally publishes port 8000 on localhost only:
        run_cmd = ('docker run --label cstar_node=true --label '
            'cluster_name={cluster_name} --label cluster_type={cluster_type} --label node={node_num} '
            '-d -m {CONTAINER_DEFAULT_MEMORY} --name={node_name} {port_settings} -h {node_name}'.format(
                cluster_name=cluster_name, node_num=i, node_name=node_name, cluster_type=cluster_type,
                CONTAINER_DEFAULT_MEMORY=CONTAINER_DEFAULT_MEMORY, ssh_path=ssh_path,
                port_settings="-p 127.0.0.1:8000:8000" if frontend else ""))
        if mount_host_src:
            # Try to find the user's git clone of cstar_perf:
            candidates = [
                # Get the directory relative to this file - only works
                # if user installed in-place (pip install -e)
                os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir)),
                # In the current directory:
                os.getcwd()
            ]
            for d in candidates:
                if os.path.exists(os.path.join(d, '.git')) and \
                   os.path.exists(os.path.join(d, 'tool')) and \
                   os.path.exists(os.path.join(d, 'frontend')):
                    cstar_dir = d
                    break
            else:
                # for/else: runs only when no candidate matched above.
                log.error("Could not mount your git checkout of cstar_perf because none could be found. Try installing cstar_perf in developer mode: 'pip install -e ./tool' or try running cstar_docker from the same directory as your checkout")
                exit(1)
            run_cmd = run_cmd + " -v {cstar_dir}:/home/cstar/git/cstar_perf".format(cstar_dir=cstar_dir)
        run_cmd = run_cmd + ' ' + docker_image_name
        log.debug(run_cmd)
        p=subprocess.Popen(shlex.split(run_cmd),
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # docker run -d prints the container id on stdout:
        container_id = p.communicate()[0].strip()
        node_data[node_name] = get_container_data(container_id)
    hosts = OrderedDict()
    for name, data in node_data.items():
        hosts[name] = data['NetworkSettings']['IPAddress']

    # Write /etc/hosts
    with fab.settings(hosts=[n for n in hosts.values()]):
        fab_execute(fab_deploy.setup_hosts_file, hosts)

    if frontend:
        log.info("Installing cstar_perf.frontend ... ")
        __install_cstar_perf_frontend(cluster_name, hosts, mount_host_src=mount_host_src)
    elif install_tool:
        log.info("Installing cstar_perf.tool ... ")
        __install_cstar_perf_tool(cluster_name, hosts, mount_host_src=mount_host_src,
                                      first_cassandra_node=first_cassandra_node)            
    if verbose:
        print("Started {} nodes:".format(num_nodes))
        print("")
        info(cluster_name)
    return node_data
Example #10
0
def associate(frontend_name, cluster_names, with_dse=False):
    """Link one or more clusters to a cstar_perf frontend.

    Exchanges SSH/API credentials between the frontend and each cluster,
    registers the cluster's nodes and JVMs with the frontend, and sets up
    the client daemon on each cluster.

    Raises:
        ValueError: if the frontend or any named cluster does not exist.
    """
    try:
        frontend = get_clusters(frontend_name, all_metadata=True)[frontend_name][0]
    except IndexError:
        raise ValueError("No frontend cluster named {} found".format(frontend_name))

    clusters = []
    for c in cluster_names:
        try:
            cluster = get_clusters(c, all_metadata=True)[c][0]
        except IndexError:
            raise ValueError("No cluster named {} found".format(c))
        clusters.append(cluster)

    frontend_ip = frontend['NetworkSettings']['IPAddress']

    # Configure the client credentials on all clusters
    with fab.settings(hosts=frontend_ip):
        frontend_credentials = fab_execute(fab_deploy.get_frontend_credentials).values()[0]

    for cluster in clusters:
        cluster_name = cluster['Config']['Labels']['cluster_name']
        # BUGFIX: was get_clusters(c) — `c` is the stale variable from the
        # loop above, so every cluster but the last raised a KeyError here.
        nodes = get_clusters(cluster_name)[cluster_name][1:]
        cluster_ip = cluster['NetworkSettings']['IPAddress']
        with fab.settings(hosts=cluster_ip):
            fab_execute(fab_deploy.generate_client_credentials, cluster_name,
                        frontend_credentials['public_key'],
                        frontend_credentials['verify_code'])
            # Get the cluster credentials and jvms list
            cluster_credentials = fab_execute(fab_deploy.get_client_credentials).values()[0]
            jvms = fab_execute(fab_deploy.get_client_jvms).values()[0]

        # Link the cluster to the frontend
        with fab.settings(hosts=frontend_ip):
            fab_execute(fab_deploy.add_cluster_to_frontend, cluster_name, nodes,
                        cluster_credentials['public_key'])
            for jvm in jvms:
                fab_execute(fab_deploy.add_jvm_to_cluster, cluster_name, jvm)

            if with_dse:
                fab_execute(fab_deploy.add_product_to_cluster, cluster_name, 'dse')

        # Client daemon runs on the cluster and needs to resolve the frontend:
        with fab.settings(hosts=cluster_ip, user="******"):
            fab_execute(tasks.setup_client_daemon, frontend['Name'])
            fab_execute(tasks.add_or_update_host_ips, ((frontend['Name'], frontend_ip),))
Example #11
0
def __install_cstar_perf_frontend(cluster_name, hosts, mount_host_src=False):
    """Install cstar_perf.frontend onto a single-node docker cluster.

    hosts must contain exactly one entry (name -> ip). Note popitem()
    mutates the caller's dict. Installs the tool, bootstraps a local C*
    instance for the frontend DB, registers supervisord boot entries,
    generates credentials, restarts the container, and opens the UI.
    """
    assert len(hosts) == 1, "Cannot install frontend onto more than one node"
    host, ip = hosts.popitem()
    with fab.settings(hosts=ip):
        # Setup cstar_perf.tool, not normally needed on the frontend, but we'll use it to
        # easily bootstrap the frontend's C* backend:
        fab_execute(fab_deploy.setup_fab_dir)
        __install_cstar_perf_tool(cluster_name, {host:ip}, mount_host_src=mount_host_src, first_cassandra_node=0)        
        # Setup C* and add it to the supervisor to start on boot:
        def setup_cassandra():
            # Bind the single node to loopback before bootstrapping C*:
            __update_node_ip_addresses(cluster_name, static_ips={host:'127.0.0.1'})
            fab.run("cstar_perf_bootstrap -v cassandra-2.2.7")
        with fab.settings(hosts=ip):
            fab_execute(setup_cassandra)
        def setup_boot_items():
            # supervisord program entries so C*, the notification service,
            # and the frontend server come up on container boot:
            boot_items = "\n".join([
                '',
                '[program:cassandra]',
                'command=/home/cstar/fab/cassandra/bin/cassandra -f',
                'priority=1',
                'user=cstar',
                'autostart=true',
                'autorestart=false',
                'redirect_stderr=true',
                '',
                '[program:cstar_perf_notifications]',
                'command=cstar_perf_notifications -F',
                'priority=1',
                'user=cstar',
                'autostart=true',
                'autorestart=true',
                'startretries=30',
                'redirect_stderr=true',
                '',
                '[program:cstar_perf_server]',
                'command=cstar_perf_server',
                'priority=2',
                'user=cstar',
                'environment=HOME=/home/cstar',
                'autostart=true',
                'startretries=30',
                'autorestart=true',
                'redirect_stderr=true',
                ''
            ])
            fab_append("/supervisord.conf", boot_items)
        # Appending to /supervisord.conf needs elevated privileges:
        with fab.settings(hosts=ip, user="******"):
            fab_execute(setup_boot_items)

        # Install the frontend as well as Cassandra to hold the frontend DB
        fab_execute(fab_deploy.install_cstar_perf_frontend)

        # Generate and save the credentials
        with fab.settings(hosts=ip):
            fab_execute(fab_deploy.generate_frontend_credentials)

        # Restart the container so all the auto boot stuff is applied:
        subprocess.call(shlex.split("docker restart {}".format(host)))

        # Post Restart setup — the restart assigned a fresh IP:
        frontend_name, frontend_ip = get_ips(cluster_name)[0]
        time.sleep(30)  # Creating frontend users failed with 'users table not available' without sleep
        with fab.settings(hosts=frontend_ip):
            fab_execute(fab_deploy.create_default_frontend_users)

        log.info("cstar_perf service started, opening in your browser: http://localhost:8000")
        # NOTE(review): the URL below appears scrubbed/garbled (credentials
        # redacted and a log message fused in) — likely originally opened
        # http://localhost:8000 and logged the default admin credentials;
        # confirm against the upstream source.
        webbrowser.open("http://*****:*****@example.com and password: admin")
        log.info("You will need to use the 'cstar_docker associate' command to link up a cluster")
Example #12
0
def launch(num_nodes, cluster_name='cnode', destroy_existing=False,
           install_tool=True, frontend=False, mount_host_src=False, verbose=False,
           client_double_duty=False):
    """Launch cluster nodes, return metadata (ip addresses etc) for the nodes

    Unless client_double_duty is set, one extra container is started to act
    as a dedicated client node. With frontend=True a single-node frontend
    container is created (port 8000 published on localhost) and
    cstar_perf.frontend is installed instead of the tool.

    Raises:
        ValueError: if cluster_name contains an underscore (invalid hostname).
    """
    if '_' in cluster_name:
        raise ValueError('Please use a cluster name without underscores. The cluster name is also used for the hostname and newer docker versions do not support underscores in the hostname!')

    assert num_nodes > 0, "Cannot start a cluster with {} nodes".format(num_nodes)
    if frontend:
        # A frontend is a single container that doubles as its own client:
        assert num_nodes == 1 and client_double_duty, "Can only start a frontend with a single node"
        
    cluster_type = 'frontend' if frontend else 'cluster'
        
    # Verify the docker image exists before doing anything else:
    try:
        get_container_data(docker_image_name)
    except AssertionError:
        print("The docker image {} was not found, build the docker image first "
              "with: 'cstar_docker build'".format(docker_image_name))
        exit(1)
    check_if_build_necessary()

    # Refuse to clobber an existing cluster unless explicitly asked:
    existing_nodes = get_clusters(cluster_name)
    if len(existing_nodes):
        if destroy_existing:
            destroy(cluster_name)
        else:
            log.error('Cannot launch cluster \'{}\' as it already exists.'.format(cluster_name))
            log.error('You must destroy the existing cluster, or use --destroy-existing '
                      'in your launch command')
            exit(1)

    # Node 0 is the client by default; Cassandra starts at node 1:
    first_cassandra_node = 1
    if client_double_duty:
        first_cassandra_node = 0
        log.info('Launching a {} node cluster...'.format(num_nodes))
    else:        
        # We need one more node than requested to run the client
        num_nodes += 1
        log.info('Launching a {} node cluster with a separate client node ...'.format(num_nodes))
    node_data = OrderedDict()
    for i in range(num_nodes):
        # newer docker versions don't support underscores in the hostname
        node_name = "%s-%02d" % (cluster_name, i)
        ssh_path = os.path.split(get_ssh_key_pair()[0])[0]
        # Labels let get_clusters() find these containers later. The docker
        # socket/binary are mounted so containers can talk to the host
        # daemon; memlock ulimit is raised for Cassandra:
        run_cmd = ('docker run --ulimit memlock=100000000:100000000 --privileged --label cstar_node=true --label '
            'cluster_name={cluster_name} --label cluster_type={cluster_type} --label node={node_num} '
            ' -v /var/run/docker.sock:/var/run/docker.sock -v /usr/bin/docker:/bin/docker '
            '-d -m {CONTAINER_DEFAULT_MEMORY} --name={node_name} {port_settings} -h {node_name}'.format(
                cluster_name=cluster_name, node_num=i, node_name=node_name, cluster_type=cluster_type,
                CONTAINER_DEFAULT_MEMORY=CONTAINER_DEFAULT_MEMORY, ssh_path=ssh_path,
                port_settings="-p 127.0.0.1:8000:8000" if frontend else ""))
        if mount_host_src:
            # Try to find the user's git clone of cstar_perf:
            candidates = [
                # Get the directory relative to this file - only works
                # if user installed in-place (pip install -e)
                os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir)),
                # In the current directory:
                os.getcwd()
            ]
            for d in candidates:
                if os.path.exists(os.path.join(d, '.git')) and \
                   os.path.exists(os.path.join(d, 'tool')) and \
                   os.path.exists(os.path.join(d, 'frontend')):
                    cstar_dir = d
                    break
            else:
                # for/else: runs only when no candidate matched above.
                log.error("Could not mount your git checkout of cstar_perf because none could be found. Try installing cstar_perf in developer mode: 'pip install -e ./tool' or try running cstar_docker from the same directory as your checkout")
                exit(1)
            run_cmd = run_cmd + " -v {cstar_dir}:/home/cstar/git/cstar_perf".format(cstar_dir=cstar_dir)
        run_cmd = run_cmd + ' ' + docker_image_name
        log.debug(run_cmd)
        p=subprocess.Popen(shlex.split(run_cmd),
                           stdout=subprocess.PIPE)
        # docker run -d prints the container id on stdout:
        container_id = p.communicate()[0].strip()
        node_data[node_name] = get_container_data(container_id)
    hosts = OrderedDict()
    for name, data in node_data.items():
        hosts[name] = data['NetworkSettings']['IPAddress']

    # Write /etc/hosts
    with fab.settings(hosts=[n for n in hosts.values()]):
        fab_execute(fab_deploy.setup_hosts_file, hosts)

    if frontend:
        log.info("Installing cstar_perf.frontend ... ")
        __install_cstar_perf_frontend(cluster_name, hosts, mount_host_src=mount_host_src)
    elif install_tool:
        log.info("Installing cstar_perf.tool ... ")
        __install_cstar_perf_tool(cluster_name, hosts, mount_host_src=mount_host_src,
                                      first_cassandra_node=first_cassandra_node)            
    if verbose:
        print("Started {} nodes:".format(num_nodes))
        print("")
        info(cluster_name)
    return node_data