Code Example #1
def check_cluster_status(cluster_ID, redis_client):
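    """Print a human-readable status summary for one cluster.

    Looks up the cluster's provider and zone in redis, queries
    get_cluster_status(), and reports whether the master is online,
    booting, or offline, whether it is connectable, and how many slaves
    are online or booting. `master` is an
    (internal_address, external_address) tuple, or None if no master
    is up.
    """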
    provider = redis_client.hget("cluster:%d" % cluster_ID, "provider")
    zone = redis_client.hget("cluster:%d" % cluster_ID, "zone")
    cluster_status = get_cluster_status(provider, cluster_ID, zone)

    num_masters = len(cluster_status["online_masters"])
    num_masters_booting = len(cluster_status["booting_masters"])
    master = cluster_status["master"]
    connectable = cluster_status["master_connectable"]
    num_slaves = len(cluster_status["online_slaves"])
    num_slaves_booting = len(cluster_status["booting_slaves"])
    if num_masters == 1:
        print "Master online"
    else:
        assert num_masters == 0, "Cannot have multiple masters online but "\
            "found %d" % num_masters

        if num_masters_booting == 1:
            print "Master booting"
        else:
            assert num_masters_booting == 0,\
                "Cannot have multiple masters booting but found %d"\
                % num_masters_booting
            print "Master offline"

    if connectable:
        print "Master connectable"
    else:
        print "Master not connectable"

    print "%d slaves online" % num_slaves
    print "%d slaves booting" % num_slaves_booting

    if master is not None:
        print ""
        print "Cluster status page: %s:4280" % master[1]
        print "Master internal address: %s" % master[0]

    return 0
Code Example #2
def list_clusters():
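    """Render the cluster listing page.

    Reads the set of cluster IDs from redis, merges each cluster's
    stored metadata with its live status from get_cluster_status(),
    derives a master_status string and a master_address, and renders
    the display_clusters.jinja2 template.
    """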
    global redis_client, jinja_env, cluster_info
    print "Listing clusters"

    clusters = redis_client.smembers("clusters")
    clusters = map(lambda x: int(x), clusters)
    cluster_info = {}
    for cluster in clusters:
        info = redis_client.hgetall("cluster:%s" % cluster)
        cluster_info[cluster] = info
        provider = info["provider"]
        zone = info["zone"]
        cluster_status = get_cluster_status(provider, cluster, zone)
        for key, value in cluster_status.iteritems():
            cluster_info[cluster][key] = value

        master_status = "Offline"
        if cluster_status["num_online_masters"] == 1:
            master_status = "Online"
        else:
            if cluster_status["num_booting_masters"] == 1:
                master_status = "Booting"

        cluster_info[cluster]["master_status"] = master_status

        master = cluster_info[cluster]["master"]
        if master is not None:
            # Use external address.
            cluster_info[cluster]["master_address"] = master[1]
        else:
            cluster_info[cluster]["master_address"] = None

    template = jinja_env.get_template("display_clusters.jinja2")
    return template.render(clusters=clusters,
                           cluster_info=cluster_info,
                           now=time.asctime())
Code Example #3
def list_clusters():
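    """Render the cluster listing page.

    Reads the set of cluster IDs from redis, merges each cluster's
    stored metadata with its live status from get_cluster_status(),
    derives a master_status string and a master_address, and renders
    the display_clusters.jinja2 template.
    """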
    global redis_client, jinja_env, cluster_info
    print "Listing clusters"

    clusters = redis_client.smembers("clusters")
    clusters = map(lambda x : int(x), clusters)
    cluster_info = {}
    for cluster in clusters:
        info = redis_client.hgetall("cluster:%s" % cluster)
        cluster_info[cluster] = info
        provider = info["provider"]
        zone = info["zone"]
        cluster_status = get_cluster_status(provider, cluster, zone)
        for key, value in cluster_status.iteritems():
            cluster_info[cluster][key] = value

        master_status = "Offline"
        if cluster_status["num_online_masters"] == 1:
            master_status = "Online"
        else:
            if cluster_status["num_booting_masters"] == 1:
                master_status = "Booting"

        cluster_info[cluster]["master_status"] = master_status

        master = cluster_info[cluster]["master"]
        if master is not None:
            # Use external address.
            cluster_info[cluster]["master_address"] = master[1]
        else:
            cluster_info[cluster]["master_address"] = None

    template = jinja_env.get_template("display_clusters.jinja2")
    return template.render(
        clusters = clusters, cluster_info = cluster_info, now = time.asctime())
Code Example #4
def main():
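    """Record the cluster master's addresses in the cluster config file.

    Reads the cluster ID and provider from CLUSTER_CONFIG, resolves the
    zone from the provider's config file, asks get_cluster_status() for
    the master node, and writes its internal and external addresses back
    into the config file. Returns 0 on success, 1 if no master is found.
    """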
    # Read the cluster config to get cluster ID.
    parser = ConfigParser.SafeConfigParser()
    parser.read(CLUSTER_CONFIG)

    cluster_ID = int(parser.get("cluster", "id"))
    provider = parser.get("cluster", "provider")

    zone = read_conf_file("%s.conf" % provider, provider, "zone")

    # Store master address information
    master = get_cluster_status(provider, cluster_ID, zone)["master"]
    if master is None:
        print >>sys.stderr, "Could not find master hostname"
        return 1

    # Set master hostname in cluster.conf
    parser.set("cluster", "master_internal_address", master[0])
    parser.set("cluster", "master_external_address", master[1])

    with open(CLUSTER_CONFIG, "w") as config_file:
        parser.write(config_file)

    return 0
Code Example #5
def main():
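    """Record the cluster master's addresses in the cluster config file.

    Reads the cluster ID and provider from CLUSTER_CONFIG, resolves the
    zone from the provider's config file, asks get_cluster_status() for
    the master node, and writes its internal and external addresses back
    into the config file. Returns 0 on success, 1 if no master is found.
    """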
    # Read the cluster config to get cluster ID.
    parser = ConfigParser.SafeConfigParser()
    parser.read(CLUSTER_CONFIG)

    cluster_ID = int(parser.get("cluster", "id"))
    provider = parser.get("cluster", "provider")

    zone = read_conf_file("%s.conf" % provider, provider, "zone")

    # Store master address information
    master = get_cluster_status(provider, cluster_ID, zone)["master"]
    if master is None:
        print >> sys.stderr, "Could not find master hostname"
        return 1

    # Set master hostname in cluster.conf
    parser.set("cluster", "master_internal_address", master[0])
    parser.set("cluster", "master_external_address", master[1])

    with open(CLUSTER_CONFIG, "w") as config_file:
        parser.write(config_file)

    return 0
Code Example #6
def modify_clusters():
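    """Handle cluster-management form submissions.

    Dispatches on the POST parameters: launch a new Amazon or Google
    cluster, terminate a cluster, bring a cluster online (add nodes,
    format and mount disks, set the master hostname, build .themisrc
    files, and assign the interface and I/O / intermediate disks over
    SSH), or persist a cluster's logs to cloud storage. Finishes by
    re-rendering the cluster list.
    """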
    global redis_client, aws_access_key_id, aws_secret_access_key,\
        cluster_info, provider_info
    print "Modifying Clusters"
    if bottle.request.POST.new_amazon_cluster:
        print "Launching new Amazon cluster"
        cluster_name = bottle.request.POST.cluster_name
        cluster_size = int(bottle.request.POST.cluster_size)
        instance_type = bottle.request.POST.instance_type
        AMI_ID = bottle.request.POST.AMI_ID
        master_instance_type = bottle.request.POST.master_instance_type
        subnet_ID = bottle.request.POST.subnet_ID
        security_group_ID = bottle.request.POST.security_group_ID
        S3_bucket = bottle.request.POST.S3_bucket
        private_key = bottle.request.POST.private_key
        public_key = bottle.request.POST.public_key
        username = bottle.request.POST.username
        themis_config_directory = bottle.request.POST.themis_config_directory
        placement_group = bottle.request.POST.placement_group
        if placement_group == "":
            placement_group = None
        EBS_optimized = bottle.request.POST.EBS_optimized
        if EBS_optimized == "Yes":
            EBS_optimized = True
        else:
            EBS_optimized = False

        try:
            launch_amazon_cluster(provider_info["amazon"], cluster_name,
                                  cluster_size, instance_type, AMI_ID,
                                  master_instance_type, subnet_ID,
                                  security_group_ID, S3_bucket, private_key,
                                  public_key, themis_config_directory,
                                  placement_group, EBS_optimized, username,
                                  redis_client)
        except ProcessExecutionError as e:
            return display_error(e)

    elif bottle.request.POST.new_google_cluster:
        print "Launching new Google cluster"
        cluster_name = bottle.request.POST.cluster_name
        cluster_size = int(bottle.request.POST.cluster_size)
        instance_type = bottle.request.POST.instance_type
        local_ssds = bottle.request.POST.local_ssds
        image = bottle.request.POST.image
        master_instance_type = bottle.request.POST.master_instance_type
        zone = bottle.request.POST.zone
        network = bottle.request.POST.network
        bucket = bottle.request.POST.bucket
        private_key = bottle.request.POST.private_key
        public_key = bottle.request.POST.public_key
        themis_config_directory = bottle.request.POST.themis_config_directory

        try:
            launch_google_cluster(cluster_name, cluster_size, instance_type,
                                  local_ssds, image, master_instance_type,
                                  network, zone, bucket, private_key,
                                  public_key, themis_config_directory,
                                  provider_info["google"], redis_client)
        except ProcessExecutionError as e:
            return display_error(e)

    elif bottle.request.POST.terminate:
        cluster_ID = int(bottle.request.POST.terminate)
        print "Terminating cluster %d" % cluster_ID

        try:
            terminate_cluster(cluster_ID, redis_client)
        except ProcessExecutionError as e:
            return display_error(e)

    elif bottle.request.POST.bring_online:
        cluster_ID = int(bottle.request.POST.bring_online)
        print "Bringing cluster %d online" % cluster_ID

        print "Fetching relevant information from redis..."
        # Fetch instance type from redis
        instance_type = redis_client.hget("cluster:%d" % cluster_ID,
                                          "instance_type")
        themis_directory = redis_client.hget("cluster:%d" % cluster_ID,
                                             "themis_directory")

        # Run configuration command on master via its external address.
        master = cluster_info[cluster_ID]["master"][1]
        print "Adding nodes to cluster..."
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_client.connect(master,
                           username=cluster_info[cluster_ID]["username"])

        # Fetch a recent list of online nodes.
        zone = cluster_info[cluster_ID]["zone"]
        provider = cluster_info[cluster_ID]["provider"]
        cluster_status = get_cluster_status(provider, cluster_ID, zone)
        node_list = cluster_status["online_slaves"]
        # Use internal addresses
        node_list = [node[0] for node in node_list]

        device_map = None
        if provider == "amazon":
            device_map = provider_info[provider]["device_map"]
        elif provider == "google":
            # We can look up the number of devices directly from redis.
            device_map = {}
            device_map[instance_type] = int(
                redis_client.hget("cluster:%d" % cluster_ID, "local_ssds"))

        # Configure the master with cluster information
        command = "%s add" % os.path.join(
            themis_directory,
            "src/scripts/themis/cluster/configure_cluster.py")
        for node in node_list:
            command = "%s %s" % (command, node)

        channel = ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        # Format disks
        print "Formatting disks..."
        devices_are_partitions = ""
        # NVME devices on GCE show up as partitions rather than devices that
        # need to be partitioned with fdisk, so if using NVME,
        # devices_are_partitions should be set to the '--format_disks'
        # option. However, since the nvme debian image appears to be buggy,
        # we'll launch in SCSI mode, which does require the fdisk, so leave
        # this option string blank for both providers.
        command = "%s \"%s %s --format_disks\"" % (
            os.path.join(themis_directory,
                         "src/scripts/themis/cluster/parallel_ssh.py"),
            os.path.join(themis_directory,
                         "src/scripts/themis/cluster/mount_disks.py"),
            devices_are_partitions)

        channel = ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        # Set master hostname for slave nodes
        print "Setting master information on slaves..."
        command = "%s \"%s\"" % (
            os.path.join(themis_directory,
                         "src/scripts/themis/cluster/parallel_ssh.py"),
            os.path.join(themis_directory,
                         "src/scripts/themis/cloud/set_master_hostname.py"))

        channel = ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        # Build themis rc file so nodes can see master redis
        print "Building .themisrc files"
        command = "%s -m \"%s\"" % (
            os.path.join(themis_directory,
                         "src/scripts/themis/cluster/parallel_ssh.py"),
            os.path.join(themis_directory,
                         "src/scripts/themis/cluster/build_themis_rc.py"))

        channel = ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        # By default, populate with eth0 and all devices - half I/O and half
        # intermediate. If there are extra devices give them to the I/O
        print "Configuring cluster with default interface and disks..."
        interface = "eth0"
        num_devices = device_map[instance_type]
        disk_list = ["/mnt/disks/disk_%d" % x for x in xrange(num_devices)]
        if num_devices == 1:
            # Special case for 1 device, use for both input and output.
            io_disks = disk_list
            intermediate_disks = disk_list
        else:
            io_devices = num_devices / 2
            if num_devices % 2 > 0:
                # Give extra device to the I/O disks.
                io_devices += 1
            io_disks = disk_list[0:io_devices]
            intermediate_disks = disk_list[io_devices:]

        # Configure the cluster
        command = "%s interfaces %s" % (os.path.join(
            themis_directory,
            "src/scripts/themis/cluster/configure_cluster.py"), interface)

        channel = ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        if len(io_disks) > 0:
            command = "%s io_disks" % os.path.join(
                themis_directory,
                "src/scripts/themis/cluster/configure_cluster.py")
            for disk in io_disks:
                command = "%s %s" % (command, disk)

            channel = ssh_client.get_transport().open_session()
            channel.get_pty()
            channel.exec_command(command)
            return_code = channel.recv_exit_status()
            if return_code != 0:
                while channel.recv_stderr_ready():
                    stderr = channel.recv_stderr(1024)
                    sys.stderr.write(stderr)
                sys.exit(return_code)

        if len(intermediate_disks) > 0:
            command = "%s intermediate_disks" % os.path.join(
                themis_directory,
                "src/scripts/themis/cluster/configure_cluster.py")
            for disk in intermediate_disks:
                command = "%s %s" % (command, disk)

            channel = ssh_client.get_transport().open_session()
            channel.get_pty()
            channel.exec_command(command)
            return_code = channel.recv_exit_status()
            if return_code != 0:
                while channel.recv_stderr_ready():
                    stderr = channel.recv_stderr(1024)
                    sys.stderr.write(stderr)
                sys.exit(return_code)

        # Set cluster status to online.
        redis_client.hset("cluster:%d" % cluster_ID, "cluster_status",
                          "Online")

    elif bottle.request.POST.persist_to_storage:
        cluster_ID = int(bottle.request.POST.persist_to_storage)
        provider = cluster_info[cluster_ID]["provider"]
        print "Persisting logs from cluster %d to cloud storage" % cluster_ID

        print "Fetching relevant information from redis..."
        bucket = redis_client.hget("cluster:%d" % cluster_ID, "bucket")
        log_directory = redis_client.hget("cluster:%d" % cluster_ID,
                                          "log_directory")
        themis_directory = redis_client.hget("cluster:%d" % cluster_ID,
                                             "themis_directory")

        # Run configuration command on master via its external address.
        master = cluster_info[cluster_ID]["master"][1]
        print "Persisting logs to storage..."
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_client.connect(master,
                           username=cluster_info[cluster_ID]["username"])

        command = os.path.join(themis_directory,
                               "src/scripts/themis/cloud/upload_logs.py")

        channel = ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

    return list_clusters()
Code Example #7
def modify_clusters():
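    """Handle cluster-management form submissions.

    Dispatches on the POST parameters: launch a new Amazon or Google
    cluster (the Google path also accepts a persistent SSD count),
    terminate a cluster, bring a cluster online (add nodes, format and
    mount disks, set the master hostname, build .themisrc files, and
    assign the interface and I/O / intermediate disks over SSH), or
    persist a cluster's logs to cloud storage. Finishes by
    re-rendering the cluster list.
    """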
    global redis_client, aws_access_key_id, aws_secret_access_key,\
        cluster_info, provider_info
    print "Modifying Clusters"
    if bottle.request.POST.new_amazon_cluster:
        print "Launching new Amazon cluster"
        cluster_name = bottle.request.POST.cluster_name
        cluster_size = int(bottle.request.POST.cluster_size)
        instance_type = bottle.request.POST.instance_type
        AMI_ID = bottle.request.POST.AMI_ID
        master_instance_type = bottle.request.POST.master_instance_type
        subnet_ID = bottle.request.POST.subnet_ID
        security_group_ID = bottle.request.POST.security_group_ID
        S3_bucket = bottle.request.POST.S3_bucket
        private_key = bottle.request.POST.private_key
        public_key = bottle.request.POST.public_key
        username = bottle.request.POST.username
        themis_config_directory = bottle.request.POST.themis_config_directory
        placement_group = bottle.request.POST.placement_group
        if placement_group == "":
            placement_group = None
        EBS_optimized = bottle.request.POST.EBS_optimized
        if EBS_optimized == "Yes":
            EBS_optimized = True
        else:
            EBS_optimized = False

        try:
            launch_amazon_cluster(
                provider_info["amazon"], cluster_name, cluster_size,
                instance_type, AMI_ID, master_instance_type, subnet_ID,
                security_group_ID, S3_bucket, private_key, public_key,
                themis_config_directory, placement_group, EBS_optimized,
                username, redis_client)
        except ProcessExecutionError as e:
            return display_error(e)

    elif bottle.request.POST.new_google_cluster:
        print "Launching new Google cluster"
        cluster_name = bottle.request.POST.cluster_name
        cluster_size = int(bottle.request.POST.cluster_size)
        instance_type = bottle.request.POST.instance_type
        local_ssds = bottle.request.POST.local_ssds
        persistent_ssds = bottle.request.POST.persistent_ssds
        image = bottle.request.POST.image
        master_instance_type = bottle.request.POST.master_instance_type
        zone = bottle.request.POST.zone
        network = bottle.request.POST.network
        bucket = bottle.request.POST.bucket
        private_key = bottle.request.POST.private_key
        public_key = bottle.request.POST.public_key
        themis_config_directory = bottle.request.POST.themis_config_directory

        try:
            launch_google_cluster(
                cluster_name, cluster_size, instance_type, local_ssds, persistent_ssds,
                image, master_instance_type, network, zone, bucket, private_key,
                public_key, themis_config_directory, provider_info["google"],
                redis_client)
        except ProcessExecutionError as e:
            return display_error(e)

    elif bottle.request.POST.terminate:
        cluster_ID = int(bottle.request.POST.terminate)
        print "Terminating cluster %d" % cluster_ID

        try:
            terminate_cluster(cluster_ID, redis_client)
        except ProcessExecutionError as e:
            return display_error(e)

    elif bottle.request.POST.bring_online:
        cluster_ID = int(bottle.request.POST.bring_online)
        print "Bringing cluster %d online" % cluster_ID

        print "Fetching relevant information from redis..."
        # Fetch instance type from redis
        instance_type = redis_client.hget(
            "cluster:%d" % cluster_ID, "instance_type")
        themis_directory = redis_client.hget(
            "cluster:%d" % cluster_ID, "themis_directory")

        # Run configuration command on master via its external address.
        master = cluster_info[cluster_ID]["master"][1]
        print "Adding nodes to cluster..."
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_client.connect(
            master, username=cluster_info[cluster_ID]["username"])

        # Fetch a recent list of online nodes.
        zone = cluster_info[cluster_ID]["zone"]
        provider = cluster_info[cluster_ID]["provider"]
        cluster_status = get_cluster_status(provider, cluster_ID, zone)
        node_list = cluster_status["online_slaves"]
        # Use internal addresses
        node_list = [node[0] for node in node_list]

        device_map = None
        if provider == "amazon":
            device_map = provider_info[provider]["device_map"]
        elif provider == "google":
            # We can look up the number of devices directly from redis.
            device_map = {}
            device_map[instance_type] = int(redis_client.hget(
                "cluster:%d" % cluster_ID, "local_ssds"))
            device_map[instance_type + "_persist"] = int(redis_client.hget(
                "cluster:%d" % cluster_ID, "persistent_ssds"))

        # Configure the master with cluster information
        command = "%s add" % os.path.join(
            themis_directory, "src/scripts/themis/cluster/configure_cluster.py")
        for node in node_list:
            command = "%s %s" % (command, node)

        channel = ssh_client.get_transport().open_session()
        channel.request_forward_agent(paramiko.agent.AgentClientProxy)
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_ready():
                stdout = channel.recv(1024)
                sys.stdout.write(stdout)
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        # Format disks
        print "Formatting disks, setting master info, and building themis rc..."
        devices_are_partitions = ""
        # NVME devices on GCE show up as partitions rather than devices that
        # need to be partitioned with fdisk, so if using NVME,
        # devices_are_partitions should be set to the '--format_disks'
        # option. However, since the nvme debian image appears to be buggy,
        # we'll launch in SCSI mode, which does require the fdisk, so leave
        # this option string blank for both providers.
        command_1 = "%s %s --format_disks" % (os.path.join(
                themis_directory,
                "src/scripts/themis/cluster/mount_disks.py"),
                devices_are_partitions)
        command_2 = "%s" % os.path.join(
                themis_directory,
                "src/scripts/themis/cloud/set_master_hostname.py")
        command_3 = "%s" % os.path.join(
                themis_directory,
                "src/scripts/themis/cluster/build_themis_rc.py")
        full_command = "%s \"%s; %s; %s\"; %s" % (
            os.path.join(
                themis_directory,
                "src/scripts/themis/cluster/parallel_ssh.py"),
            command_1,
            command_2,
            command_3,
            command_3)

        # command = "%s \"%s %s --format_disks\"" % (
        #     os.path.join(
        #         themis_directory,
        #         "src/scripts/themis/cluster/parallel_ssh.py"),
        #     os.path.join(
        #         themis_directory,
        #         "src/scripts/themis/cluster/mount_disks.py"),
        #     devices_are_partitions)
        channel = ssh_client.get_transport().open_session()
        channel.request_forward_agent(paramiko.agent.AgentClientProxy)
        channel.get_pty()
        channel.exec_command(full_command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_ready():
                stdout = channel.recv(1024)
                sys.stdout.write(stdout)
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        # Set master hostname for slave nodes
        # print "Setting master information on slaves..."
        # command = "%s \"%s\"" % (
        #               os.path.join(
        #                   themis_directory,
        #                   "src/scripts/themis/cluster/parallel_ssh.py"),
        #               os.path.join(
        #                   themis_directory,
        #                   "src/scripts/themis/cloud/set_master_hostname.py"))

        # channel = ssh_client.get_transport().open_session()
        # channel.request_forward_agent(paramiko.agent.AgentClientProxy)
        # channel.get_pty()
        # channel.exec_command(command)
        # return_code = channel.recv_exit_status()
        # if return_code != 0:
        #     while channel.recv_stderr_ready():
        #         stderr = channel.recv_stderr(1024)
        #         sys.stderr.write(stderr)
        #     sys.exit(return_code)

        # # Build themis rc file so nodes can see master redis
        # print "Building .themisrc files"
        # command = "%s -m \"%s\"" % (
        #               os.path.join(
        #                   themis_directory,
        #                   "src/scripts/themis/cluster/parallel_ssh.py"),
        #               os.path.join(
        #                   themis_directory,
        #                   "src/scripts/themis/cluster/build_themis_rc.py"))

        # channel = ssh_client.get_transport().open_session()
        # channel.request_forward_agent(paramiko.agent.AgentClientProxy)
        # channel.get_pty()
        # channel.exec_command(command)
        # return_code = channel.recv_exit_status()
        # if return_code != 0:
        #     while channel.recv_stderr_ready():
        #         stderr = channel.recv_stderr(1024)
        #         sys.stderr.write(stderr)
        #     sys.exit(return_code)

        # By default, populate with eth0 and all devices - half I/O and half
        # intermediate. If there are extra devices give them to the I/O
        print "Configuring cluster with default interface and disks..."
        interface = "eth0"
        num_devices = device_map[instance_type]
        disk_list = ["/mnt/disks/disk_%d" % x for x in xrange(num_devices)]
        if num_devices == 1:
            # Special case for 1 device, use for both input and output.
            io_disks = disk_list
            intermediate_disks = disk_list
        else:
            io_devices = num_devices / 2
            if num_devices % 2 > 0:
                # Give extra device to the I/O disks.
                io_devices += 1
            io_disks = disk_list[0:io_devices]
            intermediate_disks = disk_list[io_devices:]

        # If there's persistent disks, use those as the sole I/O disks.
        num_persist = device_map.get(instance_type + "_persist", 0)
        if num_persist:
            persist_disk_list = ["/mnt/disks/disk_persist_%d" % x for x in xrange(num_persist)]
            io_disks = persist_disk_list
            intermediate_disks = disk_list

        # Configure the cluster
        command = "%s interfaces %s" % (
            os.path.join(
                themis_directory,
                "src/scripts/themis/cluster/configure_cluster.py"),
            interface)

        channel = ssh_client.get_transport().open_session()
        channel.request_forward_agent(paramiko.agent.AgentClientProxy)
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_ready():
                stdout = channel.recv(1024)
                sys.stdout.write(stdout)
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

        if len(io_disks) > 0:
            command = "%s io_disks" % os.path.join(
                themis_directory, "src/scripts/themis/cluster/configure_cluster.py")
            for disk in io_disks:
                command = "%s %s" % (command, disk)

            channel = ssh_client.get_transport().open_session()
            channel.request_forward_agent(paramiko.agent.AgentClientProxy)
            channel.get_pty()
            channel.exec_command(command)
            return_code = channel.recv_exit_status()
            if return_code != 0:
                while channel.recv_ready():
                    stdout = channel.recv(1024)
                    sys.stdout.write(stdout)
                while channel.recv_stderr_ready():
                    stderr = channel.recv_stderr(1024)
                    sys.stderr.write(stderr)
                sys.exit(return_code)

        if len(intermediate_disks) > 0:
            command = "%s intermediate_disks" % os.path.join(
                themis_directory, "src/scripts/themis/cluster/configure_cluster.py")
            for disk in intermediate_disks:
                command = "%s %s" % (command, disk)

            channel = ssh_client.get_transport().open_session()
            channel.request_forward_agent(paramiko.agent.AgentClientProxy)
            channel.get_pty()
            channel.exec_command(command)
            return_code = channel.recv_exit_status()
            if return_code != 0:
                while channel.recv_ready():
                    stdout = channel.recv(1024)
                    sys.stdout.write(stdout)
                while channel.recv_stderr_ready():
                    stderr = channel.recv_stderr(1024)
                    sys.stderr.write(stderr)
                sys.exit(return_code)

        # Set cluster status to online.
        redis_client.hset("cluster:%d" % cluster_ID, "cluster_status", "Online")

    elif bottle.request.POST.persist_to_storage:
        cluster_ID = int(bottle.request.POST.persist_to_storage)
        provider = cluster_info[cluster_ID]["provider"]
        print "Persisting logs from cluster %d to cloud storage" % cluster_ID

        print "Fetching relevant information from redis..."
        bucket = redis_client.hget(
            "cluster:%d" % cluster_ID, "bucket")
        log_directory = redis_client.hget(
            "cluster:%d" % cluster_ID, "log_directory")
        themis_directory = redis_client.hget(
            "cluster:%d" % cluster_ID, "themis_directory")

        # Run configuration command on master via its external address.
        master = cluster_info[cluster_ID]["master"][1]
        print "Persisting logs to storage..."
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_client.connect(
            master, username=cluster_info[cluster_ID]["username"])

        command = os.path.join(
            themis_directory, "src/scripts/themis/cloud/upload_logs.py")

        channel = ssh_client.get_transport().open_session()
        channel.request_forward_agent(paramiko.agent.AgentClientProxy)
        channel.get_pty()
        channel.exec_command(command)
        return_code = channel.recv_exit_status()
        if return_code != 0:
            while channel.recv_stderr_ready():
                stderr = channel.recv_stderr(1024)
                sys.stderr.write(stderr)
            sys.exit(return_code)

    return list_clusters()