Code example #1
File: client.py Project: cstavr/SynnefoSSH
class SynnefoClient(object):
    """Synnefo Client

    Wrapper class around kamaki clients for the various Synnefo
    services:

    * astakos:           Astakos client
    * volume:            Cyclades Block Storage client
    * compute:           Cyclades Compute client
    * cyclades_networks: Cyclades Network client
    * network:           Network client
    * image:             Cyclades Plankton client

    """
    def __init__(self, cloud=None, auth_url=None, token=None):
        if cloud is not None:
            auth_url, token = utils.get_cloud_credentials(cloud)
        self.auth_url, self.token = auth_url, token
        self.astakos = AstakosClient(self.auth_url, self.token)
        self.endpoints = self.get_api_endpoints()
        self.volume = BlockStorageClient(self.endpoints["cyclades_volume"],
                                         token)
        self.compute = CycladesClient(self.endpoints["cyclades_compute"],
                                      token)
        self.cyclades_networks = CycladesNetworkClient(
            self.endpoints["cyclades_network"], token)
        self.network = NetworkClient(self.endpoints["cyclades_network"], token)
        self.image = ImageClient(self.endpoints["cyclades_plankton"], token)

    def get_api_endpoints(self):
        """Get service endpoints from Astakos"""
        _endpoints = self.astakos.get_endpoints()["access"]
        endpoints = {}
        for service in _endpoints["serviceCatalog"]:
            endpoints[service["name"]] = service["endpoints"][0]["publicURL"]
        return endpoints
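
A minimal usage sketch (hedged: the cloud name "default" and the credentials below are placeholders; utils.get_cloud_credentials is assumed to come from the same project):

    # Hypothetical usage: build the client from a named kamakirc cloud...
    client = SynnefoClient(cloud="default")
    # ...or from explicit credentials:
    # client = SynnefoClient(auth_url="https://accounts.example.org/identity/v2.0",
    #                        token="YOUR_TOKEN")
    for name, url in client.endpoints.items():
        print("%s -> %s" % (name, url))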
Code example #2
def authenticate_clients():
    """
    Instantiate kamaki clients (astakos, cyclades, compute, network).
    """
    try:
        astakos_client = AstakosClient(AUTHENTICATION_URL,
                                       TOKEN)
        astakos_client.authenticate()
        logging.info('Successful authentication')
    except ClientError:
        logging.error('Failed to authenticate user token')
        print('Failed to authenticate user token')
        return None
    try:
        endpoints = astakos_client.get_endpoints()
        cyclades_base_url = parse_astakos_endpoints(
            endpoints, 'cyclades_compute')
        cyclades_network_base_url = parse_astakos_endpoints(
            endpoints, 'cyclades_network')
    except ClientError:
        print('Failed to get endpoints for cyclades')
        return None
    try:
        cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
        compute_client = ComputeClient(cyclades_base_url, TOKEN)
        network_client = CycladesNetworkClient(cyclades_network_base_url,
                                               TOKEN)
        return cyclades_client, compute_client, network_client, astakos_client
    except ClientError:
        print('Failed to initialize Cyclades client')
        return None
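
A short call-site sketch (hedged: assumes AUTHENTICATION_URL and TOKEN are defined at module level, as the function itself does):

    # Hypothetical usage: unpack the four clients, guarding against the
    # None that authenticate_clients() returns on failure.
    clients = authenticate_clients()
    if clients is not None:
        cyclades_client, compute_client, network_client, astakos_client = clients
        for srv in cyclades_client.list_servers(detail=True):
            print(srv['name'])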
Code example #3
File: hadoop_cluster.py Project: themiszamani/hadoop
def main():
    """Parse arguments, use kamaki to create cluster, setup using ssh"""

    (opts, args) = parse_arguments(sys.argv[1:])

    global CYCLADES, TOKEN

    AUTHENTICATION_URL = opts.cyclades
    TOKEN = opts.token

    # Cleanup stale servers from previous runs
    if opts.show_stale:
        cleanup_servers(prefix=opts.prefix, delete_stale=opts.delete_stale)
        return 0

    # Initialize a kamaki instance, get endpoints
    user = AstakosClient(AUTHENTICATION_URL, TOKEN)
    my_accountData = user.authenticate()
    endpoints = user.get_endpoints()
    cyclades_endpoints = user.get_endpoints('compute')
    cyclades_base_url = parseAstakosEndpoints(endpoints, 'cyclades_compute')
    cyclades_network_base_url = parseAstakosEndpoints(endpoints, 'cyclades_network')
    my_cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
    my_compute_client = ComputeClient(cyclades_base_url, TOKEN)
    my_network_client = CycladesNetworkClient(cyclades_network_base_url, TOKEN)

    cnt = int(opts.clustersize)  # calculate size of cluster into 'cnt'
    # Initialize
    nodes = []
    masterName = ''
    # Create a file to store the root password for later use
    pass_fname = opts.hadoop_dir+'/bak/adminPass'+str(datetime.now())[:19].replace(' ', '')
    adminPass_f = open(pass_fname, 'w')

    # Pick the public network and allocate a floating IP on it
    myNetworks = my_network_client.list_networks()
    NetWork_free = parseNetwork(myNetworks, 'public')
    myIp = my_network_client.create_floatingip(NetWork_free)
    LastIp = myIp.get("floating_ip_address")

    initialClusterSize = 0
    server = {}
    if not opts.extend:
        # Create master node (0th node)
        server = create_machine(opts, my_cyclades_client, 0)
        if server == {}:
            return
    else:
        servers = my_cyclades_client.list_servers(detail=True)
        cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
        initialClusterSize = len(cluster)
        if initialClusterSize == 0:
            log.info("Cluster cannot be expanded: it does not exist.")
            return

    servername = "%s-0" % (opts.prefix)
    masterName = servername
    nodes.append(server)

    # Create slave (worker) nodes
    if cnt > 1 or opts.extend:
        startingOffset = 1
        if opts.extend:
            startingOffset = initialClusterSize
        for i in xrange(startingOffset, initialClusterSize+cnt):
            server = create_machine(opts, my_cyclades_client, i)
            if server == {}:
                return
            nodes.append(server)
            servername = "%s-%d" % (opts.prefix, i)
            # Write the root password to a file
            adminPass_f.write('machine = %s, password = %s\n' % (servername, server['adminPass']))

    adminPass_f.close()

    # Setup Hadoop files and settings on all cluster nodes
    # Create the 'cluster' list out of servers, keeping only the
    # Hadoop-relevant fields (name, ip, integer index)
    servers = my_cyclades_client.list_servers(detail=True)
    cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
    cluster = [(s["name"], s["attachments"][1]["ipv4"], int(s["name"][s["name"].find('-')+1:])) for s in cluster]
    cluster = sorted(cluster, key=lambda entry: entry[2])

    # Prepare Ansible-Hadoop config files (hosts, conf/slaves)
    hosts = open(opts.hadoop_dir+'/hosts', 'w')
    hosts.write('[master]\n')
    for i in xrange(0, initialClusterSize+cnt):
        for s in cluster:
            if s[0] == opts.prefix+"-"+str(i):
                if s[0] == masterName:
                    hosts.write(s[1]+'\n\n'+'[slaves]\n')
                else:
                    hosts.write(s[1]+'\n')
    hosts.close()

    slaves = open(opts.hadoop_dir+'/conf/slaves', 'w')
    for s in cluster[1:]:
        slaves.write(s[1]+'\n')
    slaves.close()

    # Execute respective ansible playbook
    if not opts.extend:
        cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \"master_ip="+cluster[0][1]+"\" -l master"
        retval = os.system(cmd)
        cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \"master_ip="+cluster[0][1]+"\" -l slaves"
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(1, cnt):
            slave_ip_list.append(cluster[i][1])
        enable_ssh_login(cluster[0][1], [cluster[0][1]])
        enable_ssh_login(cluster[0][1], slave_ip_list)
    else:
        hosts_latest = open(opts.hadoop_dir+'/hosts.latest', 'w')
        hosts_latest.write('[master]\n')
        hosts_latest.write(cluster[0][1]+'\n\n'+'[slaves]\n')
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            hosts_latest.write(cluster[i][1]+'\n')
        hosts_latest.close()
        cmd = "ansible-playbook hadoop.yml -i hosts.latest -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l slaves"
        retval = os.system(cmd) 
        slave_ip_list = []
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            slave_ip_list.append(cluster[i][1]) 
        enable_ssh_login(cluster[0][1], slave_ip_list)

    # Update conf/slaves in master
    cmd = "ansible-playbook hadoop.yml -i hosts -vv --extra-vars \""+"master_ip="+cluster[0][1]+"\""+" -l master -t slaves"
    retval = os.system(cmd)

    log.info("Done.")
Code example #4
def main():
    """Parse arguments, use kamaki to create cluster, setup using ansible playbooks"""

    (opts, args) = parse_arguments(sys.argv[1:])

    global CYCLADES, TOKEN, my_vnat_network, my_network_client

    AUTHENTICATION_URL = opts.cyclades
    TOKEN = opts.token

    # Cleanup stale servers from previous runs
    if opts.show_stale:
        cleanup_servers(prefix=opts.prefix, delete_stale=opts.delete_stale)
        return 0

    # Initialize a kamaki instance, get endpoints
    user = AstakosClient(AUTHENTICATION_URL, TOKEN)
    my_accountData = user.authenticate()
    endpoints = user.get_endpoints()
    cyclades_endpoints = user.get_endpoints('compute')
    cyclades_base_url = parseAstakosEndpoints(endpoints, 'cyclades_compute')
    cyclades_network_base_url = parseAstakosEndpoints(endpoints, 'cyclades_network')
    my_cyclades_client = CycladesClient(cyclades_base_url, TOKEN)
    my_compute_client = ComputeClient(cyclades_base_url, TOKEN)
    my_network_client = CycladesNetworkClient(cyclades_network_base_url, TOKEN)

    my_vnat_network = {}

    # check if 'Hadoop' vnat is created...
    hadoop_vnat_created = False
    my_network_dict = my_network_client.list_networks()
    for n in my_network_dict:
        if n['name'] == 'Hadoop':
            hadoop_vnat_created = True
            my_vnat_network = n

    # ...else create it
    if not hadoop_vnat_created:
        log.info("Creating vNAT")
        my_vnat_network = my_network_client.create_network(type='MAC_FILTERED', name='Hadoop')
        my_subnet = my_network_client.create_subnet(network_id=my_vnat_network['id'], cidr='192.168.0.0/24')

    cnt = int(opts.clustersize)  # calculate size of cluster into 'cnt'
    # Initialize
    nodes = []
    masterName = ''

    # Create a file to store the root password for later use
    if not os.path.exists(opts.hadoop_dir+'/bak'):
        os.makedirs(opts.hadoop_dir+'/bak')
    pass_fname = opts.hadoop_dir+'/bak/adminPass'+str(datetime.now())[:19].replace(' ', '')
    adminPass_f = open(pass_fname, 'w')

    initialClusterSize = 0
    server = {}
    if not opts.extend:
        # Create master node (0th node)
        server = create_machine(opts, my_cyclades_client, 0)
        if server == {}:
            return
    else:
        servers = my_cyclades_client.list_servers(detail=True)
        cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
        initialClusterSize = len(cluster)
        if initialClusterSize == 0:
            log.info("Cluster cannot be expanded: it does not exist.")
            return

    servername = "%s-0" % (opts.prefix)
    masterName = servername
    nodes.append(server)

    # Create slave (worker) nodes
    if cnt > 1 or opts.extend:
        startingOffset = 1
        if opts.extend:
            startingOffset = initialClusterSize
        for i in xrange(startingOffset, initialClusterSize+cnt):
            server = create_machine(opts, my_cyclades_client, i)
            if server == {}:
                return
            nodes.append(server)
            servername = "%s-%d" % (opts.prefix, i)
            # Write the root password to a file
            adminPass_f.write('machine = %s, password = %s\n' % (servername, server['adminPass']))

    adminPass_f.close()

    # Setup Hadoop files and settings on all cluster nodes
    # Create the 'cluster' list out of servers, keeping only the
    # Hadoop-relevant fields (name, ip, integer index)
    servers = my_cyclades_client.list_servers(detail=True)
    cluster = [s for s in servers if s["name"].startswith(opts.prefix)]
    cluster0 = [(s["name"], s["attachments"], int(s["name"][s["name"].find('-')+1:])) for s in cluster]
    cluster0 = sorted(cluster0, key=lambda entry: entry[2])
    # Master node: its IPv4 sits at attachment index 2, unlike the slaves
    cluster = [(cluster0[0][0], cluster0[0][1][2]["ipv4"], cluster0[0][2])]
    # Slave nodes: IPv4 at attachment index 1
    cluster2 = [(s[0], s[1][1]['ipv4'], int(s[2])) for s in cluster0[1:]]
    cluster += cluster2

    # Prepare Ansible-Hadoop config files (hosts, vnat/slaves, vnat/etchosts)
    hosts = open(opts.hadoop_dir+'/hosts', 'w')
    hosts.write('[master]\n')
    etchosts = open(opts.hadoop_dir+'/vnat/etchosts', 'w')
    for i in xrange(0, initialClusterSize+cnt):
        for s in cluster:
            if s[0] == opts.prefix+"-"+str(i):
                if s[0] == masterName:
                    hosts.write(s[1]+'\n\n'+'[slaves]\n')
                else:
                    hosts.write(s[1]+'\n')
                etchosts.write(s[1]+'\t'+s[0]+'\n')
    hosts.close()
    etchosts.close()

    slaves = open(opts.hadoop_dir+'/vnat/slaves', 'w')
    for s in cluster[1:]:
        slaves.write(s[0]+'\n')
    slaves.close()

    # Execute respective ansible playbook
    if not opts.extend:
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \"is_master=True, master_node="+cluster[0][0]+" master_ip="+cluster[0][1]+"\" -l master"
        print(cmd)
        retval = os.system(cmd)
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \"is_slave=True, master_node="+cluster[0][0]+" master_ip="+cluster[0][1]+"\" -l slaves"
        print(cmd)
        retval = os.system(cmd)
        slave_ip_list = []
        for i in xrange(1, cnt):
            slave_ip_list.append(cluster[i][0])
        enable_ssh_login(cluster[0][1], [cluster[0][0]])
        enable_ssh_login(cluster[0][1], slave_ip_list)
    else:
        hosts_latest = open(opts.hadoop_dir+'/hosts.latest', 'w')
        hosts_latest.write('[master]\n')
        hosts_latest.write(cluster[0][1]+'\n\n'+'[slaves]\n')
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            hosts_latest.write(cluster[i][1]+'\n')
        hosts_latest.close()
        # update etc/hosts in all nodes - TODO: de-duplicate entries
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \""+"is_master=True, master_ip="+cluster[0][1]+"\""+" -t etchosts"
        print cmd
        retval = os.system(cmd) 
        cmd = "ansible-playbook hadoop_vnat.yml -i hosts.latest -vv --extra-vars \""+"is_slave=True, master_node="+cluster[0][0]+" master_ip="+cluster[0][1]+"\""+" -l slaves"
        print cmd
        retval = os.system(cmd) 
        slave_ip_list = []
        for i in xrange(initialClusterSize, initialClusterSize+cnt):
            slave_ip_list.append(cluster[i][0])
        print "slave_ip_list=", slave_ip_list 
        enable_ssh_login(cluster[0][1], slave_ip_list)

    # Update conf/slaves in master
    cmd = "ansible-playbook hadoop_vnat.yml -i hosts -vv --extra-vars \""+"is_master=True, master_ip="+cluster[0][1]+"\""+" -l master -t slaves"
    print cmd
    retval = os.system(cmd)

    log.info("Done.")