def remote_from_cluster_stdout(self, stdout, mname='ambari'):
    """
    Take the output from the up_aws_cluster script and parse it.
    First check it was successful, then find the connect command for one
    machine (mname), in order to return a remoteHost object and the iid.
    returns: remoteHost, iid
    """
    import kavedeploy as lD

    connectcmd = ""
    lines = stdout.split("\n")
    for num, line in enumerate(lines):
        if (mname + " connect remotely with") in line:
            connectcmd = lines[num + 1].strip()
    # the summary line looks like "Complete, created: {...}"
    adict = lines[-2].replace("Complete, created:", "")
    # try interpreting as json
    adict = d2j(adict)
    iid, ip = adict[mname]
    self.assertTrue(ip in connectcmd,
                    ip + " Wrong IP seen for connecting to " + mname + ' ' + connectcmd)
    jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
    import json
    acconf = json.loads(jsondat.read())
    jsondat.close()
    keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
    self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                    "wrong keyfile seen in (" + connectcmd + ")")
    return lD.remoteHost("root", ip, keyfile), iid
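# Hedged sketch: d2j is used above but not defined in this excerpt. Judging
# by the "try interpreting as json" comment it turns the printed python dict
# from up_aws_cluster into a real dict; ast.literal_eval is one safe way to
# do that. This is an assumption, not the library's actual implementation.
import ast

def d2j(adict):
    # literal_eval also handles single-quoted python dicts that json.loads rejects
    return ast.literal_eval(adict.strip())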
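# A hedged usage sketch for remote_from_cluster_stdout; the script name,
# cluster name and deploy_dir are illustrative assumptions, not taken from
# this excerpt:
# stdout = lD.run_quiet(deploy_dir + "/aws/up_aws_cluster.py Test-cluster")
# remote, iid = self.remote_from_cluster_stdout(stdout, mname='ambari')
# remote.run("hostname")  # the returned remoteHost accepts ssh commands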
def deploy_os(self, osval, instancetype="m4.large"):
    """
    Up one machine of the given OS (osval) with the scripts and return an
    lD.remoteHost to that machine.
    instancetype: AWS instance type, default m4.large
    """
    import kavedeploy as lD
    import kaveaws as lA

    instancetype = lA.chooseinstancetype(instancetype)
    deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
    stdout = lD.run_quiet(deploy_dir + "/aws/deploy_known_instance.py " + osval
                          + " Test-" + osval + "-" + self.service + " " + instancetype + " --not-strict")
    self.assertTrue(stdout.split("\n")[-1].startswith("OK, iid "))
    iid = stdout.split("\n")[-1].strip()[len("OK, iid "):].split(" ")[0]
    ip = stdout.split("\n")[-1].strip().split(" ")[-1]
    self.assertTrue(stdout.split("\n")[-3].startswith("connect remotely with:"))
    connectcmd = stdout.split("\n")[-2]
    self.assertTrue(ip in connectcmd, "wrong IP seen in (" + connectcmd + ")")
    jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
    import json
    acconf = json.loads(jsondat.read())
    jsondat.close()
    keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
    self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                    "wrong keyfile seen in (" + connectcmd + ")")
    ambari = lD.remoteHost("root", ip, keyfile)
    ambari.register()
    import time
    time.sleep(5)
    if osval.startswith("Centos"):
        # add 10GB to /opt
        stdout = lD.run_quiet(deploy_dir + "/aws/add_ebsvol_to_instance.py " + iid + " --not-strict")
    return ambari, iid
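# Hedged example of calling deploy_os from a test; "Centos7" is an
# illustrative osval, not a value taken from this excerpt:
# ambari, iid = self.deploy_os("Centos7", instancetype="m4.large")
# ambari.run("yum clean all")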
def deploy_dev(self, instancetype="m4.large"):
    """
    Up one centos machine with the scripts and return an lD.remoteHost
    to that machine.
    instancetype: AWS instance type, default m4.large
    """
    import kavedeploy as lD
    import kaveaws as lA

    instancetype = lA.chooseinstancetype(instancetype)
    deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
    stdout = lD.run_quiet(deploy_dir + "/aws/deploy_one_centos_instance.py Test-" + self.service
                          + " " + instancetype + " --ambari-dev --not-strict")
    self.assertTrue(stdout.split("\n")[-1].startswith("OK, iid "))
    iid = stdout.split("\n")[-1].strip()[len("OK, iid "):].split(" ")[0]
    ip = stdout.split("\n")[-1].strip().split(" ")[-1]
    self.assertTrue(stdout.split("\n")[-3].startswith("connect remotely with:"))
    connectcmd = stdout.split("\n")[-2]
    self.assertTrue(ip in connectcmd, "wrong IP seen in (" + connectcmd + ")")
    jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
    import json
    acconf = json.loads(jsondat.read())
    jsondat.close()
    keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
    self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                    "wrong keyfile seen in (" + connectcmd + ")")
    # add 10GB as /opt by default!
    ambari = lD.remoteHost("root", ip, keyfile)
    ambari.register()
    # Keyless access to itself is needed for blueprints, but is already done
    # by the new_dev_image script; note the internal ip will be different here!
    # lD.add_as_host(edit_remote=ambari, add_remote=ambari, dest_internal_ip=lA.priv_ip(iid))  # done in the deploy script
    # lD.configure_keyless(ambari, ambari, dest_internal_ip=lA.priv_ip(iid), preservehostname=True)
    abranch = ""
    if self.branch:
        abranch = self.branch
    ambari.cp(os.path.realpath(os.path.dirname(lD.__file__)) + "/../remotescripts/default.netrc",
              "~/.netrc")
    return ambari, iid
def runTest(self):
    """
    Tests which check the function of the deployment library, but do not
    need any environment parameters or access to aws
    """
    import kavedeploy as lD

    lD.testproxy()
    self.assertIsNot(lD.which("ls"), None)
    self.assertRaises(RuntimeError, lD.run_quiet, "thisisnotacommand")
    stdout = lD.run_quiet(['which', 'ls'], shell=False)
    self.assertTrue('/bin/ls' in stdout)
    self.assertIsNot(lD.which("pdsh"), None,
                     "pdsh is not installed, please install it in order to test the multiremotes functionality, "
                     "sudo yum -y install pdsh")
    lD.run_quiet("touch /tmp/fake_test_ssh_key.pem")
    lD.run_quiet("chmod 400 /tmp/fake_test_ssh_key.pem")
    test = lD.remoteHost("root", "test", '/tmp/fake_test_ssh_key.pem')
    test = lD.multiremotes([test.host], access_key='/tmp/fake_test_ssh_key.pem')
if __name__ == "__main__":
    ip, iid, security_conf, dest_type = parse_opts()
    # only needed in main function
    installfrom = os.path.realpath(os.sep.join(__file__.split(os.sep)[:-1]))
    liblocation = os.path.realpath(installfrom)
    jsondat = open(os.path.expanduser(security_conf))
    security_config = json.loads(jsondat.read())
    jsondat.close()
    sys.path.append(liblocation)
    import kavedeploy as lD
    import kaveaws as lA
    lA.checksecjson(security_config, requirefield=[], requirekeys=["SSH"])
    if ip is None:
        lA.testaws()
        ip = lA.pub_ip(iid)
    git = False
    gitenv = None
    if lD.detect_proxy():
        print "Did you already configure this machine to access port " + str(
            lD.proxy_port) + "? If not you'll need to turn your proxy off."
        lD.testproxy()
    remote = lD.remoteHost('root', ip, security_config["AccessKeys"]["SSH"]["KeyFile"])
    if "GIT" in security_config["AccessKeys"]:
        git = True
        gitenv = security_config["AccessKeys"]["GIT"]
    lD.deploy_our_soft(remote, pack="kavetoolbox", git=git, gitenv=gitenv,
                       options='--' + dest_type)
    # if dest_type == "workstation":
    #     lD.confremotessh(remote)
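# Hedged invocation sketch: parse_opts (not shown in this excerpt) evidently
# yields (ip, iid, security_conf, dest_type), so a call presumably looks like
#   ./<this_script>.py <iid> <security_config.json> workstation
# with ip resolved from the iid when not given. Argument order and any flags
# are assumptions; see parse_opts for the real interface.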
print "submitted" iid = lA.iid_from_up_json(upped)[0] import time time.sleep(5) lA.name_resource(iid, username + "_dev_box") ip = lA.pub_ip(iid) acount = 0 while (ip is None and acount < 20): print "waiting for IP" lD.mysleep(1) ip = lA.pub_ip(iid) acount = acount + 1 remoteuser = lA.default_usernamedict[lA.default_os] remote = lD.remoteHost(remoteuser, ip, keyloc) print "waiting until contactable" lD.wait_until_up(remote, 20) remote = lD.remote_cp_authkeys(remote, 'root') if "Tags" in security_config: resources = lA.find_all_child_resources(iid) lA.tag_resources(resources, security_config["Tags"]) remote.register() ################################################## # Renaming, configuring firewall and adding more disk space ################################################## print "Renaming, configuring firewall and adding more disk space" lD.rename_remote_host(remote, username + "_dev_box", 'kave.io') remote.run("mkdir -p /etc/kave/") remote.run(
def add_new_ebs_vol(iid, conf, access_key):
    """
    Create and name a new ebs volume, give it to a pre-existing instance
    and mount it on that instance.

    conf is a dictionary which must contain:
      "Mount": "where-to-mount-it", "Size": SizeInGB,
      "Attach": "aws_expected_device_name", "Fdisk": "device_name_seen_by_fdisk"
    e.g.:
      "Mount": "/opt2", "Size": 1, "Attach": "/dev/sdb", "Fdisk": "/dev/xvdb"

    Fdisk is optional; if not given it will be guessed from "Attach" and the region, i.e.:
      region   Attach   Fdisk
      eu-*     sd<X>    xvd<X>     (e.g. sdb->xvdb)
      ap-*     sd<Y>    xvd<Y+4>   (e.g. sdb->xvdf)
    """
    try:
        i = desc_instance(iid)
    except lD.ShellExecuteError:
        raise ValueError(iid + " is not one of your instance IDs")
    # get a reference to this instance
    ip = pub_ip(iid)
    remote = lD.remoteHost("root", ip, access_key)
    # choose the fdisk device in the case this is a broken Tokyo centos6 instance
    if "Fdisk" not in conf:
        import string
        alpha = string.ascii_lowercase
        skip = 0
        if detect_region().startswith('eu'):
            # eu-* sd<X> xvd<X> (e.g. sdb->xvdb)
            skip = 0
        elif detect_region().startswith('ap') and remote.detect_linux_version() in ["Centos6"]:
            # ap-* sd<Y> xvd<Y+4> (e.g. sdb->xvdf)
            skip = 4
        conf["Fdisk"] = '/dev/xvd' + alpha[alpha.index(conf["Attach"][-1]) + skip]
    av_zone = i["Reservations"][0]["Instances"][0]["Placement"]["AvailabilityZone"]
    voljson = runawstojson("ec2 create-volume --size " + str(conf["Size"])
                           + " --availability-zone " + av_zone)
    instnam = ""
    for tag in i["Reservations"][0]["Instances"][0]["Tags"]:
        if tag["Key"] == "Name":
            instnam = tag["Value"]
    # print voljson
    vol_id = voljson["VolumeId"]
    name_resource(vol_id, instnam + conf["Mount"].replace("/", "_"))
    time.sleep(5)
    count = 0
    while count < 10:
        descvol = runawstojson("ec2 describe-volumes --volume " + vol_id)
        # print descvol
        if descvol['Volumes'][0]["State"] == "available":
            break
        time.sleep(5)
        count = count + 1
    resjson = runawstojson("ec2 attach-volume --volume-id " + vol_id
                           + " --instance-id " + iid
                           + " --device " + conf["Attach"])
    # print resjson
    time.sleep(5)
    count = 0
    while count < 10:
        descvol = runawstojson("ec2 describe-volumes --volume " + vol_id)
        # print descvol
        if descvol['Volumes'][0]['Attachments'][0]["State"] == "attached":
            break
        time.sleep(5)
        count = count + 1
    remote.cp(os.path.dirname(__file__) + "/../remotescripts/fdiskwrap.sh", "~/fdiskwrap.sh")
    remote.run("chmod a+x fdiskwrap.sh")
    try:
        remote.run("./fdiskwrap.sh " + conf["Fdisk"])
    except lD.ShellExecuteError:
        time.sleep(30)
        remote.run("./fdiskwrap.sh " + conf["Fdisk"])
    remote.run("mkfs.ext4 -b 4096 " + conf["Fdisk"] + "1 ")
    remote.run("bash -c 'echo \"" + conf["Fdisk"] + "1 " + conf["Mount"]
               + " ext4 defaults 1 1\" >> /etc/fstab'")
    mvto = " /" + conf["Mount"].replace("/", "_")
    remote.run("bash -c \"mv " + conf["Mount"] + mvto + "; mkdir " + conf["Mount"]
               + "; mount " + conf["Mount"] + ";\"")
    remote.run("bash -c \"if [ -d " + mvto + " ] ; then chmod --reference "
               + mvto + " " + conf["Mount"] + " ; fi\"")
    remote.run("bash -c 'shopt -s dotglob; if [ \"$(ls -A " + mvto + ")\" ] ; then mv "
               + mvto + "/* " + conf["Mount"] + "/ ; fi'")
    res = remote.run("df -hP")
    if conf["Mount"] not in res:
        raise lD.ShellExecuteError("Could not mount the requested disk, resulted in " + res)
    return vol_id
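# Hedged usage sketch; the instance id and key path are placeholders, and
# "Fdisk" is omitted so the region-based guess above is exercised:
# conf = {"Mount": "/opt2", "Size": 10, "Attach": "/dev/sdb"}
# vol_id = add_new_ebs_vol("i-0123456789abcdef0", conf, "~/.ssh/aws_key.pem")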
    url = prot + ambhost + ':' + str(port) + api + apath
    # print url
    s = lD.request_session()
    req = s.post(url, auth=HTTPBasicAuth(user, passwd),
                 headers={'X-Requested-By': 'ambari'},
                 data=json.dumps(data))
    return _r2j(req)


ret = ambari_get("clusters")

##################################################################
# Start ambari agents
##################################################################
print "Attempting to start ambari agents on all", len(hosts), "nodes"
sys.stdout.flush()
ambari = lD.remoteHost("root", thehost, access_key)
# Step one, install myself, dsh and deploy ambari agents to all nodes
lD.install_pdsh(ambari)
# modify iptables, only in case of Centos6
lD.disable_security(ambari)
admin = ambari.run("hostname")
whole_cluster = lD.multiremotes(hosts, jump=ambari)
# Check if all nodes in the cluster are contactable
try:
    whole_cluster.check(firsttime=True)
except lD.ShellExecuteError:
    print "Could not access machines with passwordless ssh, the ambari node must have passwordless ssh access to the " \
ip = None
acount = 0
while (ip is None and acount < 10):
    try:
        ip = lA.pub_ip(iid)
    except ValueError:
        pass
    if ip is None:
        print "waiting for IP"
        lD.mysleep(1)
    acount = acount + 1
if ip is None:
    raise SystemError(iid + " no ip assigned after quite some time")
remoteuser = lA.default_usernamedict[lA.default_os]
remote = lD.remoteHost(remoteuser, ip, amazon_keyfile)
lD.wait_until_up(remote, 20)
remote = lD.remote_cp_authkeys(remote, 'root')
if "Tags" in security_config:
    resources = lA.find_all_child_resources(iid)
    lA.tag_resources(resources, security_config["Tags"])
remote.register()
instance_to_remote[iid] = remote

allremotes = ["ssh:root@" + remote.host for remote in instance_to_remote.values()]
allremotes = lD.multiremotes(list_of_hosts=allremotes, access_key=amazon_keyfile)
print "test local PDSH, install pdcp"
print allremotes.run("echo yes")
allremotes.run("yum clean all")
lD.install_pdsh(allremotes)
print "submitted" iid = lA.iid_from_up_json(upped)[0] import time time.sleep(5) lA.name_resource(iid, "new-dev-image") ip = lA.pub_ip(iid) acount = 0 while (ip is None and acount < 20): print "waiting for IP" lD.mysleep(1) ip = lA.pub_ip(iid) acount = acount + 1 remoteuser = lA.default_usernamedict[lA.default_os] remote = lD.remoteHost(remoteuser, ip, keyloc) print "waiting until contactable" lD.wait_until_up(remote, 20) remote = lD.remote_cp_authkeys(remote, 'root') if "Tags" in security_config: resources = lA.find_all_child_resources(iid) lA.tag_resources(resources, security_config["Tags"]) remote.register() print "Renaming, configuring firewall and adding more disk space" lD.rename_remote_host(remote, "ambari", 'kave.io') remote.run("mkdir -p /etc/kave/") remote.run("/bin/echo http://repos:[email protected]/ >> /etc/kave/mirror") remote.run('yum install curl nss -y; yum update curl nss -y --enablerepo="updates"') lD.add_as_host(edit_remote=remote, add_remote=remote, dest_internal_ip=lA.priv_ip(iid)) lD.configure_keyless(remote, remote, dest_internal_ip=lA.priv_ip(iid), preservehostname=True) # nope! Don't want 443 as ssh by default any longer!
acount = 0
while (ip is None and acount < 20):
    print "waiting for IP"
    lD.mysleep(1)
    ip = lA.pub_ip(iid)
    acount = acount + 1
if osval == "Centos6":
    remoteuser = '******'
else:
    # strip the version digits to get the default user, e.g. "Ubuntu14" -> "ubuntu"
    remoteuser = ''.join([i for i in osval if not i.isdigit()]).lower()
if os.path.exists(os.path.realpath(os.path.expanduser(keyloc))):
    print "waiting until contactable, ctrl-C to quit"
    try:
        remote = lD.remoteHost(remoteuser, ip, keyloc)
        lD.wait_until_up(remote, 20)
        if "Tags" in security_config:
            resources = lA.find_all_child_resources(iid)
            lA.tag_resources(resources, security_config["Tags"])
        remote.register()
        remote = lD.remote_cp_authkeys(remote, 'root')
        lD.rename_remote_host(remote, machinename, 'kave.io')
        lD.confallssh(remote)
        if osval.startswith("Centos"):
            remote.run("yum clean all")
        remote.describe()
    except KeyboardInterrupt:
        pass
else:
    print "Warning: not contactable since keyfile supplied does not exist locally,",