def deploy_os(self, osval, instancetype="m4.large"):
    """
    Up one centos machine with the scripts and return an lD.remoteHost to that machine

    instancetype -> None: m4.large
    """
    import kavedeploy as lD
    import kaveaws as lA

    instancetype = lA.chooseinstancetype(instancetype)
    deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
    stdout = lD.run_quiet(deploy_dir + "/aws/deploy_known_instance.py " + osval + " Test-" + osval + "-"
                          + self.service + " " + instancetype + " --not-strict")
    lines = stdout.split("\n")
    # the deploy script ends with a line "OK, iid <iid> ... <ip>", preceded by
    # the ssh command one can use to connect to the new machine
    self.assertTrue(lines[-1].startswith("OK, iid "))
    summary = lines[-1].strip()
    iid = summary[len("OK, iid "):].split(" ")[0]
    ip = summary.split(" ")[-1]
    self.assertTrue(lines[-3].startswith("connect remotely with:"))
    connectcmd = lines[-2]
    self.assertTrue(ip in connectcmd, "wrong IP seen in (" + connectcmd + ")")
    import json
    with open(os.path.expanduser(os.environ["AWSSECCONF"])) as jsondat:
        acconf = json.loads(jsondat.read())
    keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
    self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                    "wrong keyfile seen in (" + connectcmd + ")")
    ambari = lD.remoteHost("root", ip, keyfile)
    ambari.register()
    import time
    time.sleep(5)
    if osval.startswith("Centos"):
        # add 10GB to /opt
        lD.run_quiet(deploy_dir + "/aws/add_ebsvol_to_instance.py " + iid + " --not-strict")
    return ambari, iid
def check_opts():
    """
    Validate and consume command-line options from sys.argv.

    Flags handled (each is removed from sys.argv as it is consumed):
      -h/--help      print help and exit
      --verbose      set lD.debug
      --strict       enable strict ssh host-key checking
      --not-strict   disable strict ssh host-key checking
      --this-branch  use the currently checked-out git branch (must be pushed)

    Raises:
      NameError: the local branch does not exist on the origin remote
      AttributeError: wrong number of positional parameters
      IOError: the json config file (sys.argv[2]) does not exist
    """
    if "-h" in sys.argv or "--help" in sys.argv:
        help()
        sys.exit(0)
    if "--verbose" in sys.argv:
        lD.debug = True
        sys.argv = [s for s in sys.argv if s != "--verbose"]
    else:
        lD.debug = False
    if "--strict" in sys.argv:
        # BUGFIX: previously filtered out "--not-strict" here, leaving
        # "--strict" behind in sys.argv and skewing the positional checks below
        sys.argv = [s for s in sys.argv if s != "--strict"]
        lD.strict_host_key_checking = True
    if "--not-strict" in sys.argv:
        sys.argv = [s for s in sys.argv if s != "--not-strict"]
        lD.strict_host_key_checking = False
    if "--this-branch" in sys.argv:
        sys.argv = [s for s in sys.argv if s != "--this-branch"]
        global version
        version = lD.run_quiet("bash -c \"cd " + os.path.dirname(__file__)
                               + "; git branch | sed -n '/\* /s///p'\"")
        stdout = lD.run_quiet("bash -c 'cd " + os.path.dirname(__file__) + "; git branch -r;'")
        if not ("origin/" + version in [s.strip() for s in stdout.split() if len(s.strip())]):
            raise NameError("There is no remote branch called " + version
                            + " push your branch back to the origin and try again")
    if len(sys.argv) < 3:
        help()
        raise AttributeError("You did not supply enough parameters")
    if len(sys.argv) > 5:
        help()
        raise AttributeError("You supplied too many parameters")
    if not os.path.exists(os.path.expanduser(sys.argv[2])):
        raise IOError("json config file must exist " + sys.argv[2])
def check_opts():
    """
    Validate and consume command-line options from sys.argv.

    Flags handled (each is removed from sys.argv as it is consumed):
      -h/--help, --verbose, --strict, --not-strict, --this-branch.

    Raises:
      NameError: the local branch does not exist on the origin remote
      AttributeError: wrong number of positional parameters
      IOError: the json config file (sys.argv[2]) does not exist
    """
    if "-h" in sys.argv or "--help" in sys.argv:
        help()
        sys.exit(0)
    if "--verbose" in sys.argv:
        lD.debug = True
        sys.argv = [s for s in sys.argv if s != "--verbose"]
    else:
        lD.debug = False
    if "--strict" in sys.argv:
        # BUGFIX: previously filtered out "--not-strict" here, leaving
        # "--strict" behind in sys.argv and skewing the positional checks below
        sys.argv = [s for s in sys.argv if s != "--strict"]
        lD.strict_host_key_checking = True
    if "--not-strict" in sys.argv:
        sys.argv = [s for s in sys.argv if s != "--not-strict"]
        lD.strict_host_key_checking = False
    if "--this-branch" in sys.argv:
        sys.argv = [s for s in sys.argv if s != "--this-branch"]
        global version
        version = lD.run_quiet("bash -c \"cd " + os.path.dirname(__file__)
                               + "; git branch | sed -n '/\* /s///p'\"")
        stdout = lD.run_quiet("bash -c 'cd " + os.path.dirname(__file__) + "; git branch -r;'")
        if not ("origin/" + version in [s.strip() for s in stdout.split() if len(s.strip())]):
            raise NameError(
                "There is no remote branch called " + version
                + " push your branch back to the origin and try again")
    if len(sys.argv) < 3:
        help()
        raise AttributeError("You did not supply enough parameters")
    if len(sys.argv) > 5:
        help()
        raise AttributeError("You supplied too many parameters")
    if not os.path.exists(os.path.expanduser(sys.argv[2])):
        raise IOError("json config file must exist " + sys.argv[2])
def parse_opts():
    """
    Consume recognised flags from sys.argv and return the optional
    instance-id positional argument (or None when absent).

    Flags handled: -h/--help, --skip-ambari, --skip-blueprint,
    --verbose and --this-branch.
    """
    global skip_ambari
    global skip_blueprint
    global version
    if "-h" in sys.argv or "--help" in sys.argv:
        help()
        sys.exit(0)
    if "--skip-ambari" in sys.argv:
        skip_ambari = True
        sys.argv = [a for a in sys.argv if a != "--skip-ambari"]
    if "--skip-blueprint" in sys.argv:
        skip_blueprint = True
        sys.argv = [a for a in sys.argv if a != "--skip-blueprint"]
    if "--verbose" in sys.argv:
        lD.debug = True
        sys.argv = [a for a in sys.argv if a != "--verbose"]
    if "--this-branch" in sys.argv:
        # resolve the currently checked-out branch and require it on origin
        version = lD.run_quiet(
            "bash -c \"cd " + os.path.dirname(__file__) + "; git branch | sed -n '/\* /s///p'\"")
        stdout = lD.run_quiet("bash -c 'cd " + os.path.dirname(__file__) + "; git branch -r;'")
        remote_branches = [b.strip() for b in stdout.split() if len(b.strip())]
        if "origin/" + version not in remote_branches:
            raise AttributeError("There is no remote branch called " + version
                                 + " push your branch back to the origin to deploy")
        sys.argv = [a for a in sys.argv if a != "--this-branch"]
    if len(sys.argv) > 2:
        help()
        raise AttributeError("You supplied too many arguments")
    iid = None
    if len(sys.argv) > 1:
        iid = sys.argv[1]
    return iid
def pre_check(self):
    """
    Check that security config exists and that lD library is importable

    Verifies AWSSECCONF is set and readable, the local aws cli works,
    and (when a branch is requested) that it exists on origin.
    Returns the imported kavedeploy module.
    """
    import os
    import json
    import kavedeploy as lD
    import kaveaws as lA

    lD.debug = self.debug
    lD.strict_host_key_checking = False
    if "AWSSECCONF" not in os.environ:
        raise SystemError(
            "You need to set the environment variable AWSSECCONF to point to your security config file before "
            "running this test")
    self.assertTrue(lA.testaws(), "Local aws installation incomplete, try again")
    self.assertTrue(len(lA.detect_region()) > 0,
                    "Failed to detect aws region, have you run aws configure?")
    with open(os.path.expanduser(os.environ["AWSSECCONF"])) as conf_file:
        security_config = json.loads(conf_file.read())
    self.assertTrue(lA.checksecjson(security_config),
                    "Security config not readable correctly or does not contain enough keys!")
    if self.branch == "__local__":
        self.branch = lD.run_quiet("bash -c \"cd " + os.path.dirname(__file__)
                                   + "; git branch | sed -n '/\* /s///p'\"")
    if self.branch == "__service__":
        self.branch = self.service
    if self.branch is not None:
        stdout = lD.run_quiet("bash -c 'cd " + os.path.dirname(__file__) + "; git branch -r;'")
        remote_branches = [b.strip() for b in stdout.split() if len(b.strip())]
        self.assertTrue("origin/" + self.branch in remote_branches,
                        "There is no remote branch called " + self.branch
                        + " push your branch back to the origin to run this automated test")
    return lD
def runTest(self):
    """
    Each service must appear twice in service.sh

    For every service discovered by base.find_services(), grep
    bin/service.sh and require at least two occurrences of its name.
    """
    # this list is a list of things that you can't install with service.sh, probably because the service has
    # multiple components
    ignore_services = []
    import os
    import kavedeploy as lD
    lD.debug = False
    # repository root: two directories above this test file
    # (renamed from 'dir', which shadowed the builtin; dropped unused 'import sys')
    repo_dir = os.path.realpath(os.path.dirname(__file__) + "/../../")
    for service, sdir in base.find_services():
        if service in ignore_services:
            continue
        cmd = "grep -e " + service + " " + repo_dir + "/bin/service.sh" + " | wc -l"
        count = int(lD.run_quiet(cmd).strip())
        self.assertTrue(count >= 2, "Not enough copies of " + service + " in service.sh")
def deploy_blueprint(self, ambari, blueprint, cluster): """ Deploy a blueprint on this ambari node, and wait for it to be up! """ cname = cluster.split('/')[-1].split('.')[0] import kavedeploy as lD ip = ambari.host deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../') # wait until ambari server is up self.wait_for_ambari(ambari) stdout = lD.run_quiet(deploy_dir + "/deploy_from_blueprint.py " + blueprint + " " + cluster + " " + ip + " $AWSSECCONF --not-strict") state = self.monitor_request(ambari, cname) if state == "ABORTED": print "Trying to recover from aborted blueprint with restarts" ambari.cp(os.path.realpath(os.path.dirname(lD.__file__)) + "/../remotescripts/default.netrc", "~/.netrc") stdout = ambari.run("./[a,A]mbari[k,K]ave/dev/restart_all_services.sh " + cname) reqid = stdout.strip().split("\n")[-1] state = self.monitor_request(ambari, cname, requestid=reqid) self.assertFalse(state == "FAILED", "deploy from blueprint failed (" + ' '.join(ambari.sshcmd()) + ")") self.assertFalse(state == "ABORTED", "deploy from blueprint aborted (" + ' '.join(ambari.sshcmd()) + ")") self.assertFalse(state == "UNKNOWN", self.service + " did not install from blueprint after 60 minutes (" + ' '.join(ambari.sshcmd()) + ")") if state == "COMPLETED": # done! return True else: raise ValueError("Unknown state: " + str(state) + " (" + ' '.join(ambari.sshcmd()) + ")") return
def checksecjson(json, requirefield=["SecurityGroup"], requirekeys=["AWS", "GIT", "SSH"]):
    """
    Check if security JSON file has all required fields and if the key file has the correct permissions

    :param json: parsed security config (a dict)
    :param requirefield: top-level fields which must be present
    :param requirekeys: entries which must exist under "AccessKeys"; each
        checked entry's "KeyFile" must exist with X00 permissions (400 or 600)
    :returns: True when all checks pass; None when requirekeys is empty
    :raises IOError: missing fields/keys, missing keyfile or bad permissions
    :raises KeyError: "AccessKeys" absent while requirekeys is non-empty
    """
    # NOTE: the list defaults are never mutated, so the shared-default pitfall
    # does not apply here; kept as-is to preserve the interface.
    missing = [k for k in requirefield if k not in json.keys()]
    if len(missing):
        raise IOError("Your json file is missing the following keys " + missing.__str__())
    if not len(requirekeys):
        return
    if "AccessKeys" not in json:
        raise KeyError("You must specify access keys " + requirekeys.__str__())
    missing = [k for k in requirekeys if k not in json["AccessKeys"]]
    if len(missing):
        raise IOError("Your json file keys are missing the following Access Keys " + missing.__str__())
    # .items() behaves identically on python 2 and 3 (was py2-only .iteritems())
    for key, val in json["AccessKeys"].items():
        if key not in requirekeys:
            continue
        if not os.path.exists(os.path.expanduser(val["KeyFile"])):
            raise IOError("Keyfiles must exist " + val["KeyFile"])
        # "------" in the ls -l mode string means group/other have no access
        # bits set, i.e. owner-only permissions such as 400 or 600
        if "------" not in lD.run_quiet("ls -l " + val["KeyFile"]):
            raise IOError(
                "Your private keyfile " + val["KeyFile"] + " " + key
                + " needs to have X00 permissions (400 or 600).")
    return True
def checksecjson(json, requirefield=["SecurityGroup"], requirekeys=["AWS", "GIT", "SSH"]):
    """
    Check a parsed security-config dict for required fields and keyfile permissions.

    :param json: parsed security config (a dict)
    :param requirefield: top-level fields which must be present
    :param requirekeys: entries which must exist under "AccessKeys"; each
        checked entry's "KeyFile" must exist with X00 permissions (400 or 600)
    :returns: True when all checks pass; None when requirekeys is empty
    :raises IOError: missing fields/keys, missing keyfile or bad permissions
    :raises KeyError: "AccessKeys" absent while requirekeys is non-empty
    """
    # NOTE: the list defaults are never mutated, so the shared-default pitfall
    # does not apply here; kept as-is to preserve the interface.
    missing = [k for k in requirefield if k not in json.keys()]
    if len(missing):
        raise IOError("Your json file is missing the following keys " + missing.__str__())
    if not len(requirekeys):
        return
    if "AccessKeys" not in json:
        raise KeyError("You must specify access keys " + requirekeys.__str__())
    missing = [k for k in requirekeys if k not in json["AccessKeys"]]
    if len(missing):
        raise IOError(
            "Your json file keys are missing the following Access Keys " + missing.__str__())
    # .items() behaves identically on python 2 and 3 (was py2-only .iteritems())
    for key, val in json["AccessKeys"].items():
        if key not in requirekeys:
            continue
        if not os.path.exists(os.path.expanduser(val["KeyFile"])):
            raise IOError("Keyfiles must exist " + val["KeyFile"])
        # "------" in the ls -l mode string means group/other have no access bits
        if "------" not in lD.run_quiet("ls -l " + val["KeyFile"]):
            raise IOError("Your private keyfile " + val["KeyFile"] + " " + key
                          + " needs to have X00 permissions (400 or 600).")
    return True
def runawstojson(cmd):
    """Run an aws CLI command and return its output parsed as JSON ({} when empty)."""
    # behind a proxy that breaks ssl verification, tell the cli not to verify
    if lD.detect_proxy() and lD.no_ssl_over_proxy:
        cmd = "--no-verify-ssl " + cmd
    output = lD.run_quiet("aws " + cmd).strip()
    if output:
        return json.loads(output)
    return {}
def runTest(self):
    """
    Tests which check the function of the deployment library, but do not need any environment
    parameters or access to aws
    """
    import kavedeploy as lD

    lD.testproxy()
    # basic shell helpers
    self.assertIsNot(lD.which("ls"), None)
    self.assertRaises(RuntimeError, lD.run_quiet, "thisisnotacommand")
    stdout = lD.run_quiet(['which', 'ls'], shell=False)
    self.assertTrue('/bin/ls' in stdout)
    self.assertIsNot(lD.which("pdsh"), None,
                     "pdsh is not installed, please install it in order to test the multiremotes functionality, "
                     "sudo yum -y install pdsh")
    # construct remote wrappers around a throw-away key file with 400 permissions
    lD.run_quiet("touch /tmp/fake_test_ssh_key.pem")
    lD.run_quiet("chmod 400 /tmp/fake_test_ssh_key.pem")
    single = lD.remoteHost("root", "test", '/tmp/fake_test_ssh_key.pem')
    single = lD.multiremotes([single.host], access_key='/tmp/fake_test_ssh_key.pem')
def deploycluster(self, clusterfile, cname=None):
    """
    Wrapper around up_aws_cluster.py

    :param clusterfile: cluster json description to deploy
    :param cname: cluster name; defaults to "Test-<service>"
    :returns: stdout of the up_aws_cluster.py run
    """
    import kavedeploy as lD

    if cname is None:
        cname = "Test-" + self.service
    deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
    args = [deploy_dir + "/aws/up_aws_cluster.py", cname, clusterfile, "--not-strict"]
    if self.branchtype in ["__local__"]:
        args.append("--this-branch")
    return lD.run_quiet(" ".join(args))
def deploy_dev(self, instancetype="m4.large"):
    """
    Up one centos machine with the scripts and return an lD.remoteHost to that machine

    instancetype -> None: m4.large

    :returns: (ambari remoteHost, instance id)
    """
    import kavedeploy as lD
    import kaveaws as lA
    instancetype = lA.chooseinstancetype(instancetype)
    deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
    stdout = lD.run_quiet(deploy_dir + "/aws/deploy_one_centos_instance.py Test-"
                          + self.service + " " + instancetype + " --ambari-dev --not-strict")
    # deploy script ends with "OK, iid <iid> ... <ip>", preceded by the ssh
    # command one can use to connect to the new machine
    self.assertTrue(stdout.split("\n")[-1].startswith("OK, iid "))
    iid = stdout.split("\n")[-1].strip()[len("OK, iid "):].split(" ")[0]
    ip = stdout.split("\n")[-1].strip().split(" ")[-1]
    self.assertTrue(stdout.split("\n")[-3].startswith("connect remotely with:"))
    connectcmd = stdout.split("\n")[-2]
    self.assertTrue(ip in connectcmd, "wrong IP seen in (" + connectcmd + ")")
    jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
    import json
    acconf = json.loads(jsondat.read())
    jsondat.close()
    keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
    self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                    "wrong keyfile seen in (" + connectcmd + ")")
    # add 10GB as /opt by default!
    ambari = lD.remoteHost("root", ip, keyfile)
    ambari.register()
    # keyless access to itself (needed for blueprints) is already configured by
    # the deploy/new_dev_image scripts, so nothing more to do here
    # removed: dead local 'abranch' (was assigned from self.branch, never read)
    ambari.cp(os.path.realpath(os.path.dirname(lD.__file__)) + "/../remotescripts/default.netrc",
              "~/.netrc")
    return ambari, iid
def pre_check(self):
    """
    Check that security config exists and that lD library is importable

    Also verifies the aws cli is configured and that any requested branch
    exists on the origin remote. Returns the imported kavedeploy module.
    """
    import os
    import json
    import kavedeploy as lD
    import kaveaws as lA

    lD.debug = self.debug
    lD.strict_host_key_checking = False
    if "AWSSECCONF" not in os.environ:
        raise SystemError(
            "You need to set the environment variable AWSSECCONF to point to your security config file before "
            "running this test")
    self.assertTrue(lA.testaws(), "Local aws installation incomplete, try again")
    self.assertTrue(len(lA.detect_region()) > 0,
                    "Failed to detect aws region, have you run aws configure?")
    with open(os.path.expanduser(os.environ["AWSSECCONF"])) as conf_file:
        security_config = json.loads(conf_file.read())
    self.assertTrue(lA.checksecjson(security_config),
                    "Security config not readable correctly or does not contain enough keys!")
    if self.branch == "__local__":
        self.branch = lD.run_quiet(
            "bash -c \"cd " + os.path.dirname(__file__) + "; git branch | sed -n '/\* /s///p'\"")
    if self.branch == "__service__":
        self.branch = self.service
    if self.branch is not None:
        stdout = lD.run_quiet("bash -c 'cd " + os.path.dirname(__file__) + "; git branch -r;'")
        remote_branches = [b.strip() for b in stdout.split() if len(b.strip())]
        self.assertTrue("origin/" + self.branch in remote_branches,
                        "There is no remote branch called " + self.branch
                        + " push your branch back to the origin to run this automated test")
    return lD
def runawstojson(cmd):
    """
    Run aws CLI command and return the response as JSON

    :param cmd: command to be executed (without the leading "aws")
    :returns: parsed JSON of the command output, or {} when output is empty
    """
    # behind a proxy that breaks ssl verification, tell the cli not to verify
    if lD.detect_proxy() and lD.no_ssl_over_proxy:
        cmd = "--no-verify-ssl " + cmd
    output = lD.run_quiet("aws " + cmd).strip()
    if output:
        return json.loads(output)
    return {}
def deploy_dev(self, instancetype="m4.large"):
    """
    Up one centos machine with the scripts and return an lD.remoteHost to that machine

    instancetype -> None: m4.large

    :returns: (ambari remoteHost, instance id)
    """
    import kavedeploy as lD
    import kaveaws as lA
    instancetype = lA.chooseinstancetype(instancetype)
    deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
    stdout = lD.run_quiet(deploy_dir + "/aws/deploy_one_centos_instance.py Test-"
                          + self.service + " " + instancetype + " --ambari-dev --not-strict")
    # deploy script ends with "OK, iid <iid> ... <ip>", preceded by the ssh
    # command one can use to connect to the new machine
    self.assertTrue(stdout.split("\n")[-1].startswith("OK, iid "))
    iid = stdout.split("\n")[-1].strip()[len("OK, iid "):].split(" ")[0]
    ip = stdout.split("\n")[-1].strip().split(" ")[-1]
    self.assertTrue(stdout.split("\n")[-3].startswith("connect remotely with:"))
    connectcmd = stdout.split("\n")[-2]
    self.assertTrue(ip in connectcmd, "wrong IP seen in (" + connectcmd + ")")
    jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
    import json
    acconf = json.loads(jsondat.read())
    jsondat.close()
    keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
    self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                    "wrong keyfile seen in (" + connectcmd + ")")
    # add 10GB as /opt by default!
    ambari = lD.remoteHost("root", ip, keyfile)
    ambari.register()
    # keyless access to itself (needed for blueprints) is already configured by
    # the deploy/new_dev_image scripts, so nothing more to do here
    # removed: dead local 'abranch' (was assigned from self.branch, never read)
    ambari.cp(os.path.realpath(os.path.dirname(lD.__file__)) + "/../remotescripts/default.netrc",
              "~/.netrc")
    return ambari, iid
def runTest(self):
    """
    Check that we can install the head of KaveToolbox on aws machines
    Three OSes are possible, Centos6, Centos7 and Ubuntu14
    """
    import os
    # create remote machine (removed unused 'import sys' and unused 'stdout' binding)
    lD = self.pre_check()
    ambari, iid = self.deploy_os(self.ostype)
    if self.ostype.startswith("Redhat7"):
        # add default 10GB in /opt
        deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
        lD.run_quiet(deploy_dir + "/aws/add_ebsvol_to_instance.py " + iid + " --not-strict ")
    self.deploy_ktb(ambari)
    self.wait_for_ktb(ambari)
    return self.check(ambari)
def runTest(self):
    """
    Check that we can install the head of KaveToolbox on aws machines
    Three OSes are possible, Centos6, Centos7 and Ubuntu14
    """
    import os
    # create remote machine (removed unused 'import sys' and unused 'stdout' binding)
    lD = self.pre_check()
    ambari, iid = self.deploy_os(self.ostype)
    if self.ostype.startswith("Ubuntu"):
        ambari.run('apt-get update')
    else:
        # add default 10GB in /opt
        deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
        lD.run_quiet(deploy_dir + "/aws/add_ebsvol_to_instance.py " + iid + " --not-strict ")
    self.deploy_ktb(ambari)
    self.wait_for_ktb(ambari)
    return self.check(ambari)
def runTest(self):
    """
    Each service must appear twice in service.sh

    For every service discovered by base.find_services(), grep
    bin/service.sh and require at least two occurrences of its name.
    """
    # this list is a list of things that you can't install with service.sh, probably because the service has
    # multiple components
    ignore_services = []
    import os
    import kavedeploy as lD
    lD.debug = False
    # repository root: two directories above this test file
    # (renamed from 'dir', which shadowed the builtin; dropped unused 'import sys')
    repo_dir = os.path.realpath(os.path.dirname(__file__) + "/../../")
    for service, sdir in base.find_services():
        if service in ignore_services:
            continue
        cmd = "grep -e " + service + " " + repo_dir + "/bin/service.sh" + " | wc -l"
        count = int(lD.run_quiet(cmd).strip())
        self.assertTrue(count >= 2, "Not enough copies of " + service + " in service.sh")
import kaveaws as lA import json try: jsondat = open(keyfile) security_config = json.loads(jsondat.read()) jsondat.close() lA.checksecjson(security_config, requirekeys=["AWS"]) keyfile = security_config["AccessKeys"]["AWS"]["KeyFile"] except: pass if not os.path.exists(os.path.expanduser(keyfile)): raise IOError("That is not a valid keyfile!", keyfile) if "------" not in lD.run_quiet("ls -l " + keyfile): raise IOError("Your private keyfile " + keyfile + " needs to have X00 permissions (400 or 600).") print "Choose instance ID from:" iidtoip = {} nametoip = {} ips = [] print "Name, iid, security_group(s), instancetype, publicIP, status" json = lA.desc_instance() for reservation in json["Reservations"]: for instance in reservation["Instances"]: # print instance if "PublicIpAddress" not in instance or not len(instance["PublicIpAddress"]): continue else:
lA.checksecjson(security_config, requirekeys=["AWS", "SSH"]) security_group = security_config["SecurityGroup"] amazon_keyfile = security_config["AccessKeys"]["AWS"]["KeyFile"] amazon_keypair_name = security_config["AccessKeys"]["AWS"]["KeyName"] gitenv = None git = False if "GIT" in security_config["AccessKeys"]: git = True gitenv = github_keyfile = security_config["AccessKeys"]["GIT"] subnet = None if "Subnet" in security_config.keys(): subnet = security_config["Subnet"] # Check that pdsh is locally installed try: lD.run_quiet('which pdsh') except lD.ShellExecuteError: raise SystemError('pdsh is not installed, please install pdsh first. Pdsh is useful to speed up large deployments.') dnsiid = None vpcid = None if "CloudFormation" in cluster_config: print "============================================" print "Create a new VPC from cloud formation script" print "============================================" sys.stdout.flush() # replace default keys with those from the security config file? import datetime _vpc_name = cluster_name + "-" + \
def detect_region():
    """
    Return the aws cli region setting (via 'aws configure get region').

    Needed to choose which instance images to create; currently tuned for
    tokyo images, should extend it to other regions...
    """
    return lD.run_quiet("aws configure get region")
lD.configure_keyless(remote, remote, lA.priv_ip(iid), preservehostname=True) # # # INSTALL AMBARI HEAD and Deploy a very simple default blueprint! # # if not skip_ambari: print "Installing ambari " + version + " from git" lD.deploy_our_soft(remote, version=version, git=git, gitenv=gitenv) print "Awaiting ambari installation ..." lD.wait_for_ambari(remote, check_inst=['inst.stderr', 'inst.stdout']) if not skip_blueprint: print "Deploying default blueprint" stdout = lD.run_quiet( base + "/../deploy_from_blueprint.py --not-strict " + base + "/../blueprints/default.blueprint.json " + base + "/../blueprints/default.cluster.json " + remote.host + " " + secf) print stdout print "Awaiting blueprint completion" lD.waitforrequest(remote, 'default', 1) # # # Stop the instance and create an image from it! # # print "Creating image from this installation" instance = lA.desc_instance(iid)["Reservations"][0]["Instances"][0] # print instance if instance["State"]["Name"] is "running": lA.killinstance(iid, "stop")
import kaveaws as lA import json try: jsondat = open(keyfile) security_config = json.loads(jsondat.read()) jsondat.close() lA.checksecjson(security_config, requirekeys=["AWS"]) keyfile = security_config["AccessKeys"]["AWS"]["KeyFile"] except: pass if not os.path.exists(os.path.expanduser(keyfile)): raise IOError("That is not a valid keyfile!", keyfile) if "------" not in lD.run_quiet("ls -l " + keyfile): raise IOError("Your private keyfile " + keyfile + " needs to have X00 permissions (400 or 600).") print "Choose instance ID from:" iidtoip = {} nametoip = {} ips = [] print "Name, iid, security_group(s), instancetype, publicIP, status" json = lA.desc_instance() for reservation in json["Reservations"]: for instance in reservation["Instances"]: # print instance if "PublicIpAddress" not in instance or not len( instance["PublicIpAddress"]):
lA.checksecjson(security_config, requirekeys=["AWS", "SSH"]) security_group = security_config["SecurityGroup"] amazon_keyfile = security_config["AccessKeys"]["AWS"]["KeyFile"] amazon_keypair_name = security_config["AccessKeys"]["AWS"]["KeyName"] gitenv = None git = False if "GIT" in security_config["AccessKeys"]: git = True gitenv = github_keyfile = security_config["AccessKeys"]["GIT"] subnet = None if "Subnet" in security_config.keys(): subnet = security_config["Subnet"] # Check that pdsh is locally installed try: lD.run_quiet('which pdsh') except lD.ShellExecuteError: raise SystemError( 'pdsh is not installed, please install pdsh first. Pdsh is useful to speed up large deployments.' ) dnsiid = None vpcid = None if "CloudFormation" in cluster_config: print "============================================" print "Create a new VPC from cloud formation script" print "============================================" sys.stdout.flush() # replace default keys with those from the security config file? import datetime