Example 1
    def deploy_os(self, osval, instancetype="m4.large"):
        """
        Up one centos machine with the scripts and return an lD.remoteHost to that machine
        instancetype -> None: m4.large
        """
        import kavedeploy as lD
        import kaveaws as lA
        instancetype = lA.chooseinstancetype(instancetype)
        deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
        stdout = lD.run_quiet(deploy_dir + "/aws/deploy_known_instance.py "
                              + osval + " Test-" + osval + "-" + self.service + " "
                              + instancetype + " --not-strict")
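        # deploy_known_instance.py is expected to end its output with a line of the
        # form "OK, iid <instance-id> ... <public-ip>"; the assertions and parsing
        # below pull the instance id and public IP out of that line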
        self.assertTrue(stdout.split("\n")[-1].startswith("OK, iid "))
        iid = stdout.split("\n")[-1].strip()[len("OK, iid "):].split(" ")[0]
        ip = stdout.split("\n")[-1].strip().split(" ")[-1]
        self.assertTrue(stdout.split("\n")[-3].startswith("connect remotely with:"))
        connectcmd = stdout.split("\n")[-2]
        self.assertTrue(ip in connectcmd, "wrong IP seen in (" + connectcmd + ")")
        jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
        import json

        acconf = json.loads(jsondat.read())
        jsondat.close()
        keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
        self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                        "wrong keyfile seen in (" + connectcmd + ")")
        ambari = lD.remoteHost("root", ip, keyfile)
        ambari.register()
        import time
        time.sleep(5)
        if osval.startswith("Centos"):
            # add 10GB to /opt
            stdout = lD.run_quiet(
                deploy_dir + "/aws/add_ebsvol_to_instance.py " + iid + " --not-strict")
        return ambari, iid
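These tests read AWS credentials from the JSON file pointed to by the AWSSECCONF environment variable. Going only by the keys accessed in the examples on this page (SecurityGroup, the optional Subnet, AccessKeys.AWS.KeyName and KeyFile, AccessKeys.SSH.KeyFile, plus an optional AccessKeys.GIT block whose contents are not shown here), a minimal sketch of that file could look like the following; every value is a placeholder and the real file may contain additional fields:

{
    "SecurityGroup": "sg-0123456789abcdef0",
    "Subnet": "subnet-0123456789abcdef0",
    "AccessKeys": {
        "AWS": {
            "KeyName": "my-keypair",
            "KeyFile": "~/.ssh/my-keypair.pem"
        },
        "SSH": {
            "KeyFile": "~/.ssh/my-keypair.pem"
        }
    }
}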
Example 2
    def deploy_dev(self, instancetype="m4.large"):
        """
        Up one centos machine with the scripts and return an lD.remoteHost to that machine
        instancetype -> None: m4.large
        """
        import kavedeploy as lD
        import kaveaws as lA
        instancetype = lA.chooseinstancetype(instancetype)

        deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
        stdout = lD.run_quiet(deploy_dir +
                              "/aws/deploy_one_centos_instance.py Test-" +
                              self.service + " " + instancetype +
                              " --ambari-dev --not-strict")
        self.assertTrue(stdout.split("\n")[-1].startswith("OK, iid "))
        iid = stdout.split("\n")[-1].strip()[len("OK, iid "):].split(" ")[0]
        ip = stdout.split("\n")[-1].strip().split(" ")[-1]
        self.assertTrue(
            stdout.split("\n")[-3].startswith("connect remotely with:"))
        connectcmd = stdout.split("\n")[-2]
        self.assertTrue(ip in connectcmd,
                        "wrong IP seen in (" + connectcmd + ")")
        jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
        import json

        acconf = json.loads(jsondat.read())
        jsondat.close()
        keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
        self.assertTrue(
            keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
            "wrong keyfile seen in (" + connectcmd + ")")
        # add 10GB as /opt by default!
        ambari = lD.remoteHost("root", ip, keyfile)
        ambari.register()
        #
        # Configure keyless access to itself: needed for blueprints. This is already done by the
        # new_dev_image script, but the internal IP will be different here.
        # lD.add_as_host(edit_remote=ambari, add_remote=ambari, dest_internal_ip=lA.priv_ip(iid))  # done in the deploy
        # script...
        #
        lD.configure_keyless(ambari,
                             ambari,
                             dest_internal_ip=lA.priv_ip(iid),
                             preservehostname=True)
        abranch = ""
        if self.branch:
            abranch = self.branch
        ambari.cp(
            os.path.realpath(os.path.dirname(lD.__file__)) +
            "/../remotescripts/default.netrc", "~/.netrc")
        return ambari, iid
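A minimal usage sketch of this helper from inside the same test class; the test method name and file names are placeholders, while deploy_dev and remoteHost.cp are the calls shown above:

    def test_something(self):  # hypothetical test method
        ambari, iid = self.deploy_dev(instancetype="m4.large")
        # the returned lD.remoteHost can copy further files onto the machine,
        # just as deploy_dev itself does with default.netrc
        ambari.cp("extra_config.conf", "~/extra_config.conf")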
Example 3
    def deploy_dev(self, instancetype="m4.large"):
        """
        Up one centos machine with the scripts and return an lD.remoteHost to that machine
        instancetype -> None: m4.large
        """
        import kavedeploy as lD
        import kaveaws as lA
        instancetype = lA.chooseinstancetype(instancetype)

        deploy_dir = os.path.realpath(os.path.dirname(lD.__file__) + '/../')
        stdout = lD.run_quiet(deploy_dir + "/aws/deploy_one_centos_instance.py Test-"
                              + self.service + " " + instancetype + " --ambari-dev --not-strict")
        self.assertTrue(stdout.split("\n")[-1].startswith("OK, iid "))
        iid = stdout.split("\n")[-1].strip()[len("OK, iid "):].split(" ")[0]
        ip = stdout.split("\n")[-1].strip().split(" ")[-1]
        self.assertTrue(stdout.split("\n")[-3].startswith("connect remotely with:"))
        connectcmd = stdout.split("\n")[-2]
        self.assertTrue(ip in connectcmd, "wrong IP seen in (" + connectcmd + ")")
        jsondat = open(os.path.expanduser(os.environ["AWSSECCONF"]))
        import json

        acconf = json.loads(jsondat.read())
        jsondat.close()
        keyfile = acconf["AccessKeys"]["SSH"]["KeyFile"]
        self.assertTrue(keyfile in connectcmd or os.path.expanduser(keyfile) in connectcmd,
                        "wrong keyfile seen in (" + connectcmd + ")")
        # add 10GB as /opt by default!
        ambari = lD.remoteHost("root", ip, keyfile)
        ambari.register()
        #
        # Configure keyless access to itself: needed for blueprints. This is already done by the
        # new_dev_image script, but the internal IP will be different here.
        # lD.add_as_host(edit_remote=ambari, add_remote=ambari, dest_internal_ip=lA.priv_ip(iid))  # done in the deploy
        # script...
        #
        lD.configure_keyless(ambari, ambari, dest_internal_ip=lA.priv_ip(iid), preservehostname=True)
        abranch = ""
        if self.branch:
            abranch = self.branch
        ambari.cp(os.path.realpath(os.path.dirname(lD.__file__))
                  + "/../remotescripts/default.netrc",
                  "~/.netrc")
        return ambari, iid
Example 4
keypair = security_config["AccessKeys"]["AWS"]["KeyName"]
keyloc = security_config["AccessKeys"]["AWS"]["KeyFile"]
subnet = None

if "Subnet" in security_config:
    subnet = security_config["Subnet"]

lA.testaws()

if lD.detect_proxy() and lD.proxy_blocks_22:
    raise SystemError(
        "This proxy blocks port 22, that means you can't ssh to your machines to do the initial configuration. To "
        "skip this check set kavedeploy.proxy_blocks_22 to false and kavedeploy.proxy_port=22")

lD.testproxy()
instancetype = lA.chooseinstancetype(instancetype)

upped = lA.up_os(osval, instancetype, security_group, keypair, subnet=subnet)
print "submitted"

iid = lA.iid_from_up_json(upped)[0]

import time

time.sleep(5)
lA.name_resource(iid, machinename)

ip = lA.pub_ip(iid)
acount = 0
while (ip is None and acount < 20):
    print "waiting for IP"
Example 5
jsondat = open(secf)
security_config = json.loads(jsondat.read())
jsondat.close()
lA.checksecjson(security_config, requirekeys=["AWS"])

security_group = security_config["SecurityGroup"]
keypair = security_config["AccessKeys"]["AWS"]["KeyName"]
keyloc = security_config["AccessKeys"]["AWS"]["KeyFile"]
subnet = None

if "Subnet" in security_config:
    subnet = security_config["Subnet"]

lA.testaws()

instancetype = lA.chooseinstancetype("c4.2xlarge")

##################################################
# Create machine
##################################################

print "upping new", instancetype
if lD.detect_proxy() and lD.proxy_blocks_22:
    raise SystemError(
        "This proxy blocks port 22, that means you can't ssh to your machines to do the initial configuration. To "
        "skip this check set kavedeploy.proxy_blocks_22 to false and kavedeploy.proxy_port=22"
    )
lD.testproxy()

upped = lA.up_default(instancetype, security_group, keypair, subnet=subnet)
print "submitted"
Example 6
    # sys.exit(1)

print "===================================="
print "up the instance groups"
print "===================================="
sys.stdout.flush()

for instancegroup in cluster_config["InstanceGroups"]:
    count = instancegroup["Count"]
    autoname = True
    if count < 0:
        count = 1
        autoname = False
    if count == 0:
        continue
    up = lA.up_default(type=lA.chooseinstancetype(
        instancegroup["InstanceType"]),
                       security_group=security_group,
                       keys=amazon_keypair_name,
                       count=count,
                       subnet=subnet)
    instancegroups[instancegroup["Name"]] = lA.iid_from_up_json(up)

instance_to_remote = {}

print "Created IIDs:", instancegroups

print "===================================="
print "wait for them all to be up"
print "===================================="
sys.stdout.flush()
import time
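For reference, each entry of cluster_config["InstanceGroups"] read by the loop above uses at least the fields Name, InstanceType and Count; per that loop, a Count of 0 skips the group and a negative Count brings up exactly one instance without automatic naming. A sketch of what such entries might look like (group names and instance types here are placeholders):

"InstanceGroups": [
    {"Name": "admin",   "InstanceType": "m4.large",   "Count": -1},
    {"Name": "workers", "InstanceType": "c4.2xlarge", "Count": 3}
]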
Example 7
jsondat = open(secf)
security_config = json.loads(jsondat.read())
jsondat.close()
lA.checksecjson(security_config, requirekeys=["AWS"])

security_group = security_config["SecurityGroup"]
keypair = security_config["AccessKeys"]["AWS"]["KeyName"]
keyloc = security_config["AccessKeys"]["AWS"]["KeyFile"]
subnet = None

if "Subnet" in security_config:
    subnet = security_config["Subnet"]

lA.testaws()

instancetype = lA.chooseinstancetype("c4.2xlarge")

##################################################
# Create machine
##################################################

print "upping new", instancetype
if lD.detect_proxy() and lD.proxy_blocks_22:
    raise SystemError(
        "This proxy blocks port 22, that means you can't ssh to your machines to do the initial configuration. To "
        "skip this check set kavedeploy.proxy_blocks_22 to false and kavedeploy.proxy_port=22")
lD.testproxy()

upped = lA.up_default(instancetype, security_group, keypair, subnet=subnet)
print "submitted"
iid = lA.iid_from_up_json(upped)[0]
Example 8
    # sys.exit(1)

print "===================================="
print "up the instance groups"
print "===================================="
sys.stdout.flush()

for instancegroup in cluster_config["InstanceGroups"]:
    count = instancegroup["Count"]
    autoname = True
    if count < 0:
        count = 1
        autoname = False
    if count == 0:
        continue
    up = lA.up_default(type=lA.chooseinstancetype(instancegroup["InstanceType"]),
                       security_group=security_group, keys=amazon_keypair_name,
                       count=count, subnet=subnet)
    instancegroups[instancegroup["Name"]] = lA.iid_from_up_json(up)

instance_to_remote = {}

print "Created IIDs:", instancegroups

print "===================================="
print "wait for them all to be up"
print "===================================="
sys.stdout.flush()
import time

time.sleep(5)
Example 9
security_group = security_config["SecurityGroup"]
keypair = security_config["AccessKeys"]["AWS"]["KeyName"]
keyloc = security_config["AccessKeys"]["AWS"]["KeyFile"]
git = False
gitenv = None
if "GIT" in security_config["AccessKeys"]:
    git = True
    gitenv = security_config["AccessKeys"]["GIT"]
subnet = None

if "Subnet" in security_config:
    subnet = security_config["Subnet"]

lA.testaws()

instancetype = lA.chooseinstancetype("m4.large")

if iid is None:
    print "upping new", instancetype
    if lD.detect_proxy() and lD.proxy_blocks_22:
        raise SystemError(
            "This proxy blocks port 22, that means you can't ssh to your machines to do the initial configuration. To "
            "skip this check set kavedeploy.proxy_blocks_22 to false and kavedeploy.proxy_port=22")
    lD.testproxy()

    upped = lA.up_default(instancetype, security_group, keypair, subnet=subnet)
    print "submitted"
    iid = lA.iid_from_up_json(upped)[0]
    import time

    time.sleep(5)