def main():
    parser = setupArgs()
    args = parser.parse_args()
    lcm.opsc_url = args.opsc_ip + ':8888'
    lcm.waitForOpsC(pause=6, trys=200)  # Block waiting for OpsC to spin up
    lcm.waitForCluster(cname=args.clustername, pause=6, trys=200)  # Block until cluster created
    clusters = requests.get(
        "http://{url}/api/v1/lcm/clusters/".format(url=lcm.opsc_url)).json()
    for r in clusters['results']:
        if r['name'] == args.clustername:
            cid = r['id']
    lcm.waitForNodes(numnodes=args.clustersize, pause=6, trys=400)
    if args.dclevel:
        datacenters = requests.get(
            "http://{url}/api/v1/lcm/datacenters/".format(
                url=lcm.opsc_url)).json()
        for r in datacenters['results']:
            dcid = r['id']
            print("Triggering install for DC, id = {i}".format(i=dcid))
            lcm.triggerInstall(None, dcid, args.dbpasswd)
    else:
        print("Triggering install for cluster, id = {i}".format(i=cid))
        lcm.triggerInstall(cid, None, args.dbpasswd)
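# The lcm.waitForOpsC() and lcm.waitForCluster() helpers used above live in the
# shared lcm module, which is not part of this excerpt. A minimal sketch of what
# they presumably do, assuming that polling the /api/v1/lcm/clusters/ endpoint
# (the one the scripts already call) is an acceptable liveness check; the
# function names and the opsc_url parameter here are illustrative, not the
# module's actual implementation.
import time
import requests


def wait_for_opsc(opsc_url, pause=6, trys=200):
    # Poll OpsCenter until it answers, sleeping 'pause' seconds between attempts.
    for attempt in range(1, trys + 1):
        try:
            requests.get("http://{url}/api/v1/lcm/clusters/".format(url=opsc_url))
            return True
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            print("OpsC not up on try {c}, sleeping {p} sec...".format(c=attempt, p=pause))
            time.sleep(pause)
    return False


def wait_for_cluster(opsc_url, cname, pause=6, trys=200):
    # Block until a cluster named 'cname' shows up in the LCM clusters listing.
    for attempt in range(1, trys + 1):
        clusters = requests.get(
            "http://{url}/api/v1/lcm/clusters/".format(url=opsc_url)).json()
        if any(r['name'] == cname for r in clusters.get('results', [])):
            return True
        time.sleep(pause)
    return False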
def main():
    parser = setupArgs()
    args = parser.parse_args()
    clustername = args.clustername
    lcm.opsc_url = args.opsc_ip + ':8888'
    pause = args.pause
    trys = args.trys
    user = args.username
    keypath = os.path.abspath(args.privkey)
    with open(keypath, 'r') as keyfile:
        privkey = keyfile.read()
    print("Create cluster {c} at {u} with keypath {k}".format(
        c=clustername, u=lcm.opsc_url, k=keypath))
    # Yay globals!
    # These should move to a config file, passed as an arg maybe?
    dserepo = json.dumps({
        "name": "DSE repo",
        "username": "******",
        "password": "******"
    })
    dsecred = json.dumps({
        "become-mode": "sudo",
        "use-ssh-keys": True,
        "name": "DSE creds",
        "login-user": user,
        "ssh-private-key": privkey,
        "become-user": None
    })
    defaultconfig = json.dumps({
        "name": "Default config",
        "datastax-version": "5.0.5",
        "json": {
            'cassandra-yaml': {
                "authenticator": "AllowAllAuthenticator",
                "num_tokens": 64,
                "endpoint_snitch": "GossipingPropertyFileSnitch"
            }
        }
    })
    lcm.waitForOpsC(pause=pause, trys=trys)  # Block waiting for OpsC to spin up
    # return config instead of bool?
    c = lcm.checkForCluster(clustername)
    if c == False:
        # cluster doesn't exist -> must be 1st node -> do setup
        print("Cluster {n} doesn't exist, creating...".format(n=clustername))
        cred = lcm.addCred(dsecred)
        repo = lcm.addRepo(dserepo)
        conf = lcm.addConfig(defaultconfig)
        cid = lcm.addCluster(clustername, cred['id'], repo['id'], conf['id'])
    else:
        print("Cluster {n} exists".format(n=clustername))
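# lcm.checkForCluster() is called above but not defined in this excerpt. Based on
# how its return value is used (something truthy when the cluster exists, False
# otherwise), a minimal sketch might look like the following; the helper name and
# the exact return shape are assumptions, only the clusters endpoint comes from
# the source.
import requests


def check_for_cluster(opsc_url, clustername):
    # Return the matching cluster record from LCM, or False if it doesn't exist yet.
    clusters = requests.get(
        "http://{url}/api/v1/lcm/clusters/".format(url=opsc_url)).json()
    for r in clusters.get('results', []):
        if r['name'] == clustername:
            return r
    return False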
def main():
    parser = setupArgs()
    args = parser.parse_args()
    lcm.opsc_url = args.opsc_ip + ':8888'
    lcm.waitForOpsC(pause=6, trys=200)  # Block waiting for OpsC to spin up
    pause = 60
    trys = 100
    count = 0
    while True:
        count += 1
        if count > trys:
            print("Maximum attempts, exiting")
            exit()
        try:
            jobs = requests.get("http://{url}/api/v1/lcm/jobs/".format(
                url=lcm.opsc_url)).json()
        except requests.exceptions.Timeout as e:
            print("Request {c} to OpsC timeout after initial connection, exiting."
                  .format(c=count))
            exit()
        except requests.exceptions.ConnectionError as e:
            print("Request {c} to OpsC refused after initial connection, exiting."
                  .format(c=count))
            exit()
        lcm.pretty(jobs)
        if jobs['count'] == 0:
            print("No jobs found on try {c}, sleeping {p} sec...".format(
                c=count, p=pause))
            time.sleep(pause)
            continue
        if runningJob(jobs):
            print("Jobs running/pending on try {c}, sleeping {p} sec...".format(
                c=count, p=pause))
            time.sleep(pause)
            continue
        if (not runningJob(jobs)) and (jobs['count'] < args.num):
            print("Jobs found on try {c} but num {j} < {n}, sleeping {p} sec...".format(
                c=count, j=jobs['count'], n=args.num, p=pause))
            time.sleep(pause)
            continue
        if (not runningJob(jobs)) and (jobs['count'] >= args.num):
            print("No jobs running/pending and num >= {n} on try {c}, exiting".format(
                n=args.num, c=count))
            break
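# runningJob() is referenced above but not included in this excerpt. Judging from
# the "running/pending" messages, it presumably scans the /api/v1/lcm/jobs/ payload
# for any job that has not finished yet. A sketch, assuming LCM reports a 'status'
# field with values such as "RUNNING" and "PENDING" (an assumption about the API
# payload, not confirmed by the source).
def running_job(jobs):
    # True if any job in the jobs listing is still running or queued.
    for job in jobs.get('results', []):
        if job.get('status') in ('RUNNING', 'PENDING'):
            return True
    return False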
def main():
    parser = setupArgs()
    args = parser.parse_args()
    pause = args.pause
    trys = args.trys
    clustername = args.clustername
    lcm.opsc_url = args.opsc_ip + ':8888'
    dcname = args.dcname
    password = args.dbpasswd
    dcsize = args.dcsize
    clustersize = args.clustersize
    rack = args.rack
    nodeid = args.nodeid
    privateip = args.privip
    publicip = args.pubip
    lcm.waitForOpsC(pause=pause, trys=trys)  # Block waiting for OpsC to spin up
    lcm.waitForCluster(clustername, pause, trys)  # Block until cluster created
    clusters = requests.get(
        "http://{url}/api/v1/lcm/clusters/".format(url=lcm.opsc_url)).json()
    for r in clusters['results']:
        if r['name'] == clustername:
            cid = r['id']
    # Check if the DC --this-- node should belong to exists; if not, add the DC
    c = lcm.checkForDC(dcname)
    if c == False:
        print("Datacenter {n} doesn't exist, creating...".format(n=dcname))
        lcm.addDC(dcname, cid)
    else:
        print("Datacenter {d} exists".format(d=dcname))
    # kludge, assuming only one cluster
    dcid = ""
    datacenters = requests.get("http://{url}/api/v1/lcm/datacenters/".format(
        url=lcm.opsc_url)).json()
    for d in datacenters['results']:
        if d['name'] == dcname:
            dcid = d['id']
    # always add self to the DC
    nodes = requests.get(
        "http://{url}/api/v1/lcm/datacenters/{dcid}/nodes/".format(
            url=lcm.opsc_url, dcid=dcid)).json()
    nodecount = nodes['count']
    # simple counting for the node number hits a race condition... work around it
    #nodename = 'node'+str(nodecount)
    # aws metadata service instance-id
    #inst = requests.get("http://169.254.169.254/latest/meta-data/instance-id").content
    nodename = 'node-' + nodeid
    nodeconf = json.dumps({
        'name': nodename,
        "datacenter-id": dcid,
        "rack": rack,
        "ssh-management-address": publicip,
        "listen-address": privateip,
        "rpc-address": "0.0.0.0",
        "broadcast-address": publicip,
        "broadcast-rpc-address": publicip
    })
    node = requests.post(
        "http://{url}/api/v1/lcm/nodes/".format(url=lcm.opsc_url),
        data=nodeconf).json()
    print("Added node '{n}', json:".format(n=nodename))
    lcm.pretty(node)
    nodes = requests.get(
        "http://{url}/api/v1/lcm/datacenters/{dcid}/nodes/".format(
            url=lcm.opsc_url, dcid=dcid)).json()
    nodecount = nodes['count']
    # vvv could get pulled out into a helper
    totalnodes = 0
    for d in datacenters['results']:
        nodes = requests.get(
            "http://{url}/api/v1/lcm/datacenters/{dcid}/nodes/".format(
                url=lcm.opsc_url, dcid=d['id'])).json()
        totalnodes += nodes['count']
    if clustersize != 0 and totalnodes == clustersize:
        print("Last node added, triggering cluster install job...")
        lcm.triggerInstall(cid, None, password)
        return
    elif clustersize == 0 and nodecount == dcsize:
        print("Last node added, triggering install job...")
        lcm.triggerInstall(None, dcid, password)
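# lcm.checkForDC() and lcm.addDC() are used by the node-add scripts above and below
# but are defined in the shared lcm module, not in this excerpt. Sketches of what
# they likely do; only the /api/v1/lcm/datacenters/ endpoint appears in the source,
# so the function names, return shapes, and the "name"/"cluster-id" fields in the
# POST body are assumptions.
import json
import requests


def check_for_dc(opsc_url, dcname):
    # Return the matching datacenter record, or False if it hasn't been created yet.
    datacenters = requests.get(
        "http://{url}/api/v1/lcm/datacenters/".format(url=opsc_url)).json()
    for d in datacenters.get('results', []):
        if d['name'] == dcname:
            return d
    return False


def add_dc(opsc_url, dcname, cid):
    # Create a datacenter under cluster 'cid' and return the parsed response.
    dcconf = json.dumps({"name": dcname, "cluster-id": cid})
    return requests.post(
        "http://{url}/api/v1/lcm/datacenters/".format(url=opsc_url),
        data=dcconf).json()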
def main():
    parser = setupArgs()
    args = parser.parse_args()
    pause = args.pause
    trys = args.trys
    clustername = args.clustername
    lcm.opsc_url = args.opsc_ip + ':8888'
    #datacenters = ['dc0','dc1','dc2']
    dcname = args.dcname
    dcsize = args.dcsize
    #pubkey = args.pubkey
    nodeid = args.nodeid
    privateip = args.privip
    publicip = args.pubip
    lcm.waitForOpsC(pause=pause, trys=trys)  # Block waiting for OpsC to spin up
    #writepubkey(pubkey)
    # ^^^ no-op, should happen up in the IaaS?
    # Check if the DC --this-- node should belong to exists; if not, add the DC
    c = lcm.checkForDC(dcname)
    if c == False:
        print("Datacenter {n} doesn't exist, creating...".format(n=dcname))
        clusters = requests.get("http://{url}/api/v1/lcm/clusters/".format(
            url=lcm.opsc_url)).json()
        cid = clusters['results'][0]['id']
        lcm.addDC(dcname, cid)
    else:
        print("Datacenter {d} exists".format(d=dcname))
    # kludge, assuming only one cluster
    dcid = ""
    datacenters = requests.get("http://{url}/api/v1/lcm/datacenters/".format(
        url=lcm.opsc_url)).json()
    for d in datacenters['results']:
        if d['name'] == dcname:
            dcid = d['id']
    # always add self to the DC
    nodes = requests.get(
        "http://{url}/api/v1/lcm/datacenters/{dcid}/nodes/".format(
            url=lcm.opsc_url, dcid=dcid)).json()
    nodecount = nodes['count']
    # simple counting for the node number hits a race condition... work around it
    #nodename = 'node'+str(nodecount)
    # aws metadata service instance-id
    #inst = requests.get("http://169.254.169.254/latest/meta-data/instance-id").content
    nodename = 'node-' + nodeid
    nodeconf = json.dumps({
        'name': nodename,
        "datacenter-id": dcid,
        "ssh-management-address": publicip,
        "listen-address": privateip,
        "rpc-address": privateip,
        "broadcast-address": publicip,
        "broadcast-rpc-address": publicip
    })
    node = requests.post(
        "http://{url}/api/v1/lcm/nodes/".format(url=lcm.opsc_url),
        data=nodeconf).json()
    print("Added node '{n}', json:".format(n=nodename))
    lcm.pretty(node)
    nodes = requests.get(
        "http://{url}/api/v1/lcm/datacenters/{dcid}/nodes/".format(
            url=lcm.opsc_url, dcid=dcid)).json()
    nodecount = nodes['count']
    if nodecount == dcsize:
        print("Last node added, triggering install job...")
        lcm.triggerInstall(dcid)
def main():
    parser = setupArgs()
    args = parser.parse_args()
    clustername = args.clustername
    lcm.opsc_url = args.opsc_ip + ':8888'
    pause = args.pause
    trys = args.trys
    user = args.username
    password = args.password
    privkey = args.privkey
    datapath = args.datapath
    dsever = args.dsever
    repouser = args.repouser
    repopw = args.repopw
    if password is None and privkey is None:
        print("Error: must pass either a private key or a password")
        exit(1)
    # Yay globals!
    # These should move to a config file, passed as an arg maybe?
    dserepo = json.dumps({
        "name": "DSE repo",
        "username": repouser,
        "password": repopw
    })
    if privkey is not None:
        keypath = os.path.abspath(args.privkey)
        with open(keypath, 'r') as keyfile:
            privkey = keyfile.read()
        print("Will create cluster {c} at {u} with keypath {k}".format(
            c=clustername, u=lcm.opsc_url, k=keypath))
        dsecred = json.dumps({
            "become-mode": "sudo",
            "use-ssh-keys": True,
            "name": "DSE creds",
            "login-user": user,
            "ssh-private-key": privkey,
            "become-user": None
        })
    else:
        print("Will create cluster {c} at {u} with password".format(
            c=clustername, u=lcm.opsc_url))
        dsecred = json.dumps({
            "become-mode": "sudo",
            "use-ssh-keys": False,
            "name": "DSE creds",
            "login-user": user,
            "login-password": password,
            "become-user": None
        })
    defaultconfig = {
        "name": "Default config",
        "datastax-version": dsever,
        "json": {
            'cassandra-yaml': {
                "authenticator": "com.datastax.bdp.cassandra.auth.DseAuthenticator",
                "num_tokens": 32,
                "endpoint_snitch": "GossipingPropertyFileSnitch"
            },
            "dse-yaml": {
                "authorization_options": {"enabled": True},
                "authentication_options": {"enabled": True}
            }
        }
    }
    # Since this isn't being called on the nodes where 'datapath' exists,
    # checking the path is pointless
    if datapath != "":
        defaultconfig["json"]["cassandra-yaml"]["data_file_directories"] = [
            os.path.join(datapath, "data")
        ]
        defaultconfig["json"]["cassandra-yaml"]["saved_caches_directory"] = os.path.join(
            datapath, "saved_caches")
        defaultconfig["json"]["cassandra-yaml"]["commitlog_directory"] = os.path.join(
            datapath, "commitlog")
    defaultconfig = json.dumps(defaultconfig)
    lcm.waitForOpsC(pause=pause, trys=trys)  # Block waiting for OpsC to spin up
    # return config instead of bool?
    c = lcm.checkForCluster(clustername)
    if c == False:
        # cluster doesn't exist -> must be 1st node -> do setup
        print("Cluster {n} doesn't exist, creating...".format(n=clustername))
        cred = lcm.addCred(dsecred)
        repo = lcm.addRepo(dserepo)
        conf = lcm.addConfig(defaultconfig)
        cid = lcm.addCluster(clustername, cred['id'], repo['id'], conf['id'])
    else:
        print("Cluster {n} exists".format(n=clustername))
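# setupArgs() is assumed to build an argparse parser exposing the options this
# cluster-creation script reads. A sketch covering the attributes dereferenced
# above; only the attribute names come from the code, while the flag spellings,
# required/optional choices, and defaults are assumptions.
import argparse


def setup_args_sketch():
    parser = argparse.ArgumentParser(
        description="Create an LCM-managed DSE cluster via OpsCenter")
    parser.add_argument("--opsc-ip", required=True, help="OpsCenter IP/hostname")
    parser.add_argument("--clustername", required=True)
    parser.add_argument("--username", required=True, help="SSH login user")
    parser.add_argument("--password", default=None, help="SSH login password")
    parser.add_argument("--privkey", default=None, help="path to SSH private key")
    parser.add_argument("--datapath", default="", help="base dir for data/commitlog/caches")
    parser.add_argument("--dsever", required=True, help="DSE version to install")
    parser.add_argument("--repouser", required=True, help="DSE repo username")
    parser.add_argument("--repopw", required=True, help="DSE repo password")
    parser.add_argument("--pause", type=int, default=6, help="seconds between retries")
    parser.add_argument("--trys", type=int, default=100, help="max retries")
    return parser

# Usage (hypothetical): args = setup_args_sketch().parse_args()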