def createPHD(clusterConfigFilePath, overrideClusterId,nodeCnt):
    '''
    Build a dockerized Pivotal HD cluster: one DNS container plus the Hadoop
    nodes described in the cluster-config JSON file, then drive the PHD
    install (spark, ICM, HAWQ) against the running containers.

    clusterConfigFilePath -- path to a JSON cluster definition with at least
        "nodes" (list of {hostname, imageName, cmd}), "domain" and "id" keys.
    overrideClusterId -- if not None, replaces the config file's "id".
    nodeCnt -- node-count string substituted into the first node's hostname
        (replaces the literal "6" in the template hostname).

    Returns 0 on success, 1 on bad config JSON or failed ssh check to the
    DNS container.

    return: { "dns": "172.17.0.2", "hosts": "172.17.0.2 master\n172.17.0.3 slave1\n172.17.04 slave2" }
    '''
    print "Cluster Build Start Time: "+str(time.asctime(time.localtime(time.time())))
    dnsServerAddress = None   # IP of the first (DNS) container, filled in the loop below
    hosts = ""                # accumulated "/etc/hosts"-style text pushed to dnsmasq
    hadoopHosts=[]            # per-node dicts {hostname, ip, fqdn, id}, excludes the DNS node
    rootPassword = "******"
    # NOTE(review): encPassword is computed but never used; rootPassword ("******")
    # is later passed to shareSSHKeys while every other remote call uses "changeme"
    # as the password — confirm which credential is actually intended.
    encPassword = crypt.crypt(rootPassword,"salt")
    with open(clusterConfigFilePath, "r") as conffile:
        conf = conffile.read()
    try:
        clusterConfig = json.loads(conf)
        # Template hostnames embed the node count as the literal "6";
        # substitute the requested count.
        clusterConfig["nodes"][0]["hostname"] = str(clusterConfig["nodes"][0]["hostname"]).replace("6",nodeCnt)
    except ValueError as e:
        print "Given cluster config json file " + clusterConfigFilePath + " is invalid "
        print e.message
        return 1
    # docker build if Dockerfile is specified
    clusterConfig = _flattenDockerfile(clusterConfig)
    clusterConfig = _flattenHostname(clusterConfig)
    if overrideClusterId != None:
        clusterConfig["id"] = overrideClusterId
    # Append DNS: prepend a dnsmasq container so every later node can resolve
    # cluster hostnames through it.
    dnsNode = { "hostname" : "dclouddns", "imageName" : REPO_DNS_BASE, "cmd" : "service sshd start && tail -f /var/log/yum.log" }
    clusterConfig["nodes"].insert(0, dnsNode)
    # Re-flatten after inserting the DNS node so it gets the same hostname treatment.
    clusterConfig = _flattenHostname(clusterConfig)
    basePath = os.path.split(__file__)[0]
    pccPath = configFile.getParam( _WORK_DIR+"/.dcloud.ini","PHD","pcc")
    pccPath = os.path.split(pccPath)[0]
    # Mount the PCC tarball directory and the config templates into every container.
    volumes = ['/mnt/pcc','/mnt/config']
    volumeBinds = { pccPath : {'bind' : '/mnt/pcc', "ro" : False}, str(basePath)+"/template" : {'bind' : '/mnt/config', "ro" : False} }
    #pullImages(dnsNode["imageName"],phd,dockerFilePath)
    print "Setting Images for Use"
    pullImages(dnsNode["imageName"])
    # Create and start one container per node; node 0 is the DNS server and its
    # IP is fed as the DNS resolver to all subsequent nodes.
    for i in range(len(clusterConfig["nodes"])):
        dnsList=[]
        node = clusterConfig["nodes"][i]
        containerName = _generateContainerName(clusterConfig["id"], node["hostname"])
        #containerName = str(clusterConfig["id"])+"."+str(node["hostname"])
        domainName = clusterConfig["domain"]
        fqdn = node["hostname"] + "." + clusterConfig["domain"]
        cmdString = "bash -c '"+node["cmd"]+"'"
        dockerClient=dockerpy.Client()
        #containerId = dockerClient.create_container(node["imageName"],command=cmdString,hostname=node["hostname"],domainname=domainName,detach=True,name=containerName,volumes=volumes)["Id"]
        #containerId = dockerClient.create_container(node["imageName"],command=cmdString,hostname=node["hostname"]+"."+domainName,domainname=domainName,detach=True,name=containerName,volumes=volumes)["Id"]
        # Container hostname is set to the FQDN (hostname.domain), no separate domainname.
        containerId = dockerClient.create_container(node["imageName"],command=cmdString,hostname=node["hostname"]+"."+domainName,detach=True,name=containerName,volumes=volumes)["Id"]
        if i == 0:
            # The DNS container resolves against itself.
            dnsList.append("127.0.0.1")
        else:
            # All other nodes resolve through the DNS container started first.
            dnsList.append(dnsServerAddress)
        # Optional extra upstream DNS servers from the cluster config.
        if "dns" in clusterConfig:
            for dnsIp in clusterConfig["dns"]:
                dnsList.append(dnsIp)
        dockerClient.start(containerId,dns=dnsList,dns_search=domainName,privileged=True,binds=volumeBinds)
        containerInfo = dockerClient.inspect_container(containerId)
        containerIP = containerInfo['NetworkSettings']['IPAddress']
        if i == 0:
            dnsServerAddress = containerIP
        # Record "<ip> <fqdn> <shortname>" for the dnsmasq hosts file.
        hosts += containerIP + " " + fqdn + " " + node["hostname"] + "\n"
        if (node["hostname"] != "dclouddns"):
            hadoopHosts.append({"hostname":node["hostname"],"ip":containerIP,"fqdn":fqdn,"id":containerId})
    print "DNS Server Address: " + dnsServerAddress
    if(not ssh.connection_check(dnsServerAddress, "root", "changeme")):
        print "**** ERROR ****"
        print "ssh connection to root@" + dnsServerAddress + " could not be established"
        return 1
    # Push the accumulated hosts file into the DNS container and restart dnsmasq
    # so all cluster names resolve.
    ssh.exec_command2(dnsServerAddress, "root", "changeme", "echo '" + hosts + "' > /etc/dcloud/dnsmasq/hosts")
    ssh.exec_command2(dnsServerAddress, "root", "changeme", "service dnsmasq restart")
    print "Pivotal Hadoop Hosts:"
    print "-----------------------------------------"
    lines = hosts.split('\n')
    # Line 0 is the DNS/management entry; the rest are Hadoop nodes.
    print "Management Host: " + lines[0]
    print "Hadoop Nodes:"
    lineCnt = 0
    for line in lines:
        if lineCnt > 0 :
            print line
        lineCnt+=1
    # NOTE(review): result is populated but never returned — the docstring's
    # documented return shape is not what the function actually returns (0/1).
    result = RunResult()
    result.dns = dnsServerAddress
    result.hosts = hosts
    #print "gpadmin user created on "+host["hostname"]
    #print "gpadmin users created"
    #print "Sharing Root SSH Keys Across Cluster"
    #shareSSHKeys(clusterConfig["id"],"root",rootPassword)
    #print "Sharing Root SSH Keys Completed"
    # NOTE(review): mgmtContainerHostname is computed but unused.
    mgmtContainerHostname = dockerDriver.getContainerId(clusterConfig["nodes"][1]["hostname"])
    # hadoopHosts[0] is the first non-DNS node — treated as the management host.
    mgmtIPaddress = hadoopHosts[0]["ip"]
    # Drive the PHD install sequence: spark, service placement, ICM, then HAWQ.
    phdDesigner.sparkInstall(configFile,"root","changeme",hadoopHosts,clusterConfig["id"] ,nodeCnt)
    phdDesigner.servicePlacementV2(nodeCnt,hadoopHosts,clusterConfig["domain"],clusterConfig["id"])
    phdDesigner.icmInstall(mgmtIPaddress,_WORK_DIR+"/.dcloud.ini","root","changeme",hadoopHosts,clusterConfig["id"])
    print "Sharing gpadmin keys across Cluster"
    shareSSHKeys(clusterConfig["id"],"gpadmin",rootPassword)
    print "Sharing gpadmin keys COmpleted"
    phdDesigner.initializeHAWQ("gpadmin","changeme",hadoopHosts,clusterConfig["id"],nodeCnt)
    print "Cluster Build End Time: "+str(time.asctime(time.localtime(time.time())))
    print "Update Docker Host /etc/hosts for hostname based access"
    hostsfile(clusterConfig["id"],"/etc/hosts")
    return 0
def icmInstall(mgmtServer,cfgFile,username,password,hadoopHosts,clusterId):
    '''
    Install PCC/ICM on the management node and deploy the PHD cluster.

    All work is performed remotely over ssh against mgmtServer: untar the
    PCC installer from the /mnt/pcc volume, run it, untar the PHD and HAWQ
    stacks, import both via icm_client, prepare all non-management hosts
    through the gphdmgr REST API, then deploy and start the cluster.

    mgmtServer -- IP/hostname of the management container.
    cfgFile -- path to the .dcloud.ini holding PHD tarball locations
        (sections PHD/pcc, PHD/phd, PHD/pads).
    username, password -- root-level ssh credentials for mgmtServer;
        password is also reused as the root/gpadmin password in prepareHosts.
    hadoopHosts -- list of node dicts; index 0 is the management host and is
        excluded from the prepareHosts host list.
    clusterId -- cluster identifier forwarded in the prepareHosts payload.

    No return value; progress is reported via prints.
    '''
    print "ICM Install on "+ str(mgmtServer)
    #print cfgFile
    pccPath = configFile.getParam(cfgFile,"PHD","pcc")
    #filename = ssh.putFile(mgmtServer,pccPath,username,password)
    fileName = os.path.split(pccPath)[1]
    print "PCC Filename: "+fileName
    ssh.exec_command2(mgmtServer,username,password,"yum -y install tar")
    print "Untarring: "+"tar xvfz /mnt/pcc/"+fileName
    # The tarballs are visible inside the container via the /mnt/pcc volume bind.
    ssh.exec_command2(mgmtServer,username,password,"cd /tmp;tar xvfz /mnt/pcc/"+fileName)
    print "Installing PCC..."
    #ssh.exec_command2(mgmtServer,username,password,"echo \"echo not running.\" > /etc/init.d/iptables")
    #print ssh.exec_command2(mgmtServer,username,password,"/tmp/"+filename[:-7]+"/install")
    # Glob-match the untarred PCC directory rather than deriving it from the filename.
    ssh.exec_command2(mgmtServer,username,password,"/tmp/PCC*/install")
    print "Install Complete"
    print "Transfer Configuration: "
    # #tar File, send across, untar
    # basePath = os.path.split(__file__)[0]
    # tar = tarfile.open("/tmp/clusterConfig.tar","w")
    # fileList = []
    # for root, subFolders, files in os.walk(str(basePath)+"/template/"):
    #     for file in files:
    #         fullPath = os.path.join(root,file)
    #         relativePath = ((root.replace(str(basePath),""))+"/"+file)[1:]
    #         tar.add(fullPath,relativePath)
    # tar.close()
    #
    # ssh.putFile(mgmtServer,"/tmp/clusterConfig.tar",username,password)
    # ssh.exec_command2(mgmtServer,username,password,"cd /tmp;tar xvf /tmp/clusterConfig.tar")
    # ICM Setup: untar the PHD stack and the HAWQ (pads) stack into /tmp.
    phdPath = configFile.getParam(cfgFile,"PHD","phd")
    #fileName = ssh.putFile(mgmtServer,phdPath,username,password)
    fileName = os.path.split(phdPath)[1]
    print "PHD Filename: "+fileName
    hawqPath = configFile.getParam(cfgFile,"PHD","pads")
    hawqFileName = os.path.split(hawqPath)[1]
    print "HAWQ Filename: "+ hawqFileName
    print "tar xvfz /tmp/"+fileName
    ssh.exec_command2(mgmtServer,username,password,"cd /tmp;tar xvfz /mnt/pcc/"+fileName)
    print "untar complete"
    print "tar xvfz /tmp/"+hawqFileName
    ssh.exec_command2(mgmtServer,username,password,"cd /tmp;tar xvfz /mnt/pcc/"+hawqFileName)
    print "untar complete"
    #ssh.exec_command2(mgmtServer,username,password,"echo \"changeme\" | passwd --stdin gpadmin")
    # NOTE(review): the passwd command above is commented out, so the print
    # below no longer reflects an actual action.
    print "changed GPADMIN password"
    print "Start Import of /mnt/pcc/"+fileName[:-7]
    # import stack details
    # NOTE(review): stackConfig is only used by the commented-out importStack
    # call below; the imports are done via icm_client instead.
    stackConfig = { "stack_name": "PHD-2.1.0.0", "stack_properties": { "rpm_rel": "46", "hive_package_version": "0.12.0_gphd_3_1_0_0-175", "hadoop_rpm_version": "2.2.0_gphd_3_1_0_0-175", "rpm_label": "gphd", "hbase_package_version": "0.96.0_gphd_3_1_0_0-175", "zookeeper_package_version": "3.4.5_gphd_3_1_0_0-175", "rpm_version": "2_0_2_0", "pig_package_version": "0.12.0_gphd_3_1_0_0-175", "mahout_package_version": "0.7_gphd_3_1_0_0-175" }, "stack_type": "phd" }
    # apiClient.importStack(stackConfig)
    # [:-7] strips the ".tar.gz" suffix to get the untarred directory name.
    ssh.exec_command2(mgmtServer,"gpadmin","changeme","icm_client import -s /tmp/"+fileName[:-7])
    print "Import Complete"
    print "Start Import of /mnt/pcc/"+hawqFileName[:-7]
    ssh.exec_command2(mgmtServer,"gpadmin","changeme","icm_client import -s /tmp/"+hawqFileName[:-7])
    print "Starting Cluster Deploy..."
    # RUN PREPAREHOSTS VIA API — build the payload; skip hadoopHosts[0]
    # (the management host prepares itself).
    inputData={}
    hostList = []
    hostCnt = 0
    for host in hadoopHosts:
        if hostCnt > 0:
            hostList.append(host["hostname"])
        hostCnt+=1
    inputData["hosts"] = hostList
    inputData["jdkPath"] = ""
    inputData["verbose"] = True
    inputData["setupNTP"] = False
    inputData["ntpServer"] = ""
    inputData["disableSELinux"] = False
    inputData["disableIPTables"] = False
    inputData["sysConfigDir"] = ""
    inputData["skipPasswordlessSSH"] = False
    inputData["rootPassword"]= password
    inputData["gpadminPassword"]= password
    inputData["gphdStackVer"]="PHD-2.1.0.0"
    inputData["clusterId"] = clusterId
    # Get OAuth file — fetched locally so APIClient can authenticate to gphdmgr.
    ssh.getFile(mgmtServer,"/etc/gphd/gphdmgr/conf/oauth2-clients.conf","/tmp/oauth2-clients.conf",username,password)
    apiClient = APIClient (mgmtServer)
    apiClient.prepareHosts(inputData)
    # Thread the deploy and then look through install status.
    #clusterConfigJSON = json.dumps(open(str(basePath)+"/template/clusterConfig.xml","r").readlines())
    #print APIClient.deployConfiguration(apiClient,clusterConfigJSON,inputData)
    #print apiClient.getClusterTemplateJson(inputData)
    # Deploy from the mounted /mnt/config templates (-s sync, -p prepare).
    ssh.exec_command2(mgmtServer,"gpadmin","changeme","icm_client deploy -s -p -c /mnt/config")
    print "Cluster Deploy Complete: PCC is available at https://"+mgmtServer+":5443"
    print "Starting Cluster..."
    ssh.exec_command2(mgmtServer,"gpadmin","changeme","icm_client start -l phdcluster")
    print "Cluster Started"
    #print apiClient.getClusterStatus(inputData)