def prepareKVMHost(self, mgmtHostInfo, hypervisor):
    """
    Prepare a KVM host: push ssh keys and the kvm-agent puppet module, copy the
    cloudstack rpm repo over from the management server, then kick off a puppet apply.
    """
    self.logger.info("preparing kvm host %s" % hypervisor['hostname'])
    ssh = SSHClient()
    ssh.set_missing_host_key_policy(AutoAddPolicy())
    ssh.connect(hypervisor['ip'], 22, username="******", password=hypervisor['password'])
    scp = SCPClient(ssh.get_transport())
    scp.put("/etc/puppet/modules/kvm-agent/files/authorized_keys", "/root/.ssh/")
    mgmtSsh = remoteSSHClient(mgmtHostInfo['ip'], 22, "root", mgmtHostInfo['password'])
    self.logger.info("copying the cloudstack rpms to kvm host")
    bash("scp -r -q -o StrictHostKeyChecking=no /etc/puppet/modules/kvm-agent root@%s:/root" % hypervisor['ip'])
    kvmSsh = remoteSSHClient(hypervisor['ip'], 22, "root", hypervisor['password'])
    kvmSsh.execute("mkdir /tmp/cloudstack")
    mgmtSsh.execute("scp -r -q -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa.mgmt /root/cloudstack-repo/* root@%s:/tmp/cloudstack" % hypervisor['ip'])
    kvmSsh.execute("puppet apply --debug --modulepath=/root -e 'include kvm-agent' >> puppetRun.log 2>&1")
    kvmSsh.close()
    mgmtSsh.close()
    self.logger.info("kicked off puppet install of kvm")
def __init__(self):
    self.logger = logging.getLogger("git-server")
    self.gitServer = '172.16.88.6'
    self.gitServerSshPort = 22
    self.gitServerUserName = '******'
    self.gitServerPassword = '******'
    self.gitRepoPath = '/root/cloudstack'
    self.pullFrom = 'github'
    self.syncTo = 'origin'
    self.gitHost = remoteSSHClient(self.gitServer, self.gitServerSshPort,
                                   self.gitServerUserName, self.gitServerPassword)
    self.gitDb = gitDb()
def create(self, repo_url, branch, githubPrno, commit, configName, profile):
    # Re-check the hosts because ssh connect works as soon as post-installation occurs,
    # but the server is rebooted after post-installation. Assuming the server is up is
    # wrong in these cases, so we check again before continuing to add the hosts to
    # cloudstack.
    try:
        hosts = []
        self.mgmtHostInfo = {}
        self.mgmtHostInfo.update({'startTime': time.strftime("%c"), 'repo_url': repo_url, 'branch': branch})
        self.prepareCloudstackRepo()
        prepare_mgmt = True
        self.logger.info("Configuring management server")
        self.configureManagementServer(profile, branch, configName)
        hosts.append(self.mgmtHostInfo['hostname'])
        self.waitForHostReady(hosts)
        mgmtSsh = remoteSSHClient(self.mgmtHostInfo['ip'], 22, "root", self.mgmtHostInfo['password'])
        #mgmtSsh.execute("echo 'export http_proxy=http://172.16.88.5:3128' >> /root/.bashrc; source /root/.bashrc")
        mgmtSsh.execute("puppet agent -t > /var/log/puppet.log")
        mgmtSsh.execute("ssh-copy-id root@%s" % self.mgmtHostInfo['ip'])
        self.waitTillPuppetFinishes()
        # git proxy config
        #mgmtSsh.execute("git config --global http.proxy http://172.16.88.5:3128; git config --global https.proxy http://172.16.88.5:3128")
        delay(30)
        if prepare_mgmt:
            compute_hosts = self.hostImager.imageHosts(self.mgmtHostInfo)
            self.prepareManagementServer(repo_url, branch, githubPrno, commit)
            self.mgmtHostInfo.update({'repo_url': repo_url, 'branch': branch})
            self.hostImager.checkIfHostsUp(compute_hosts)
        return self.mgmtHostInfo
    except Exception as e:
        self.logger.exception(e)
        # cleanup resources and exit
        self.resourceMgr.freeConfig(self.mgmtHostInfo['configfile'])
        xenssh = remoteSSHClient(self.infraxen_ip, 22, "root", self.infraxen_passwd)
        self.logger.debug("bash vm-uninstall.sh -n %s" % (self.mgmtHostInfo['hostname']))
        #xenssh.execute("xe vm-uninstall force=true vm=%s"%self.mgmtHostInfo['hostname'])
        #bash("cobbler system remove --name=%s"%(self.mgmtHostInfo['hostname']))
        #bash("cobbler sync")
        sys.exit(1)
def prepareManagementServer(self, url, branch, githubPrno, commit):
    """
    Prepare the mgmt server for a marvin test run
    """
    buildlog = ""
    self.logger.info("preparing management server")
    if self._isPortListening(host=self.mgmtHostInfo['ip'], port=22, timeout=300) \
            and self._isPortListening(host=self.mgmtHostInfo['ip'], port=3306, timeout=30):
        mgmt_ip = self.mgmtHostInfo['ip']
        mgmt_pass = self.mgmtHostInfo['password']
        self.mgmtHostInfo['branch'] = branch
        with contextlib.closing(remoteSSHClient(mgmt_ip, 22, "root", mgmt_pass)) as ssh:
            # Open up 8096 for Marvin initial signup and register
            package = self.hostImager.pacakageIfKVM()
            ssh.execute("python /root/buildAndDeploySimulator.py --package %s --noSimulator %s -u %s %s --fqdn %s %s -s True -o True > /var/log/cloudstack.log"
                        % (package,
                           self.mgmtHostInfo['noSimulator'],
                           url,
                           ("--githubPrno %s" % githubPrno if (githubPrno is not None) else "-b %s" % branch),
                           self.mgmtHostInfo['hostname'] + "." + self.DOMAIN,
                           ("" if commit is None else "-c %s" % commit)))
            buildlog = "".join(ssh.stdout)
            if len(ssh.errorlog) > 0:
                raise Exception("failed to build cloudstack errorLog: %s" % ssh.errorlog)
            # seed systemvm and builtin templates
            ssh.execute("sh /root/secseeder.sh > secseeder.log 2>&1")
            self.hostImager.seedBuiltinTemplates()
            # record the commit we built from
            ssh.execute("cd /automation/cloudstack/; git log | grep -m 1 'commit' | cut -f 2 -d ' '")
            self.logger.info('build from commit_id %s' % ssh.stdout[0])
            self.mgmtHostInfo.update({'commit_id': ssh.stdout[0]})
            retry = 3
            while retry != 0:
                if not self._isPortListening(host=self.mgmtHostInfo['ip'], port=8080, timeout=300):
                    ssh.execute("python /root/restartMgmtServer.py -p /automation/cloudstack/ --noSimulator %s >> /var/log/cloudstack.log" % self.mgmtHostInfo['noSimulator'])
                    self.logger.debug("exceeded timeout; restarting the management server and trying again")
                    retry = retry - 1
                else:
                    break
    else:
        raise Exception("Reqd services (ssh, mysql) on management server are not up. Aborting")
    if self._isPortListening(host=self.mgmtHostInfo['ip'], port=8096, timeout=10):
        self.logger.info("All reqd services are up on the management server %s" % self.mgmtHostInfo['hostname'])
        return
    else:
        self.logger.error("Build log.......................... \n %s" % buildlog)
        raise Exception("Management server %s is not up. Aborting" % self.mgmtHostInfo['hostname'])
    # NOTE: the code below is unreachable as written (the branch above either returns or raises)
    # seed systemvm templates
    '''result=ssh.execute("sh /root/secseeder.sh > secseeder.log 2>&1")
    if (result != 0):
        self.logger.error("".join(error))
        raise Exception("failed to seed systemvm templates")'''
    # execute post install tasks
    self.hostImager.execPostInstallHooks(self.mgmtHostInfo)
def refreshHost(self, mgmtHostInfo, branch, githubPrno, commit, reImageHosts=True):
    """
    Prepare the mgmt server for a marvin test run
    """
    buildlog = ""
    self.logger.info("refreshing management server")
    self.mgmtHostInfo = mgmtHostInfo
    self.mgmtHostInfo.update({'startTime': time.strftime("%c")})
    self.mgmtHostInfo['branch'] = branch
    if reImageHosts:
        compute_hosts = self.hostImager.imageHosts(self.mgmtHostInfo)
    if self._isPortListening(host=mgmtHostInfo['ip'], port=22, timeout=300) \
            and self._isPortListening(host=mgmtHostInfo['ip'], port=3306, timeout=30):
        mgmt_ip = self.mgmtHostInfo['ip']
        mgmt_pass = self.mgmtHostInfo['password']
        with contextlib.closing(remoteSSHClient(mgmt_ip, 22, "root", mgmt_pass)) as ssh:
            self.logger.info("seeding systemvm templates")
            #ssh.execute("sh /root/secseeder.sh > secseeder.log 2>&1")
            #self.hostImager.seedBuiltinTemplates()
            package = self.hostImager.pacakageIfKVM()
            config = configGenerator.getSetupConfig(self.mgmtHostInfo['configfile'].replace(' ', ''))
            noSimulator = self.checkIfSimulatorBuild(config)
            self.mgmtHostInfo.update({'noSimulator': noSimulator})
            out = ssh.execute("python /root/refreshHost.py -p %s --noSimulator %s %s %s > /var/log/cloudstack.log"
                              % (package,
                                 self.mgmtHostInfo['noSimulator'],
                                 ("--githubPrno %s" % githubPrno if (githubPrno is not None) else "-b %s" % branch),
                                 ("" if commit is None else "-c %s" % commit)))
            buildlog = "".join(ssh.stdout)
            if out != 0:
                self.logger.info("build log ...............\n%s" % buildlog)
                errorlog = "failed to build cloudstack errorLog: %s" % ssh.errorlog
                ssh.execute("cat /var/log/cloudstack.log")
                self.logger.error("".join(ssh.stdout))
                raise Exception(errorlog)
            # record the commit we built from
            ssh.execute("cd /automation/cloudstack/; git log | grep -m 1 'commit' | tr -d 'commit' | tr -d ' '")
            self.logger.info('building from commit_id %s' % ssh.stdout[0])
            self.mgmtHostInfo.update({'commit_id': ssh.stdout[0]})
            ssh.execute("sh /root/secseeder.sh > secseeder.log 2>&1")
            self.hostImager.seedBuiltinTemplates()
            retry = 3
            while retry != 0:
                if not self._isPortListening(host=self.mgmtHostInfo['ip'], port=8080, timeout=300):
                    ssh.execute("python /root/restartMgmtServer.py -p /automation/cloudstack/ --noSimulator %s >> /var/log/cloudstack.log" % self.mgmtHostInfo['noSimulator'])
                    self.logger.info("exceeded timeout; restarting the management server and trying again")
                    retry = retry - 1
                else:
                    break
    # only verify compute hosts when they were re-imaged (compute_hosts is undefined otherwise)
    if reImageHosts:
        self.hostImager.checkIfHostsUp(compute_hosts)
    self.hostImager.execPostInstallHooks(self.mgmtHostInfo)
    return self.mgmtHostInfo
def destroy(self, hostname):
    """
    Tear down a free host: remove it from cobbler, uninstall its VM from the
    infra xenserver, free its config and drop it from the resource db.
    """
    if self.__con or self.connect():
        cur = self.__con.cursor(MySQLdb.cursors.DictCursor)
        cur.execute("SELECT * FROM `resource_db`.`host` WHERE hostname='%s' and state='free'" % hostname)
        host = cur.fetchone()
        hostInfo = self.getDict(host, cur)
        cur.execute("SELECT * FROM `resource_db`.`static_config` WHERE id='%s'" % hostInfo['config_id'])
        configInfo = self.getDict(cur.fetchone(), cur)
        self.logger.info("destroying host %s" % hostInfo['hostname'])
        bash("cobbler system remove --name=%s" % hostInfo['hostname'])
        xenssh = remoteSSHClient(hostInfo['infra_server'], 22, "root", hostInfo['infra_server_passwd'])
        self.logger.debug("bash vm-uninstall.sh -n %s" % (hostInfo['hostname']))
        xenssh.execute("xe vm-uninstall force=true vm=%s" % hostInfo['hostname'])
        self.resourceMgr.freeConfig(configInfo['configfile'])
        self.remove(hostInfo['id'])
def seedBuiltinTemplates(self):
    ssh = remoteSSHClient("nfs-server", 22, "root", "password")
    for zone in self.json_config.zones:
        for pod in zone.pods:
            for cluster in pod.clusters:
                if cluster.hypervisor.lower() == "xenserver":
                    hypervisor = "xen"
                else:
                    hypervisor = cluster.hypervisor.lower()
                if hypervisor == "simulator":
                    continue
                for sstor in zone.secondaryStorages:
                    path = urlparse.urlsplit(sstor.url).path
                    if hypervisor == 'xen':
                        if ssh.execute("mkdir -p %s/template/tmpl/1/5/" % path) != 0:
                            self.logger.error("failed to create directory on nfs-server %s" % "".join(ssh.errorlog))
                        if ssh.execute("cp -f /export/BUILTIN/XEN/* %s/template/tmpl/1/5/." % path) != 0:
                            self.logger.error("failed to copy builtin template %s" % "".join(ssh.errorlog))
                    if hypervisor == 'kvm':
                        if ssh.execute("mkdir -p %s/template/tmpl/1/4/" % path) != 0:
                            self.logger.error("failed to create directory on nfs-server %s" % "".join(ssh.errorlog))
                        if ssh.execute("cp -f /export/BUILTIN/KVM/* %s/template/tmpl/1/4/." % path) != 0:
                            self.logger.error("failed to copy builtin template %s" % "".join(ssh.errorlog))
    ssh.close()
def configureManagementServer(self, profile, branch, configName=None):
    """
    We currently configure all mgmt servers on a single xen HV. In the future,
    replace this by launching instances via the API on an IaaS cloud using the
    desired template.
    """
    mgmt_vm_mac = bash(self.workDir + "/macgen.py").getStdout()
    mgmt_host = "centos-" + mgmt_vm_mac.replace(':', '-')
    mgmt_ip = None
    configPath = None
    configObj = None
    if configName == None:
        configObj = self.resourceMgr.getConfig(profile)
    else:
        configObj = self.resourceMgr.getConfig(profile, configName)
    if configObj == None:
        self.logger.info("either the config you asked for is in use or it is not registered in the db")
        sys.exit(1)
    configPath = configObj['configfile']
    config = configGenerator.getSetupConfig(configPath.replace(' ', ''))
    mgmt_ip = config.mgtSvr[0].mgtSvrIp
    # validate the ip address
    try:
        ip = IPAddress(mgmt_ip)
    except Exception as e:
        self.logger.exception(e)
        # free the config and exit
        self.resourceMgr.freeConfig(configPath)
        exit(1)
    self.infraxen_ip = config.infraxen
    self.infraxen_passwd = config.infraxen_passwd
    noSimulator = self.checkIfSimulatorBuild(config)
    if (self.infraxen_ip == None or self.infraxen_passwd == None):
        self.logger.info("invalid values for infraxen_ip or infraxen_passwd")
        self.resourceMgr.freeConfig(configPath)
        exit(1)
    self.logger.info("management server ip=%s" % mgmt_ip)
    os = "centos"
    self.mgmtHostInfo.update({'hostname': mgmt_host})
    self.mgmtHostInfo.update({'ip': mgmt_ip})
    self.mgmtHostInfo.update({'mac': mgmt_vm_mac})
    self.mgmtHostInfo.update({'password': "******"})
    self.mgmtHostInfo.update({'domain': self.DOMAIN})
    self.mgmtHostInfo.update({'os': os})
    self.mgmtHostInfo.update({'configfile': configPath})
    self.mgmtHostInfo.update({'config_id': configObj['id']})
    self.mgmtHostInfo.update({'infra_server': self.infraxen_ip})
    self.mgmtHostInfo.update({'infra_server_passwd': self.infraxen_passwd})
    self.mgmtHostInfo.update({'profile': profile})
    self.mgmtHostInfo.update({'branch': branch})
    self.mgmtHostInfo.update({'noSimulator': noSimulator})
    self.mgmtHostInfo.update({'simulator': 'dummy'})
    templateFiles = "/etc/puppet/agent.conf=/etc/puppet/puppet.conf"
    cobbler_profile = self.getProfile("centos")
    # Remove and re-add the cobbler system
    bash("cobbler system remove --name=%s" % mgmt_host)
    bash("cobbler system add --name=%s --hostname=%s --dns-name=%s --mac-address=%s \
         --netboot-enabled=yes --enable-gpxe=no --ip-address=%s \
         --profile=%s --template-files=%s " % (mgmt_host, mgmt_host,
                                               (mgmt_host + "." + self.DOMAIN),
                                               mgmt_vm_mac, mgmt_ip,
                                               cobbler_profile, templateFiles))
    bash("cobbler sync")
    # clean puppet reports if any
    bash("rm -f %s" % ("/var/lib/puppet/reports/" + self.mgmtHostInfo['hostname'] + "." + self.DOMAIN))
    # add this node to nodes.pp of puppet
    bash(("echo \"node %s inherits basenode { \ninclude nfsclient\ninclude java17\ninclude mysql\ninclude maven35\ninclude cloudstack-simulator\ninclude m2repo}\" >> /etc/puppet/manifests/nodes.pp" % (mgmt_host)))
    # Revoke all certs from puppetmaster
    bash("puppet cert clean %s.%s" % (mgmt_host, self.DOMAIN))
    # Start the VM on xenserver
    xenssh = remoteSSHClient(self.infraxen_ip, 22, "root", self.infraxen_passwd)
    self.logger.debug("bash vm-uninstall.sh -n %s" % (mgmt_host))
    xenssh.execute("xe vm-uninstall force=true vm=%s" % mgmt_host)
    self.logger.debug("bash vm-start.sh -n %s -m %s" % (mgmt_host, mgmt_vm_mac))
    out = xenssh.execute("bash vm-start.sh -n %s -m %s" % (mgmt_host, mgmt_vm_mac))
    self.logger.info("started mgmt server: %s. Waiting for services .." % mgmt_host)
    return self.mgmtHostInfo
def addProxyInfoToHosts(self):
    for hypervisor in self.hypervisorInfo:
        self.logger.info("Adding proxy info to host %s" % hypervisor['ip'])
        hostssh = remoteSSHClient(hypervisor['ip'], 22, "root", hypervisor['password'])
        hostssh.execute('echo "export http_proxy=http://172.16.88.5:3128" >> /root/.bashrc')
        hostssh.close()
def createXenPools(self, xenHostClusterList):
    # Join every non-master host in each cluster to the pool of the first (master) host.
    for hostList in xenHostClusterList:
        for hostInfo in hostList[1:]:
            hostssh = remoteSSHClient(hostInfo['ip'], 22, "root", hostInfo['password'])
            hostssh.execute("xe pool-join master-address=%s master-username=%s master-password=%s"
                            % (hostList[0]['hostname'], 'root', hostList[0]['password']))
            hostssh.close()