Code example #1
 def deleteTestLogs(prList):
     logger = logging.getLogger("prLogClenUpMethod")
     for pr in prList:
         try:
             bash("rm -rf /mnt/test_result_archive/%s" % (pr["build_number"]))
         except KeyError as e:
             logger.error("keyError occured, no key named %s for prNo %s" % (e.args[0], pr["pr_no"]))
Code example #2
File: driverVm.py Project: bvbharatk/CI-orchestrator
 def installPuppet(self):
     self.logger.info("Begining puppet install")
     self.logger.info("Adding puppet labs collection repository")
     self.checkForSuccess(bash("rpm -ivh https://yum.puppetlabs.com/puppetlabs-release-pc1-el-7.noarch.rpm")) 
     self.checkForSuccess(bash("yum -y install puppetserver"))
     self.checkForSuccess(bash("systemctl enable puppetserver && systemctl start puppetserver"))
     self.logger.info("puppet install complete")
Code example #3
 def getTotalTests(self):
    if (self.hypervisor.lower() != "simulator"):
       Tag="tags=%s"%self.zoneType
    else:
       Tag="tags=selfservice,'!BugId'"
    bash("%s/testenv/bin/nosetests-2.7 --with-xunit --xunit-file=totalTests.xml -w %s/test/integration/smoke -a %s --collect-only "%(self.basedir, self.basedir,Tag))
    ts, tr=parse(self.basedir+"/totalTests.xml")
Code example #4
 def isJobComplete(self,queue):
       args=queue.get()
       envPath=args[0]
       jobIdentifier=args[1]
       while not os.path.isdir(envPath+"/"+"%sComplete"%jobIdentifier):
                 self.logger.debug("testing on zone %s is in progress"%jobIdentifier)
                 time.sleep(20)
       queue.task_done()
       bash("rm -f %s/%sComplete"%(envPath,jobIdentifier))
Code example #5
    def reportUsingJenkinsEmailPlugin(self, marvinConfigJson, env, execOnOneZone = True):
        try:
            jobIdentifierList = []
            bugLoggerData = []
            j = Jenkins('http://jenkins-ccp.citrix.com', 'bharatk', 'BharatK')
            for zone in cscfg.zones:
                self.logger.info(zone.name)
                for pod in zone.pods:
                    for cluster in pod.clusters:
                        self.logger.info('creating a jenkins job to generate results and email notification for hypervisor %s and zone %s' % (cluster.hypervisor, zone.name))
                        modifiedjob = jobModifier.modifyReportGenerator(env['build_number'] + '_' + zone.name + '_' + cluster.hypervisor, mailto)
                        jobname = modifiedjob
                        file = open('/root/cloud-autodeploy2/newcode/' + modifiedjob, 'r')
                        config = file.read()
                        file.close()
                        j.create_job(modifiedjob, config)
                        j.build_job(modifiedjob, {'buildNumber': env['build_number'],
                         'BuildNo': env['build_number'],
                         'MGMT_SVR': env['hostip'],
                         'BASEDIR': env['virtenvPath'],
                         'version': env['version'],
                         'BranchInfo': env['version'],
                         'GitRepoUrl': env['repo_url'],
                         'GitCommitId': env['commit_id'],
                         'CIRunStartDateTime': env['startTime'],
                         'CIRunEndDateTime': time.strftime('%c'),
                         'WikiLinks': 'https://cwiki.apache.org/confluence/display/CLOUDSTACK/Infrastructure%2CCI%2CSimulator%2CAutomation+Changes',
                         'hypervisor': cluster.hypervisor.lower(),
                         'HyperVisorInfo': cluster.hypervisor.lower(),
                         'zoneName': zone.name,
                         'BuildReport': 'https://www.dropbox.com/sh/yj3wnzbceo9uef2/AAB6u-Iap-xztdm6jHX9SjPja?dl=0',
                         'token': 'bharat'})
                        jobIdentifierList.append('report_' + zone.name)
                        jobDetails = {'job_name': modifiedjob,
                         'related_data_path': env['virtenvPath']}
                        self.resourceMgr.addJobDetails(jobDetails)
                        bugLoggerData.append({'hypervisor': cluster.hypervisor.lower(),
                         'branch': env['version'],
                         'logname': cluster.hypervisor.lower() + '__Log_' + env['build_number'],
                         'type': 'BVT'})
                        self.logger.info('bug logger data in zone loop %s' % bugLoggerData)
                        self.waitForJobComplete(env['virtenvPath'], jobIdentifierList)
                        self.archiveTestRunLogs(env, cluster.hypervisor.lower(), jobname)
                        break

                    break

                if execOnOneZone:
                    break
                self.logger.info('job identifier list %s' % jobIdentifierList)
                self.logger.info('cleaning up the workspace')
                bash('rm -f /root/cloud-autodeploy2/newcode/%s' % modifiedjob)
                self.logger.info('running bug logger')

        except Exception as e:
            self.logger.exception(e)
Code example #6
 def __init__(self):
     self.resourceMgr=resourceManager()
     self.profileMap=({'xenserver':'xenserver6.2','vmware':'VMware5.1-x86_64','kvm':'kvm-Centos64'})
     self.allocatedResource={}
     self.hypervisorInfo=[]
     self.hostClusterMap={'xenserver':[],'kvm':[],'vmware':[]}
     self.logger=logging.getLogger("hostImager")
     self.json_config={}
     self.mountPt="/tmp/" + ''.join([random.choice(string.ascii_uppercase) for x in xrange(0, 10)])
     bash("mkdir -p %s"%self.mountPt)
Code example #7
 def mountAndClean(self,host, path):
   """
   Will mount and clear the files on NFS host in the path given. Obviously the
   NFS server should be mountable where this script runs
   """
   self.mkdirs(self.mountPt)
   self.logger.info("cleaning up %s:%s" % (host, path))
   mnt = bash("mount -t nfs %s:%s %s" % (host, path, self.mountPt))
   erase = bash("rm -rf %s/*" % self.mountPt)
   umnt = bash("umount %s" % self.mountPt)
Code example #8
 def mountAndClean(self,host, path):
   """
   Will mount and clear the files on NFS host in the path given. Obviously the
   NFS server should be mountable where this script runs
   """
   mnt_path = "/tmp/" + ''.join([random.choice(string.ascii_uppercase) for x in xrange(0, 10)])
   self.mkdirs(mnt_path)
   self.logger.info("cleaning up %s:%s" % (host, path))
   mnt = bash("mount -t nfs %s:%s %s" % (host, path, mnt_path))
   erase = bash("rm -rf %s/*" % mnt_path)
   umnt = bash("umount %s" % mnt_path)
Code example #9
def mountAndClean(host, path):
    """
    Will mount and clear the files on NFS host in the path given. Obviously the
    NFS server should be mountable where this script runs
    """
    mnt_path = "/tmp/" + ''.join([random.choice(string.ascii_uppercase) for x in range(0, 10)])
    mkdirs(mnt_path)
    logging.info("cleaning up %s:%s" % (host, path))
    mnt = bash("mount -t nfs %s:%s %s" % (host, path, mnt_path))
    erase = bash("rm -rf %s/*" % mnt_path)
    umnt = bash("umount %s" % mnt_path)
Code example #10
def fetch(filename, url, path):
    try:
        zipstream = urllib.request.urlopen(url)
        tarball = open('/tmp/%s' % filename, 'wb')
        tarball.write(zipstream.read())
        tarball.close()
    except urllib.error.URLError as u:
        raise u
    except IOError:
        raise
    bash("mv /tmp/%s %s" % (filename, path))
Code example #11
 def createStorageDirs(self,sstor):
     self.logger.info("mount -t nfs %s %s"%("/".join(sstor.url.replace("nfs://","").split("/")[:2]),self.mountPt))
     mount=bash("mount -t nfs %s %s"%("/".join(sstor.url.replace("nfs://","").split("/")[:2]),self.mountPt))
     if(not mount.isSuccess()):
        self.logger.error("failed to mount %s"%mount.getStderr())
     path = urlparse.urlsplit(sstor.url).path
     self.logger.debug("path %s"%path)
     relativePath=path.replace(path.split("/")[1],self.mountPt)
     self.logger.info("mkdir -p %s"%relativePath)
     bash("mkdir -p %s"%relativePath)
     self.cleanMount()
Code example #12
 def __init__(self):
     self.logger=logging.getLogger('testhistoryManager')
     self.libDir='/root/cloud-autodeploy2/newcode'
     self.knownIssuesFile='%s/known_test_issues'%self.libDir
     self.failedIssuesFile='%s/failed_test_issues'%self.libDir
     self.klockFilePath="%s/.klockFile"%self.libDir
     self.flockFilePath="%s/.flockFile"%self.libDir
     self.knownIssueThreshold=3
     if (not os.path.isfile(self.klockFilePath)):
          bash("touch %s"%self.klockFilePath)
     if (not os.path.isfile(self.flockFilePath)):
          bash("touch %s"%self.flockFilePath)
     self.klock=filelock.FileLock(self.klockFilePath)
     self.flock=filelock.FileLock(self.flockFilePath)
Code example #13
 def configureCobbler(self):
     self.logger.info("configuring cobbler")
     self.checkForSuccess(bash("cobbler get-loaders"))
     self.logger.info("importing distros and creating profiles")
     filename=""
     if len(self.ci_config['centosImage']['download_url']) > 0:
           self.downLoadTonfs(self.ci_config['centosImage']['download_url'])
           mounturl="%s:/var/export/iso/"%(self.config['nodes']['nfs']['ip'])       
           filename=urlparse(self.ci_config['centosImage']['download_url']).path.split(os.sep)[-1:][0]
     else:
         mounturl=self.ci_config['centosImage']['mount_url']
     self.importDistroFromMountPt(mounturl, filename)
     self.cobblerAddRepos()
     self.cobblerAddReposToProfiles(self.ci_config['repos'].keys())
     self.checkForSuccess(bash("cobbler sync"))
Code example #14
 def archiveTestRunLogs(self, env, hypervisor, reportGeneratorBuildName):
     self.logger.info('sh  %s/archive_test_results.sh %s %s %s %s %s %s %s' % (self.libPath, env['virtenvPath'],
      hypervisor,
      '%s/%s' % (self.jenkinsWorkspace, reportGeneratorBuildName),
      env['build_number'],
      '%s/%s' % (env['virtenvPath'], hypervisor),
      env['branch'],
      env['hostip']))
     bash('sh %s/archive_test_results.sh %s %s %s %s %s %s %s >> /var/log/test_archive.log 2>&1' % (self.libPath, env['virtenvPath'],
      hypervisor,
      '%s/%s' % (self.jenkinsWorkspace, reportGeneratorBuildName),
      env['build_number'],
      '%s/%s' % (env['virtenvPath'], hypervisor),
      env['branch'],
      env['hostip']))
Code example #15
 def persistFailedIssues(self,issuesDict):
     data=[key+","+str(issuesDict[key]) for key in issuesDict.keys() if issuesDict[key] > 0]
     retry=3
     tempFile="."+''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(10))
     #print tempFile
     while retry >0:
        try:
           with self.flock.acquire(timeout = 10):
                file=open("%s/%s"%(self.libDir,tempFile),'w')
                file.write("\n".join(data))
                file.close()
                break
        except Exception as e:
               retry-=1
               self.logger.info('failed to update failed issues files due to %s'%e)
     bash("mv %s/%s  %s"%(self.libDir, tempFile, self.failedIssuesFile))
Code example #16
    def configureManagementServer(self,profile, branch, configName=None):
        """
        We currently configure all mgmt servers on a single xen HV. In the future
        replace this by launching instances via the API on a IaaS cloud using
        desired template
        """
        mgmt_vm_mac = bash(self.workDir+"/macgen.py").getStdout()
        mgmt_host = "centos-"+mgmt_vm_mac.replace(':','-')
        mgmt_ip=None
        configPath=None
        configObj=None
        if configName==None:
            configObj=self.resourceMgr.getConfig(profile)
        else:
            configObj=self.resourceMgr.getConfig(profile,configName)
        if configObj==None:
            self.logger.info("either the config you asked for is in use or it is not registered in the db")
            sys.exit(1)

        configPath=configObj['configfile']
        #print "config_path",configPath
        config=configGenerator.getSetupConfig(configPath.replace(' ',''))
        #print config
        mgmt_ip=config.mgtSvr[0].mgtSvrIp
        #validate the ip address
        try:
            ip=IPAddress(mgmt_ip)
        except Exception as e:
            self.logger.error(e)
            #freeconfig and exit
            self.resourceMgr.freeConfig(configPath)
            exit(1)
Code example #17
 def prepareKVMHost(self,mgmtHostInfo,hypervisor):
     self.logger.info("preparing kvm host %s"%hypervisor['hostname'])
     ssh = SSHClient()
     ssh.set_missing_host_key_policy(AutoAddPolicy())
     ssh.connect(hypervisor['ip'], 22,username="******",password=hypervisor['password'])
     scp = SCPClient(ssh.get_transport())
     scp.put("/etc/puppet/modules/kvm-agent/files/authorized_keys","/root/.ssh/")
     mgmtSsh=remoteSSHClient(mgmtHostInfo['ip'], 22, "root", mgmtHostInfo['password'])
     self.logger.info("copying the cloudstack rpms to kvm host")
     bash("scp -r -q -o StrictHostKeyChecking=no  /etc/puppet/modules/kvm-agent root@%s:/root"%hypervisor['ip'])
     kvmSsh=remoteSSHClient(hypervisor['ip'], 22, "root", hypervisor['password'])
     kvmSsh.execute("mkdir /tmp/cloudstack")
     mgmtSsh.execute("scp -r -q -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa.mgmt /root/cloudstack-repo/*  root@%s:/tmp/cloudstack"%hypervisor['ip'])
     kvmSsh.execute("puppet apply --debug --modulepath=/root -e 'include kvm-agent' >> puppetRun.log  2>&1")
     kvmSsh.close()
     mgmtSsh.close()
     self.logger.info("kicked off puppet install of kvm")
Code example #18
 def seedBuiltinTemplates(self):
     for zone in self.json_config.zones:
          for pod in zone.pods:
             for cluster in pod.clusters:
               if cluster.hypervisor.lower() == "xenserver":
                   hypervisor="xen"
               else:
                  hypervisor=cluster.hypervisor.lower()
               if hypervisor=="simulator":
                  continue
               for sstor in zone.secondaryStorages:
                      path = urlparse.urlsplit(sstor.url).path
                      relativePath=path.replace(path.split("/")[1],self.mountPt)
                      #mount secondary storage on ms.
                      mount=bash("mount -t nfs %s %s"%("/".join(sstor.url.replace("nfs://","").split("/")[:2]),self.mountPt))
                      if(not mount.isSuccess()):
                        self.logger.error("failed to mount %s"%sstor)
                       if(hypervisor=='xen'):
                           mkdir=bash("mkdir -p  %s/template/tmpl/1/5/"%relativePath)
                           if(not mkdir.isSuccess()):
                               self.logger.error("failed to create directory on nfs-server %s"%mkdir.getStderr())
                           copy=bash("cp -f %s/automation/BUILTIN/XEN/* %s/template/tmpl/1/5/."%(self.mountPt,relativePath))
                           if(not copy.isSuccess()):
                               self.logger.error("failed to copy builtin template %s"%copy.getStderr())
                       if(hypervisor=='kvm'):
                           mkdir=bash("mkdir -p %s/template/tmpl/1/4/"%relativePath)
                           if(not mkdir.isSuccess()):
                               self.logger.error("failed to create directory on nfs-server %s"%mkdir.getStderr())
                           copy=bash("cp -f %s/automation/BUILTIN/KVM/* %s/template/tmpl/1/4/."%(self.mountPt,relativePath))
                           if(not copy.isSuccess()):
                               self.logger.error("failed to copy builtin template %s"%copy.getStderr())
                      bash("umount %s"%self.mountPt)
Code example #19
 def install(self):
     self.checkForSuccess(bash("wget -O /etc/yum.repos.d/jenkins.repo http://pkg.jenkins.io/redhat/jenkins.repo")) 
     self.checkForSuccess(bash("rpm --import http://pkg.jenkins.io/redhat/jenkins.io.key"))
     self.checkForSuccess(bash("yum -y install jenkins"))
     self.checkForSuccess(bash("yum -y install java"))
     self.checkForSuccess(bash("chkconfig jenkins on"))
     self.checkForSuccess(bash("service jenkins start"))
     self.checkForSuccess(bash("pip install Python-Jenkins"))
     self.logger.info("jenkins install complete")
Code example #20
 def importDistroFromMountPt(self, mounturl, filename):
     mountPt1=self.getMountPt() 
     mountPt2=self.getMountPt()
     try:
        self.checkForSuccess(bash("mount -t nfs %s %s"%(mounturl, mountPt1)))
        self.checkForSuccess(bash("mount -o loop %s %s"%(os.path.join(mountPt1,filename), mountPt2)))
        self.checkForSuccess(bash("cobbler import  --name=CentosDef --path=%s"%mountPt2))
     finally:
         bash("umount %s"%mountPt1)
         bash("umount %s"%mountPt2) 
Code example #21
    def getReportAnalysers(self,cloudstack_config, env, execOnOneZone=True):
        reportAnalyserMap={}
        for zone in cloudstack_config.zones:
            for pod in zone.pods:
                for cluster in pod.clusters:
                    bash('mkdir -p %s' % ('%s/report_generator_%s_%s_%s/%s/%s' % (self.jenkinsWorkspace,
                     env['build_number'],
                     cluster.hypervisor.lower(),
                     zone.name,
                     cluster.hypervisor.lower(),
                     'reports')))
                    self.archiveTestRunLogs(env, cluster.hypervisor.lower(), 'report_generator_%s_%s_%s' % (env['build_number'], cluster.hypervisor.lower(), zone.name))
                    self.logger.info('Generating plain text report')
                    report = reportAnalyser(env['virtenvPath'], os.path.join(self.jenkinsWorkspace, 'report_generator_%s_%s_%s' % (env['build_number'], cluster.hypervisor.lower(), zone.name), cluster.hypervisor.lower(), 'reports'), env['config_file'], cluster.hypervisor.lower(), zone.networktype, env['build_number'])
                    reportAnalyserMap.update({zone.name:report})
                    break

                break

            if execOnOneZone:
                break
        return reportAnalyserMap
Code example #22
 def create(self,repo_url, branch,commit,configName,profile):
    # Re-check because ssh connect works as soon as post-installation occurs. But 
    # server is rebooted after post-installation. Assuming the server is up is
    # wrong in these cases. To avoid this we will check again before continuing
    # to add the hosts to cloudstack
    try:
       hosts=[]
       self.mgmtHostInfo={}
       self.mgmtHostInfo.update({'startTime':time.strftime("%c"),'repo_url':repo_url,'branch':branch})
       self.prepareCloudstackRepo()
       prepare_mgmt = True
       self.logger.info("Configuring management server")
       self.configureManagementServer(profile, branch, configName)
       hosts.append(self.mgmtHostInfo['hostname'])
       self.waitForHostReady(hosts)
       mgmtSsh=remoteSSHClient(self.mgmtHostInfo['ip'], 22, "root", self.mgmtHostInfo['password'])
       mgmtSsh.execute("echo 'export http_proxy=http://172.16.88.5:3128' >> /root/.bashrc; source /root/.bashrc")
       mgmtSsh.execute("puppet agent -t ")
       mgmtSsh.execute("ssh-copy-id root@%s"%self.mgmtHostInfo['ip'])
       self.waitTillPuppetFinishes()
       #git proxy config
       mgmtSsh.execute("git config --global http.proxy http://172.16.88.5:3128; git config --global https.proxy http://172.16.88.5:3128")
       delay(30)
       if prepare_mgmt:
          compute_hosts=self.hostImager.imageHosts(self.mgmtHostInfo)
          self.prepareManagementServer(repo_url,branch,commit)
          self.mgmtHostInfo.update({'repo_url':repo_url,'branch':branch})
          self.hostImager.checkIfHostsUp(compute_hosts)
          return self.mgmtHostInfo 
    except Exception as e:
         self.logger.error(e)       
         #cleanup resources and exit.
         self.resourceMgr.freeConfig(self.mgmtHostInfo['configfile'])
         xenssh=remoteSSHClient(self.infraxen_ip,22, "root", self.infraxen_passwd)
         self.logger.debug("bash vm-uninstall.sh -n %s"%(self.mgmtHostInfo['hostname']))
         xenssh.execute("xe vm-uninstall force=true vm=%s"%self.mgmtHostInfo['hostname']) 
         bash("cobbler system remove --name=%s"%(self.mgmtHostInfo['hostname']))
         bash("cobbler sync")
         sys.exit(1)
Code example #23
 def configure(self):
     #self.logger.info("setting jenkins authentication method to use unix userdata")
     #self.checkForSuccess(bash("cp %s/jenkis_auth_file /var/lib/jenkins"%currentDir))
     #self.logger.info("setting jenkins password")
     #self.logger.info("echo %s | sudo passwd jenkins --stdin"%self.jenkinsPasswd)
     #self.checkForSuccess(bash("service jenkins restart"))
     time.sleep(10)
     self.logger.info("checking if auth config is successful")
     j=Jenkins(self.jenkinsUrl, "admin", self.jenkinsPasswd)
     try:
        j.get_plugins()
     except Exception as e:
        self.logger.info("failed to retrive plugin info, may be auth problem")
        self.logger.exception(e) 
        raise e
     self.logger.info("auth config successful")
     self.logger.info("installing requried plugins")
     self.logger.info("reading from jenkins plugins file %s/jenkins_plugins.txt"%currentDir)
     f=open('%s/jenkins_plugins.txt'%currentDir, 'r')
     pluginsToInstall=f.read()  
     pluginsToInstall=pluginsToInstall.split('\n')
     self.installPlugins(j,pluginsToInstall)
     self.logger.info("Plugin installation complete")
     self.logger.info("restarting jenkins")
     self.restartJenkins()
     self.logger.info("Creating CI jobs on jenkins")
     for file in os.listdir(os.path.join(currentDir,'jenkins_job_templates')):
       try:
          if not j.job_exists(file):
             f=open(os.path.join(currentDir,'jenkins_job_templates',file),'r')
             config=f.read()
             f.close()
             self.logger.info("creating job %s, reading config from file %s"%(repr(file),os.path.join(currentDir,'jenkins_job_templates',file)))
             j.create_job(file, config)  
          else:
            self.logger.info("job %s already exists, not creating"%file)
       except Exception as e:
            self.logger.warn("failed to create job %s"%(file))
            self.logger.exception(e) 
     self.logger.info("created all CI jobs")
     self.logger.info("Adding driverVM as node in jenkins")
     params = {
              'port': '22',
              'username': '******',
              'credentialsId':'abe3f139-77bd-4db4-824b-1c79d5205d8b',
              'host':self.config['nodes']['driverVM']['ip'] 
     }
     self.addPasswdToCredential(j,"vagrant")
     self.checkForSuccess(bash("cp %s /var/lib/jenkins/."%(os.path.join(currentDir,"jenkins_credentials","credentials.xml"))))
     j.create_node('driverVM', numExecutors=20, nodeDescription="CI slave VM", remoteFS='/automation/jenkins', labels='driverVM', exclusive=True,launcher=jenkins.LAUNCHER_SSH, launcher_params=params) 
     self.logger.info("jenkins install complete")
Code example #24
   def destroy(self,hostname):
       #print "****in destroy *******"
       if self.__con or self.connect():
          cur =self.__con.cursor(MySQLdb.cursors.DictCursor)
          #print hostname
          #print "SELECT * FROM `resource_db`.`host` WHERE hostname='%s' and state='free'"%hostname
          cur.execute("SELECT * FROM `resource_db`.`host` WHERE hostname='%s' and state='free'"%hostname)
          host= cur.fetchone()
          #print host
          hostInfo=self.getDict(host,cur)
          #print "SELECT * FROM `resource_db`.`static_config` WHERE id='%s'"%hostInfo['config_id']
          cur.execute("SELECT * FROM `resource_db`.`static_config` WHERE id='%s'"%hostInfo['config_id'])
          configInfo=self.getDict(cur.fetchone(),cur)
          self.logger.info("destroying host %s"%hostInfo['hostname'])
          bash("cobbler system remove --name=%s"%hostInfo['hostname'])
          xenssh = \
                  remoteSSHClient(hostInfo['infra_server'],22, "root", hostInfo['infra_server_passwd'])

          self.logger.debug("bash vm-uninstall.sh -n %s"%(hostInfo['hostname']))
          xenssh.execute("xe vm-uninstall force=true vm=%s"%hostInfo['hostname'])
          #print hostInfo
          self.resourceMgr.freeConfig(configInfo['configfile'])
          self.remove(hostInfo['id'])
Code example #25
 def restartJenkins(self):
     self.checkForSuccess(bash("service jenkins restart"))
     retry=20
     while retry > 0:
       retry-=1
       try:
           j=Jenkins(self.jenkinsUrl, "admin", self.jenkinsPasswd)
           j.get_plugins()
           break
       except Exception as e:
            if retry==0:
               self.logger.info("Failed to restart jenkins")  
            else:
               time.sleep(20)
               self.logger.info("waiting for jenkins to restart, this may take a while")
Code example #26
 def install(self):
     self.logger.info("enabling epel release")
     self.checkForSuccess(bash("yum -y install epel-release"))
     self.logger.info("installing common dependencies")
     self.logger.info("installing wget")
     self.checkForSuccess(bash("yum -y install wget"))
     self.logger.info("installing gcc")
     self.checkForSuccess(bash("yum -y install gcc"))
     self.logger.info("installing python-devel")
     self.checkForSuccess(bash("yum install -y python-devel"))
     self.logger.info("installing python-pip")
     self.checkForSuccess(bash("yum -y install python-pip"))
     self.checkForSuccess(bash("pip install --upgrade pip"))
     self.logger.info("installing cheeta templating engine")
     self.checkForSuccess(bash("pip install cheetah"))
Code example #27
 def createDataCenter(self, env, tag = None):
     try:
         os.chdir(env['virtenvPath'])
         marvin_config = env['config_file']
         pythonPath = env['pythonPath']
         self.logger.info('Deploying datacenter using marvin')
         marvinFolder = bash('ls | grep Marvin-[0-9].[0-9].[0-9]').getStdout()
         subprocess.check_call('%s/nosetests-2.7 -v --with-marvin --marvin-config=%s --deploy -w /tmp' % (pythonPath, marvin_config), shell=True)
         self.logger.info('Testing if setup is ready')
         subprocess.check_call('%s/nosetests-2.7 -v --with-marvin --marvin-config=%s /root/cloud-autodeploy2/newcode/testSetupSuccess.py' % (pythonPath, marvin_config), shell=True)
         self.logger.info('Restarting Management server for global setting changes to take effect')
         subprocess.check_call('%s/python2.7 /root/cloud-autodeploy2/newcode/restartMgmt.py --config  %s --noSimulator %s' % (pythonPath, marvin_config, env['noSimulator']), shell=True)
          self.logger.info('Waiting some time for management server startup')
         time.sleep(120)
     except Exception as e:
          self.logger.info('error occurred while deploying datacenter.')
         self.logger.exception(e)
         raise Exception("failed to create datacenter") 
Code example #28
File: driverVm.py Project: bvbharatk/CI-orchestrator
 def  setupPuppet(self):
      conf=puppetAgentConf()
      self.logger.info("backup the original files to /etc/puppet/backup")
      bash("mkdir -p /etc/puppet/backup")
      bash("mv *.conf /etc/puppet/backup/")
      nameString="*."+".".join(self.config['nodes']['driverVM']['hostname'].split(".")[1:])
      conf.search=nameString 
      conf.driver_vm_hostname=self.config['nodes']['driverVM']['hostname']
      f=open("/etc/puppet/agent.conf",'w')
      print >> f, conf
      f.close()
      cmd="echo %s >> /etc/puppet/autosign.conf"%nameString
      self.checkForSuccess(bash(cmd))
      self.checkForSuccess(bash("cp -f %s/puppet/* /etc/puppet/."%currentDir)) 
Code example #29
 def createDataCenter(self,env,tag=None):
     try:
         os.chdir(env['virtenvPath'])
         marvin_config=env['config_file']
         pythonPath=env['pythonPath']
         self.logger.info("Deploying datacenter using marvin")
         #subprocess.check_call("%s/nosetests-2.7 -v --with-marvin --marvin-config=%s -w /tmp"%(pythonPath,marvin_config),shell=True)
         marvinFolder=bash("ls | grep Marvin-[0-9].[0-9].[0-9]").getStdout()
         subprocess.check_call("%s/nosetests-2.7 -v --with-marvin --marvin-config=%s --deploy -w /tmp"%(pythonPath,marvin_config),shell=True)
         #subprocess.check_call("%s/python2.7 ./%s/marvin/deployDataCenter.py -i %s"%(pythonPath,marvinFolder,marvin_config),shell=True)
         self.logger.info("Testing if setup is ready")
         subprocess.check_call("%s/nosetests-2.7 -v --with-marvin --marvin-config=%s /root/cloud-autodeploy2/newcode/testSetupSuccess.py"%(pythonPath,marvin_config),shell=True) 
         self.logger.info("Restarting Management server for global setting changes to take effect")
         subprocess.check_call("%s/python2.7 /root/cloud-autodeploy2/newcode/restartMgmt.py --config  %s --noSimulator %s"%(pythonPath,marvin_config,env['noSimulator']),shell=True)
         self.logger.info("Waiting some time for managementserver startup")
         time.sleep(120)
      except Exception as e:
          self.logger.info("error occurred while deploying datacenter.")
         self.logger.error(e)
         return 1
Code example #30
File: configure.py Project: iselu/cloud-autodeploy
def configureManagementServer(mgmt_host):
    """
    We currently configure all mgmt servers on a single xen HV. In the future
    replace this by launching instances via the API on a IaaS cloud using
    desired template
    """
    mgmt_vm = macinfo[mgmt_host]
    mgmt_ip = macinfo[mgmt_host]["address"]

    # Remove and re-add cobbler system
    bash("cobbler system remove --name=%s" % mgmt_host)
    bash(
        "cobbler system add --name=%s --hostname=%s --mac-address=%s \
         --netboot-enabled=yes --enable-gpxe=no \
         --profile=%s --server=%s --gateway=%s"
        % (
            mgmt_host,
            mgmt_host,
            mgmt_vm["ethernet"],
            mgmt_host,
            cobblerHomeResolve(mgmt_ip, param="cblrgw"),
            cobblerHomeResolve(mgmt_ip),
        )
    )
    bash("cobbler sync")

    # Revoke all certs from puppetmaster
    bash("puppet cert clean %s.%s" % (mgmt_host, DOMAIN))

    # Start VM on xenserver
    xenssh = sshClient.SshClient(macinfo["infraxen"]["address"], 22, "root", macinfo["infraxen"]["password"])

    logging.debug("bash vm-uninstall.sh -n %s" % (mgmt_host))
    xenssh.execute("xe vm-uninstall force=true vm=%s" % mgmt_host)
    logging.debug("bash vm-start.sh -n %s -m %s" % (mgmt_host, mgmt_vm["ethernet"]))
    out = xenssh.execute("bash vm-start.sh -n %s -m %s" % (mgmt_host, mgmt_vm["ethernet"]))

    logging.info("started mgmt server with uuid: %s. Waiting for services .." % out)
    return mgmt_host
Code example #31
def configureManagementServer(mgmt_host):
    """
    We currently configure all mgmt servers on a single xen HV. In the future
    replace this by launching instances via the API on a IaaS cloud using
    desired template
    """
    mgmt_vm = macinfo[mgmt_host]
    mgmt_ip = macinfo[mgmt_host]["address"]

    #Remove and re-add cobbler system
    bash("cobbler system remove --name=%s" % mgmt_host)
    bash("cobbler system add --name=%s --hostname=%s --mac-address=%s \
         --netboot-enabled=yes --enable-gpxe=no \
         --profile=%s --server=%s --gateway=%s" %
         (mgmt_host, mgmt_host, mgmt_vm["ethernet"], mgmt_host,
          cobblerHomeResolve(mgmt_ip,
                             param='cblrgw'), cobblerHomeResolve(mgmt_ip)))
    bash("cobbler sync")

    #Revoke all certs from puppetmaster
    bash("puppet cert clean %s.%s" % (mgmt_host, DOMAIN))

    #Start VM on xenserver
    xenssh = \
    sshClient.SshClient(macinfo["infraxen"]["address"],
                                    22, "root",
                                    macinfo["infraxen"]["password"])

    logging.debug("bash vm-uninstall.sh -n %s" % (mgmt_host))
    xenssh.execute("xe vm-uninstall force=true vm=%s" % mgmt_host)
    logging.debug("bash vm-start.sh -n %s -m %s" %
                  (mgmt_host, mgmt_vm["ethernet"]))
    out = xenssh.execute("bash vm-start.sh -n %s -m %s" %
                         (mgmt_host, mgmt_vm["ethernet"]))

    logging.info("started mgmt server with uuid: %s. Waiting for services .." %
                 out)
    return mgmt_host
Code example #32
def seedSecondaryStorage(cscfg, hypervisor):
    """
    erase secondary store and seed system VM template via puppet. The
    secseeder.sh script is executed on mgmt server bootup which will mount and
    place the system VM templates on the NFS
    """
    mgmt_server = cscfg.mgtSvr[0].mgtSvrIp
    logging.info("Secondary storage seeded via puppet with systemvm templates")
    bash("rm -f /etc/puppet/modules/cloudstack/files/secseeder.sh")
    for zone in cscfg.zones:
        for sstor in zone.secondaryStorages:
            shost = urllib.parse.urlsplit(sstor.url).hostname
            spath = urllib.parse.urlsplit(sstor.url).path
            spath = ''.join([shost, ':', spath])
            logging.info("seeding %s systemvm template on %s"%(hypervisor, spath))
            bash("echo '/bin/bash /root/redeploy.sh -s %s -h %s' >> /etc/puppet/modules/cloudstack/files/secseeder.sh"%(spath, hypervisor))
    bash("chmod +x /etc/puppet/modules/cloudstack/files/secseeder.sh")
Code example #33
File: configure.py Project: Blufe/cloudstack
def seedSecondaryStorage(cscfg, hypervisor):
    """
    erase secondary store and seed system VM template via puppet. The
    secseeder.sh script is executed on mgmt server bootup which will mount and
    place the system VM templates on the NFS
    """
    mgmt_server = cscfg.mgtSvr[0].mgtSvrIp
    logging.info("Secondary storage seeded via puppet with systemvm templates")
    bash("rm -f /etc/puppet/modules/cloudstack/files/secseeder.sh")
    for zone in cscfg.zones:
        for sstor in zone.secondaryStorages:
            shost = urlparse.urlsplit(sstor.url).hostname
            spath = urlparse.urlsplit(sstor.url).path
            spath = ''.join([shost, ':', spath])
            logging.info("seeding %s systemvm template on %s"%(hypervisor, spath))
            bash("echo '/bin/bash /root/redeploy.sh -s %s -h %s' >> /etc/puppet/modules/cloudstack/files/secseeder.sh"%(spath, hypervisor))
    bash("chmod +x /etc/puppet/modules/cloudstack/files/secseeder.sh")
Code example #34
      def seedSecondaryStorage(self,cscfg,hostInfo):
          """
          erase secondary store and seed system VM template via puppet. The
          secseeder.sh script is executed on mgmt server bootup which will mount and
          place the system VM templates on the NFS
          """
          mgmt_server = cscfg.mgtSvr[0].mgtSvrIp
          #hypervisors = ["xen","kvm","vmware"]
          ssh = SSHClient()
          ssh.set_missing_host_key_policy(AutoAddPolicy())
          ssh.connect(hostname=hostInfo['ip'],username="******",password=hostInfo['password'])
          scp = SCPClient(ssh.get_transport())

          for zone in cscfg.zones:
               for pod in zone.pods:
                  for cluster in pod.clusters:
                    if cluster.hypervisor.lower() == "xenserver":
                        hypervisor="xen"
                    else:
                       hypervisor=cluster.hypervisor.lower()
                    if hypervisor=="simulator":
                       continue
                    for sstor in zone.secondaryStorages:
                           shost = urlparse.urlsplit(sstor.url).hostname
                           spath = urlparse.urlsplit(sstor.url).path
                           spath = ''.join([shost, ':', spath])
                           self.createStorageDirs(sstor)
                           #for h in hypervisors:
                           self.logger.info("adding template seeding commands to seed %s systemvm template on %s"%(hypervisor, spath))
                           #self.logger.info("seeding from url %s"%self.resourceMgr.getSystemVMdownloadUrl(hostInfo['branch'],cluster.hypervisor.lower()))
                           bash("echo '/bin/bash /root/redeploy.sh -s %s -u %s -h %s' >> /tmp/secseeder.%s.sh"%(spath, self.resourceMgr.getSystemVMdownloadUrl(hostInfo['branch'],cluster.hypervisor.lower())['download_url'],hypervisor,hostInfo['ip']))
          try:
              if (os.path.exists("/tmp/secseeder.%s.sh"%(hostInfo['ip']))):
                 bash("chmod +x /tmp/secseeder.%s.sh"%(hostInfo['ip']))
                 scp.put("/tmp/secseeder.%s.sh"%(hostInfo['ip']),"/root/secseeder.sh")
                 bash("rm -f /tmp/secseeder.%s.sh"%hostInfo['ip'])
          except Exception as e:
                 self.logger.exception(e)
                 raise
Code example #35
def refreshHosts(cscfg, hypervisor="xen", profile="xen602"):
    """
    Removes cobbler system from previous run. 
    Creates a new system for current run.
    Ipmi boots from PXE - default to Xenserver profile
    """
    for zone in cscfg.zones:
        for pod in zone.pods:
            for cluster in pod.clusters:
                for host in cluster.hosts:
                    hostname = urlparse.urlsplit(host.url).hostname
                    logging.debug("attempting to refresh host %s" % hostname)
                    #revoke certs
                    bash("puppet cert clean %s.%s" % (hostname, DOMAIN))
                    #setup cobbler profiles and systems
                    try:
                        hostmac = macinfo[hostname]['ethernet']
                        hostip = macinfo[hostname]['address']
                        bash("cobbler system remove \
                             --name=%s" % (hostname))
                        bash("cobbler system add --name=%s --hostname=%s \
                             --mac-address=%s --netboot-enabled=yes \
                             --enable-gpxe=no --profile=%s --server=%s \
                             --gateway=%s" %
                             (hostname, hostname, hostmac, profile,
                              cobblerHomeResolve(hostip, param='cblrgw'),
                              cobblerHomeResolve(hostip)))

                        bash("cobbler sync")
                    except KeyError:
                        logging.error("No mac found against host %s. Exiting" %
                                      hostname)
                        sys.exit(2)
                    #set ipmi to boot from PXE
                    try:
                        ipmi_hostname = ipmiinfo[hostname]
                        logging.debug("found IPMI nic on %s for host %s" %
                                      (ipmi_hostname, hostname))
                        bash("ipmitool -Uroot -P%s -H%s chassis bootdev \
                             pxe" % (IPMI_PASS, ipmi_hostname))
                        bash("ipmitool -Uroot -P%s -H%s chassis power cycle" %
                             (IPMI_PASS, ipmi_hostname))
                        logging.debug("Sent PXE boot for %s" % ipmi_hostname)
                    except KeyError:
                        logging.error(
                            "No ipmi host found against %s. Exiting" %
                            hostname)
                        sys.exit(2)
                    yield hostname
    delay(5)  # to let the pxe boot process begin, else the wait returns immediately
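
Note that refreshHosts is a generator: no host is PXE-booted until the caller iterates over it, and the trailing delay(5) only runs once the generator is exhausted. A hypothetical driver loop:

    # cscfg would come from the same marvin config parser used elsewhere here
    for hostname in refreshHosts(cscfg, hypervisor="xen", profile="xen602"):
        logging.info("refreshed host %s", hostname)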
Code example #36
def mkdirs(path):
    dir = bash("mkdir -p %s" % path)
Code example #37
def mkdirs(path):
    dir = bash("mkdir -p %s" % path)


def fetch(filename, url, path):
    try:
        zipstream = urllib2.urlopen(url)
        tarball = open('/tmp/%s' % filename, 'wb')
        tarball.write(zipstream.read())
        tarball.close()
    except urllib2.URLError as u:
        raise u
    except IOError:
        raise
    bash("mv /tmp/%s %s" % (filename, path))


def cobblerHomeResolve(ip_address, param="gateway"):
    ipAddr = IPAddress(ip_address)
    for nic, network in cobblerinfo.items():
        subnet = IPNetwork(cobblerinfo[nic]["network"])
        if ipAddr in subnet:
            return cobblerinfo[nic][param]


def configureManagementServer(mgmt_host):
    """
    We currently configure all mgmt servers on a single xen HV. In the future
    replace this by launching instances via the API on a IaaS cloud using
    desired template