Ejemplo n.º 1
0
 def imageHosts(self,mgmtHostInfo):
     """
     Re-image the compute hosts described by the setup config referenced in
     mgmtHostInfo['configfile'], then seed secondary storage and clean
     primary storage so the deployment starts from a known state.

     Returns the list of hosts produced by refreshHosts().
     """
     # Parse the deployment config once and keep it on the instance so the
     # storage helpers below can reuse it.
     self.json_config = configGenerator.getSetupConfig(mgmtHostInfo['configfile'])
     imaged_hosts = list(self.refreshHosts(self.json_config))
     self.seedSecondaryStorage(self.json_config, mgmtHostInfo)
     self.cleanPrimaryStorage(self.json_config)
     return imaged_hosts
Ejemplo n.º 2
0
    def configureManagementServer(self,profile, branch, configName=None):
     """
     We currently configure all mgmt servers on a single xen HV. In the future
     replace this by launching instances via the API on a IaaS cloud using
     desired template
     """
     # NOTE(review): this excerpt is truncated — in the full source the
     # function continues past the IP-validation block below.
     # Generate a fresh MAC for the mgmt VM and derive its hostname from it.
     mgmt_vm_mac = bash(self.workDir+"/macgen.py").getStdout()
     mgmt_host = "centos-"+mgmt_vm_mac.replace(':','-')
     mgmt_ip=None
     configPath=None
     configObj=None
     # Reserve a setup config for this profile; a specific named config may
     # be requested via configName.
     if configName==None:
           configObj=self.resourceMgr.getConfig(profile)
     else:
         configObj=self.resourceMgr.getConfig(profile,configName)
     if configObj==None:
        self.logger.info("either the config you asked for is in use or it is not registered in the db")
        sys.exit(1)

     configPath=configObj['configfile']
     #print "config_path",configPath
     # Strip stray spaces from the stored path before parsing the config.
     config=configGenerator.getSetupConfig(configPath.replace(' ',''))
     #print config
     mgmt_ip=config.mgtSvr[0].mgtSvrIp
     #validate the ip address
     try:
        ip=IPAddress(mgmt_ip)
     except Exception,e:
            self.logger.error(e)
            #freeconfig and exit (release the reserved config on bad input)
            self.resourceMgr.freeConfig(configPath)
            exit(1)
 def refreshHost(self, mgmtHostInfo,branch,githubPrno,commit,reImageHosts=True):
     """
     Prepare the mgmt server for a marvin test run.

     Optionally re-images the compute hosts, rebuilds cloudstack on the
     management server over SSH (from a github PR, a commit, or a branch),
     seeds the systemvm/builtin templates and retries restarting the
     management server until it answers on port 8080.

     :param mgmtHostInfo: dict describing the mgmt host; must contain at
                          least 'ip', 'password' and 'configfile'
     :param branch: git branch to build when no PR number is given
     :param githubPrno: github PR number to build, or None
     :param commit: specific commit to check out, or None
     :param reImageHosts: when True, re-image the compute hosts first
     :return: the updated mgmtHostInfo dict
     :raises Exception: when the cloudstack build fails on the mgmt server
     """
     buildlog=""
     self.logger.info("refreshing management server")
     self.mgmtHostInfo=mgmtHostInfo
     self.mgmtHostInfo.update({'startTime':time.strftime("%c")})
     self.mgmtHostInfo['branch']=branch
     # Fix: initialise to an empty list so checkIfHostsUp() below does not
     # hit a NameError when reImageHosts is False (previously unbound).
     compute_hosts=[]
     if reImageHosts:
        compute_hosts=self.hostImager.imageHosts(self.mgmtHostInfo)
     # Wait for sshd (22) and mysql (3306) before driving the build.
     if self._isPortListening(host=mgmtHostInfo['ip'], port=22, timeout=300) \
         and self._isPortListening(host=mgmtHostInfo['ip'], port=3306, timeout=30):
       mgmt_ip = self.mgmtHostInfo['ip']
       mgmt_pass = self.mgmtHostInfo['password']
       with contextlib.closing(remoteSSHClient(mgmt_ip, 22, "root", mgmt_pass)) as ssh:
         self.logger.info("seeding systemvm templates")
         # NOTE: 'pacakageIfKVM' is a typo preserved from the hostImager
         # interface; it selects the package to build for KVM setups.
         package=self.hostImager.pacakageIfKVM()
         config=configGenerator.getSetupConfig(self.mgmtHostInfo['configfile'].replace(' ',''))
         noSimulator=self.checkIfSimulatorBuild(config)
         self.mgmtHostInfo.update({'noSimulator':noSimulator})
         # Build from the PR when one is given, otherwise from the branch;
         # append the commit selector only when a commit was requested.
         out=ssh.execute("python /root/refreshHost.py -p %s --noSimulator %s  %s %s > /var/log/cloudstack.log"%(package, self.mgmtHostInfo['noSimulator'],("--githubPrno %s"%githubPrno if (githubPrno is not None) else "-b %s"%branch), ("" if commit==None else "-c %s"%commit)))
         buildlog="".join(ssh.stdout)
         if(out !=0):
            self.logger.info("build log ...............\n%s"%buildlog)
            errorlog="failed to build cloudstack errorLog: %s"%ssh.errorlog
            # Dump the build log before failing (path fixed: was the typo
            # '/var/log/cloudstck.log', which would have printed nothing).
            ssh.execute("cat /var/log/cloudstack.log")
            self.logger.error("".join(ssh.stdout))
            raise Exception("failed to build cloudstack errorLog: %s"%errorlog)
         # Record the exact commit that was built for later reporting.
         ssh.execute("cd /automation/cloudstack/; git log | grep -m 1 'commit' | tr -d 'commit' | tr -d ' '")
         self.logger.info('building from commit_id %s'%ssh.stdout[0])
         self.mgmtHostInfo.update({'commit_id':ssh.stdout[0]})
         ssh.execute("sh /root/secseeder.sh > secseeder.log 2>&1")
         self.hostImager.seedBuiltinTemplates()
         # Give the management server up to three restarts to come up on 8080.
         retry=3
         while retry !=0:
               if not  (self._isPortListening(host=self.mgmtHostInfo['ip'], port=8080, timeout=300)):
                       ssh.execute("python /root/restartMgmtServer.py -p /automation/cloudstack/ --noSimulator %s >> /var/log/cloudstack.log"%self.mgmtHostInfo['noSimulator'])
                       self.logger.info("exceeded timeout restarting the management server and trying again")
                       retry=retry-1
               else:
                   break
       self.hostImager.checkIfHostsUp(compute_hosts)
       self.hostImager.execPostInstallHooks(self.mgmtHostInfo)
       return self.mgmtHostInfo
Ejemplo n.º 4
0
 def __parseConfig(self):
     '''
     Parse the configuration file referenced by self.__configFile and
     keep the parsed result on self.__parsedConfig, logging the outcome
     through self.__tcRunLogger.

     @Output : SUCCESS when parsing worked, FAILED otherwise
     '''
     try:
         path = self.__configFile
         # A missing file is reported as a failure rather than raising.
         if not os.path.isfile(path):
             self.__tcRunLogger.error("=== Marvin Parse Config Init Failed ===")
             return FAILED
         self.__parsedConfig = getSetupConfig(path)
         self.__tcRunLogger.info("=== Marvin Parse Config Successful ===")
         return SUCCESS
     except Exception as err:
         # Any parse error is logged with traceback and mapped to FAILED.
         self.__tcRunLogger.exception("=== Marvin Parse Config Init Failed: %s ===" % err)
         return FAILED
Ejemplo n.º 5
0
 def __parseConfig(self):
     '''
     @Name: __parseConfig
     @Desc : Parses the configuration file passed and assigns
     the parsed configuration
     @Output : SUCCESS or FAILED
     '''
     # Python 2 variant: progress is reported via print statements rather
     # than a logger (compare the logger-based __parseConfig elsewhere in
     # this file).
     try:
         # A missing config file is reported as FAILED without raising.
         if not os.path.isfile(self.__configFile):
             print "\n=== Marvin Parse Config Init Failed ==="
             return FAILED
         self.__parsedConfig = getSetupConfig(self.__configFile)
         print "\n=== Marvin Parse Config Successful ==="
         return SUCCESS
     except Exception as e:
         # Any parse error is printed with full detail and mapped to FAILED.
         print "\nException Occurred Under __parseConfig : " \
               "%s" % GetDetailExceptionInfo(e)
         return FAILED
Ejemplo n.º 6
0
 def __parseConfig(self):
     '''
     @Name: __parseConfig
     @Desc : Parses the configuration file passed and assigns
     the parsed configuration
     @Output : SUCCESS or FAILED
     '''
     # Duplicate of the print-based Python 2 variant above; kept verbatim.
     try:
         # A missing config file is reported as FAILED without raising.
         if not os.path.isfile(self.__configFile):
             print "\n=== Marvin Parse Config Init Failed ==="
             return FAILED
         self.__parsedConfig = getSetupConfig(self.__configFile)
         print "\n=== Marvin Parse Config Successful ==="
         return SUCCESS
     except Exception as e:
         # Any parse error is printed with full detail and mapped to FAILED.
         print "\nException Occurred Under __parseConfig : " \
               "%s" % GetDetailExceptionInfo(e)
         return FAILED
Ejemplo n.º 7
0
 def __parseConfig(self):
     '''
     Parse the setup configuration file referenced by self.__configFile.

     On success the parsed object is stored on self.__parsedConfig.

     @Output : SUCCESS when parsing worked, FAILED otherwise
     '''
     try:
         # Success path first: parse only when the file actually exists.
         if os.path.isfile(self.__configFile):
             self.__parsedConfig = getSetupConfig(self.__configFile)
             self.__tcRunLogger.info("=== Marvin Parse Config Successful ===")
             return SUCCESS
         # Missing file: log the failure without raising.
         self.__tcRunLogger.error(
             "=== Marvin Parse Config Init Failed ===")
         return FAILED
     except Exception as exc:
         # Parse errors are logged with traceback and mapped to FAILED.
         self.__tcRunLogger.exception(
             "=== Marvin Parse Config Init Failed: %s ===" % exc)
         return FAILED
Ejemplo n.º 8
0
# Script entry point: restart cloudstack-management on the server named in
# the given setup config, then wait for the API port (8096) to open.
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option(
        "-c",
        "--config",
        action="store",
        default="xen.cfg",
        dest="config",
        help="the path where the server configurations is stored")
    (options, args) = parser.parse_args()

    # NOTE(review): a bare `raise` outside an except block raises
    # RuntimeError("No active exception to re-raise") — presumably a
    # specific error was intended here; confirm against the full source.
    if options.config is None:
        raise

    cscfg = configGenerator.getSetupConfig(options.config)
    mgmt_server = cscfg.mgtSvr[0].mgtSvrIp
    # NOTE(review): root password is hard-coded as "password" — verify.
    ssh = sshClient.SshClient(mgmt_server, 22, "root", "password")
    ssh.execute("service cloudstack-management restart")

    #Telnet wait until api port is open
    tn = None
    timeout = 120
    # Poll once per second for up to ~120s until the API port accepts.
    while timeout > 0:
        try:
            tn = telnetlib.Telnet(mgmt_server, 8096, timeout=120)
            break
        except Exception:
            delay(1)
            timeout = timeout - 1
    # NOTE(review): this excerpt is truncated — the body of the branch
    # below is missing from this view.
    if tn is None:
    def configureManagementServer(self,profile, branch, configName=None):
        """
        Provision a management-server VM on the shared infra XenServer.

        We currently configure all mgmt servers on a single xen HV. In the future
        replace this by launching instances via the API on a IaaS cloud using
        desired template

        :param profile: resource profile used to pick a free setup config
        :param branch: branch name recorded in the mgmt host info
        :param configName: optional explicit config name; when None the
                           resource manager picks any free config for profile
        :return: self.mgmtHostInfo dict describing the new mgmt server
        """
        # Generate a fresh MAC for the VM and derive its hostname from it.
        mgmt_vm_mac = bash(self.workDir+"/macgen.py").getStdout()
        mgmt_host = "centos-"+mgmt_vm_mac.replace(':','-')
        # Reserve a setup config for this profile (optionally a named one).
        if configName is None:
            configObj=self.resourceMgr.getConfig(profile)
        else:
            configObj=self.resourceMgr.getConfig(profile,configName)
        if configObj is None:
            self.logger.info("either the config you asked for is in use or it is not registered in the db")
            sys.exit(1)

        configPath=configObj['configfile']
        # Strip stray spaces from the stored path before parsing the config.
        config=configGenerator.getSetupConfig(configPath.replace(' ',''))
        mgmt_ip=config.mgtSvr[0].mgtSvrIp
        # Validate the ip address; release the reserved config on bad input.
        try:
            IPAddress(mgmt_ip)
        except Exception as e:
            self.logger.exception(e)
            self.resourceMgr.freeConfig(configPath)
            exit(1)

        self.infraxen_ip=config.infraxen
        self.infraxen_passwd=config.infraxen_passwd
        noSimulator=self.checkIfSimulatorBuild(config)
        if self.infraxen_ip is None or self.infraxen_passwd is None:
            self.logger.info("invalid values for infraxen_ip or infraxen_passwd")
            self.resourceMgr.freeConfig(configPath)
            exit(1)

        self.logger.info("management server ip=%s"%mgmt_ip)

        # Renamed from 'os' so the local no longer shadows the os module.
        host_os="centos"
        # Record everything callers need to reach and identify this server.
        self.mgmtHostInfo.update({'hostname':mgmt_host})
        self.mgmtHostInfo.update({'ip':mgmt_ip})
        self.mgmtHostInfo.update({'mac':mgmt_vm_mac})
        self.mgmtHostInfo.update({'password':"******"})
        self.mgmtHostInfo.update({'domain':self.DOMAIN})
        self.mgmtHostInfo.update({'os':host_os})
        self.mgmtHostInfo.update({'configfile':configPath})
        self.mgmtHostInfo.update({'config_id':configObj['id']})
        self.mgmtHostInfo.update({'infra_server':self.infraxen_ip})
        self.mgmtHostInfo.update({'infra_server_passwd':self.infraxen_passwd})
        self.mgmtHostInfo.update({'profile':profile})
        self.mgmtHostInfo.update({'branch':branch})
        self.mgmtHostInfo.update({'noSimulator':noSimulator})
        self.mgmtHostInfo.update({'simulator':'dummy'})
        templateFiles="/etc/puppet/agent.conf=/etc/puppet/puppet.conf"
        cobbler_profile=self.getProfile("centos")

        #Remove and re-add cobbler system so the new MAC/IP take effect
        bash("cobbler system remove --name=%s"%mgmt_host)
        bash("cobbler system add --name=%s --hostname=%s --dns-name=%s --mac-address=%s \
          --netboot-enabled=yes --enable-gpxe=no --ip-address=%s \
          --profile=%s --template-files=%s "%(mgmt_host, mgmt_host, (mgmt_host+"."+self.DOMAIN),
                                             mgmt_vm_mac, mgmt_ip, cobbler_profile, templateFiles))
        bash("cobbler sync")
        #clean puppet reports if any
        bash("rm -f %s"%("/var/lib/puppet/reports/"+self.mgmtHostInfo['hostname']+"."+self.DOMAIN))
        #add this node to nodes.pp of puppet
        bash("echo \"node %s inherits basenode { \ninclude nfsclient\ninclude java17\ninclude mysql\ninclude maven35\ninclude cloudstack-simulator\ninclude m2repo}\" >> /etc/puppet/manifests/nodes.pp"%(mgmt_host))
        #Revoke all certs from puppetmaster
        bash("puppet cert clean %s.%s"%(mgmt_host, self.DOMAIN))

        #Destroy any stale VM with this name, then start a fresh one on the
        #infra XenServer.
        xenssh = remoteSSHClient(self.infraxen_ip, 22, "root", self.infraxen_passwd)

        self.logger.debug("bash vm-uninstall.sh -n %s"%(mgmt_host))
        xenssh.execute("xe vm-uninstall force=true vm=%s"%mgmt_host)
        self.logger.debug("bash vm-start.sh -n %s -m %s"%(mgmt_host, mgmt_vm_mac))
        out = xenssh.execute("bash vm-start.sh -n %s -m %s"%(mgmt_host, mgmt_vm_mac))
        self.logger.info("started mgmt server: %s. Waiting for services .."%mgmt_host)
        return self.mgmtHostInfo
Ejemplo n.º 10
0
    """
    Imports the Modules Required
    """
    from marvin.marvinLog import MarvinLog
    from marvin.cloudstackTestClient import CSTestClient

    """
    Step1: Create the Logger
    """
    if (options.input) and not (os.path.isfile(options.input)):
        print "\n=== Invalid Input Config File Path, Please Check ==="
        exit(1)

    log_obj = MarvinLog("CSLog")
    cfg = configGenerator.getSetupConfig(options.input)
    log = cfg.logger

    ret = log_obj.createLogs("DeployDataCenter", log)
    if ret != FAILED:
        log_folder_path = log_obj.getLogFolderPath()
        tc_run_logger = log_obj.getLogger()
    else:
        print "\n===Log Creation Failed. Please Check==="
        exit(1)

    """
    Step2 : Create Test Client
    """
    obj_tc_client = CSTestClient(cfg.mgtSvr[0], cfg.dbSvr, logger=tc_run_logger)
    if obj_tc_client and obj_tc_client.createTestClient() == FAILED:
Ejemplo n.º 11
0
  # NOTE(review): fragment cut from a larger clean-infra script — the
  # OptionParser setup above this point is outside this excerpt.
  (options, args) = parser.parse_args()

  # A config file argument is mandatory for cleaning.
  if options.input is None:
    print "\n==== For cleaning: Please Specify a " \
          "Valid Input Configuration File ===="
    exit(1)

  if (options.input) and not (os.path.isfile(options.input)):
    print "\n=== Invalid Input Config File Path, Please Check ==="
    exit(1)

  from marvin.marvinLog import MarvinLog
  from marvin.cloudstackTestClient import CSTestClient

  # Set up run logs; abort when log creation fails.
  log_obj = MarvinLog("CSLogClean")
  cfg = configGenerator.getSetupConfig(options.input)
  log = cfg.logger
  ret = log_obj.createLogs("Clean_Infra", log)
  if ret != FAILED:
    log_folder_path = log_obj.getLogFolderPath()
    tc_run_logger = log_obj.getLogger()
  else:
    print "\n=== Log Creation Failed. Please Check ==="
    exit(1)

  # The test client drives API calls against the mgmt server / DB.
  obj_tc_client = CSTestClient(cfg.mgtSvr[0], cfg.dbSvr,
                               logger=tc_run_logger)
  if obj_tc_client and obj_tc_client.createTestClient() == FAILED:
    print "\n=== TestClient Creation Failed ==="
    exit(1)
Ejemplo n.º 12
0
        # NOTE(review): the `raise e` belongs to an except block that is
        # cut off above this excerpt.
        raise e
    generate_system_tables(system)

    hosts = []
    prepare_mgmt = False
    if options.distro is not None:
        #Management Server configuration - only tests the packaging
        mgmt_host = "cloudstack-"+options.distro
        prepare_mgmt = True
        logging.info("Configuring management server %s"%mgmt_host)
        hosts.append(configureManagementServer(mgmt_host))

    if options.hypervisor is not None:
        #FIXME: query profiles from hypervisor args through cobbler api
        # Config file is derived from the hypervisor name, e.g. "xen.cfg".
        auto_config = options.hypervisor + ".cfg"
        cscfg = configGenerator.getSetupConfig(auto_config)
        logging.info("Reimaging hosts with %s profile for the %s \
                     hypervisor" % (options.profile, options.hypervisor))
        hosts.extend(refreshHosts(cscfg, options.hypervisor, options.profile))
        seedSecondaryStorage(cscfg, options.hypervisor)
        cleanPrimaryStorage(cscfg)

    waitForHostReady(hosts)
    delay(30)
    # Re-check because ssh connect works soon as post-installation occurs. But 
    # server is rebooted after post-installation. Assuming the server is up is
    # wrong in these cases. To avoid this we will check again before continuing
    # to add the hosts to cloudstack
    waitForHostReady(hosts)
    if prepare_mgmt:
        prepareManagementServer(mgmt_host)
Ejemplo n.º 13
0
from marvin import configGenerator

config=configGenerator.getSetupConfig('/root/cloud-autodeploy2/newcode/config-templates/advanced-xen-2cluster.cfg')

for zone in config.zones:
                 print "zone :",zone.name
                 for pod in zone.pods:
                     print "pod :",pod.name
                     for cluster in pod.clusters:
                         for host in cluster.hosts:
                             print host.url
                         print "cluster :", cluster.hypervisor
                         print "creating a jeknins job to generate results and email notfication for hypervisor %s and zone %s"%(cluster.hypervisor, zone.name)
Ejemplo n.º 14
0
from marvin import configGenerator
from marvin import sshClient
from time import sleep as delay
import telnetlib
import socket

# Restart cloudstack-management on the server named in the config, then
# wait for the API port (8096) to open.
# NOTE(review): OptionParser is used below but not imported in this
# excerpt — presumably `from optparse import OptionParser` exists in the
# full source; confirm.
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-c", "--config", action="store", default="xen.cfg",
                      dest="config", help="the path where the server configurations is stored")
    (options, args) = parser.parse_args()
    
    # NOTE(review): a bare `raise` outside an except block raises
    # RuntimeError — a specific error was presumably intended; confirm.
    if options.config is None:
        raise

    cscfg = configGenerator.getSetupConfig(options.config)
    mgmt_server = cscfg.mgtSvr[0].mgtSvrIp
    # NOTE(review): root password hard-coded as "password" — verify.
    ssh = sshClient.SshClient(mgmt_server, 22, "root", "password")
    ssh.execute("service cloudstack-management restart")

    #Telnet wait until api port is open
    tn = None
    timeout = 120
    # Poll once per second for up to ~120s until the API port accepts.
    while timeout > 0:
        try:
            tn = telnetlib.Telnet(mgmt_server, 8096, timeout=120)
            break
        except Exception:
            delay(1)
            timeout = timeout - 1
    # NOTE(review): excerpt is truncated — the body of this branch is
    # missing from this view.
    if tn is None:
Ejemplo n.º 15
0
from testEnv import testEnv,testManager
from marvin import configGenerator

env = {'pythonPath': '/automation/virtenv/00-16-3e-17-43-25/146/testenv/bin',
         'config_file': '/automation/virtenv/00-16-3e-17-43-25/146/00-16-3e-17-43-25.cfg',
         'virtenvPath': '/automation/virtenv/00-16-3e-17-43-25/158',
         'hostip': '10.147.28.221',
         'build_number': '158',
         'version': '1254',
         'noSimulator': True,
         'repo_url': 'https://github.com/apache/cloudstack.git',
         'startTime': '',
         'commit_id': None,
         'branch':'1254' }

cscfg = configGenerator.getSetupConfig(env['config_file'])
testMgr = testManager('test/integration/smoke-test/integration/smoke/misc-test_ssvm.py-test_nuage_vsp.py,test/integration/smoke/test_ssvm.py', env['virtenvPath'])
tests=testMgr.getTests()
print "tests=  ",tests
tests=testMgr.getTests()
print "tests=  ",tests
reportAnalyserMap=testEnv().getReportAnalysers(cscfg, env, True)
print reportAnalyserMap
for key in reportAnalyserMap.keys():
                         reportAnalyserMap[key].collectTestsToReRun()
                         tests=reportAnalyserMap[key].suitsToRerun
                         if(tests is None):
                            avoidZones.append(key)
                         else:
                            #print "tests to rerun",tests
                            testMgr.addTestsToReRun(tests)
Ejemplo n.º 16
0
    def execOnJenkins(self, env, testSpecifierString, mailto, reRunFailedTests=True, retryCount=1, report=True, execOnOneZone=True,
           postOnPr=False, testMgr=None, avoidZones=None):
        """
        Drive test execution through Jenkins TestExecutor jobs.

        Batches of tests from testMgr are turned into Jenkins jobs (one per
        batch), triggered per zone/pod/cluster from the setup config, and
        waited on. Failed tests may be re-run recursively up to retryCount
        times, and results are reported either on a PR or via the Jenkins
        email plugin.

        :param env: mutable dict of run parameters ('virtenvPath',
                    'hostip', 'build_number', 'config_file', ...); its
                    'hypervisor' key is rewritten by this method
        :param testSpecifierString: spec used to build a testManager when
                    none is supplied
        :param mailto: recipients forwarded to recursive re-run calls
        :param reRunFailedTests: when True, re-run suites the report
                    analysers flag for re-run
        :param retryCount: maximum number of re-run passes
        :param report: when True, publish a report at the end
        :param execOnOneZone: stop after the first (non-avoided) zone
        :param postOnPr: report on the PR instead of via email
        :param testMgr: optional pre-built testManager to continue from
        :param avoidZones: zone names to skip (default: none)
        :return: env on success; None when an exception was swallowed
        """
        try:
            env['hypervisor'] = ''
            if avoidZones is None:
               avoidZones=[]
            if testMgr is None:
               testMgr = testManager(testSpecifierString, env['virtenvPath'])
            jobModifier = modifyJOb()
            modifiedjob = ''
            # NOTE(review): Jenkins credentials are hard-coded here.
            j = Jenkins('http://jenkins-ccp.citrix.com', 'bharatk', 'BharatK')
            tests = testMgr.getTests()
            if tests == None:
                raise Exception('found no tests to run')
            # Each loop iteration consumes one batch of tests from testMgr.
            while tests is not None:
                os.chdir(env['virtenvPath'])
                self.logger.info('launching jenkins TestExecutor Job')
                cscfg = configGenerator.getSetupConfig(env['config_file'])
                for zone in cscfg.zones:
                    if zone.name in avoidZones:
                       continue
                    # The unconditional breaks below mean only the first
                    # pod and first cluster of each zone are used.
                    for pod in zone.pods:
                        for cluster in pod.clusters:
                            for modifiedjob in jobModifier.addTests(env['build_number'], tests, self.throttle_job_count):
                                # Read the generated job config, then remove
                                # the temporary file.
                                file = open('/root/cloud-autodeploy2/newcode/' + modifiedjob, 'r')
                                config = file.read()
                                file.close()
                                bash('rm -f /root/cloud-autodeploy2/newcode/%s' % modifiedjob)
                                # Recreate the job so a stale definition is
                                # never reused.
                                if not j.job_exists(modifiedjob):
                                    j.create_job(modifiedjob, config)
                                else:
                                    j.delete_job(modifiedjob)
                                    j.create_job(modifiedjob, config)
                                j.build_job(modifiedjob, {'BASEDIR': env['virtenvPath'],
                                 'MGMT_SVR': env['hostip'],
                                 'buildNumber': env['build_number'],
                                 'zoneName': zone.name,
                                 'hypervisor': cluster.hypervisor.lower(),
                                 'zoneType': zone.networktype,
                                 'configFileName': env['config_file'],
                                 'token': 'bharat'})
                                self.waitForJobComplete(env['virtenvPath'], [zone.name])
                                env['hypervisor'] = '%s,%s' % (env['hypervisor'], cluster.hypervisor.lower())

                            break

                        break

                    if execOnOneZone:
                        break

                tests = testMgr.getTests()

            j.delete_job(modifiedjob)

            reportAnalyserMap=self.getReportAnalysers(cscfg, env, execOnOneZone)  
            if(reRunFailedTests):
               while retryCount > 0:
                     self.logger.info("checking if we need to re run any of the tests")
                     # NOTE(review): testsToReRun is never appended to, so
                     # the len()==0 check below always breaks after one
                     # pass — the recursive re-run branch looks unreachable;
                     # confirm intended behavior.
                     testsToReRun=[]
                     for key in reportAnalyserMap.keys():
                         tests=reportAnalyserMap[key].suitsToRerun
                         if(tests is None):
                            avoidZones.append(key)
                         else:
                            testMgr.addTestsToReRun(tests) 
                     retryCount-=1
                     self.logger.info("zone name:%s The follwoing tests will be re run %s"%(key,tests))
                     if(len(testsToReRun)==0):
                       break
                     else: 
                        self.execOnJenkins(env, testSpecifierString, mailto, reRunFailedTests, retryCount, False, execOnOneZone, postOnPr, testMgr, avoidZones)
               
            if report and postOnPr:
                for key in reportAnalyserMap.keys():
                    self.reportOnPr(reportAnalyserMap[key].generateTextReport2(), env)
            elif report:
                self.reportUsingJenkinsEmailPlugin(cscfg, env)
            return env
        except Exception as e:
            # NOTE(review): all failures are swallowed here and only
            # logged; callers receive None instead of env.
            self.logger.exception(e)
Ejemplo n.º 17
0
        # NOTE(review): the `raise e` belongs to an except block that is
        # cut off above this excerpt.
        raise e
    generate_system_tables(system)

    hosts = []
    prepare_mgmt = False
    if options.distro is not None:
        #Management Server configuration - only tests the packaging
        mgmt_host = "cloudstack-" + options.distro
        prepare_mgmt = True
        logging.info("Configuring management server %s" % mgmt_host)
        hosts.append(configureManagementServer(mgmt_host))

    if options.hypervisor is not None:
        #FIXME: query profiles from hypervisor args through cobbler api
        # Config file is derived from the hypervisor name, e.g. "xen.cfg".
        auto_config = options.hypervisor + ".cfg"
        cscfg = configGenerator.getSetupConfig(auto_config)
        logging.info("Reimaging hosts with %s profile for the %s \
                     hypervisor" % (options.profile, options.hypervisor))
        hosts.extend(refreshHosts(cscfg, options.hypervisor, options.profile))
        seedSecondaryStorage(cscfg, options.hypervisor)
        cleanPrimaryStorage(cscfg)

    waitForHostReady(hosts)
    delay(30)
    # Re-check because ssh connect works soon as post-installation occurs. But
    # server is rebooted after post-installation. Assuming the server is up is
    # wrong in these cases. To avoid this we will check again before continuing
    # to add the hosts to cloudstack
    waitForHostReady(hosts)
    if prepare_mgmt:
        prepareManagementServer(mgmt_host)
Ejemplo n.º 18
0
    def execOnJenkins(self,env,testSpecifierString,mailto,execOnOneZone=True):
        """
        Older Python 2 variant: run test batches as Jenkins TestExecutor
        jobs, then create one report-generator job per zone/cluster that
        emails results to `mailto`.

        :param env: dict of run parameters ('virtenvPath', 'hostip',
                    'build_number', 'config_file', 'version', ...)
        :param testSpecifierString: spec passed to testManager
        :param mailto: recipients for the report-generator jobs
        :param execOnOneZone: stop after the first zone when True
        """
        try:
              testMgr=testManager(testSpecifierString,env['virtenvPath'])
              jobModifier=modifyJOb()
              modifiedjob=""
              # NOTE(review): Jenkins credentials are hard-coded here.
              j=Jenkins('http://jenkins-ccp.citrix.com','bharatk','BharatK')
              tests=testMgr.getTests()
              if(tests==None):
                raise Exception("found no tests to run")
              # Each loop iteration consumes one batch of tests.
              while(not tests is None):
                  #trigger a jenkins job.
                  os.chdir(env['virtenvPath'])
                  self.logger.info("launching jenkins TestExecutor Job")
                  #creating testexecutor jobs for each zone.
                  cscfg=configGenerator.getSetupConfig(env['config_file'])
                  jobIdentifierList=[]
                  # The unconditional breaks mean only the first pod and
                  # first cluster of each zone are used.
                  for zone in cscfg.zones:
                      for pod in zone.pods:
                         for cluster in pod.clusters:
                             modifiedjob=jobModifier.addTests(env['build_number'],tests)
                             # Read the generated job config, then remove
                             # the temporary file.
                             file=open("/root/cloud-autodeploy2/newcode/"+modifiedjob,'r')
                             config=file.read()
                             file.close()
                             bash("rm -f /root/cloud-autodeploy2/newcode/%s"%modifiedjob)
                             if(not j.job_exists(modifiedjob)):
                                  j.create_job(modifiedjob,config)
                             else:
                                  j.reconfig_job(modifiedjob,config)
                             j.build_job(modifiedjob, {'BASEDIR':env['virtenvPath'], 'MGMT_SVR' : env['hostip'],'buildNumber':env['build_number'],'zoneName':zone.name,'hypervisor':cluster.hypervisor.lower(),'zoneType':zone.networktype,'configFileName':env['config_file'],'token':'bharat'})
                             jobIdentifierList.append(zone.name)
                             break
                         break
                      if (execOnOneZone):
                        break
                  self.waitForJobComplete(env['virtenvPath'],jobIdentifierList)
                  tests=testMgr.getTests()  

              j.delete_job(modifiedjob) 
              jobIdentifierList=[]
              bugLoggerData=[]
              time.sleep(30)
              # Second pass: create one report-generator job per zone.
              for zone in cscfg.zones:
                 self.logger.info(zone.name)
                 for pod in zone.pods:
                     for cluster in pod.clusters:
                         self.logger.info("creating a jeknins job to generate results and email notfication for hypervisor %s and zone %s"%(cluster.hypervisor, zone.name))
                         modifiedjob=jobModifier.modifyReportGenerator(env['build_number']+"_"+zone.name+"_"+cluster.hypervisor, mailto)
                         jobname=modifiedjob
                         file=open("/root/cloud-autodeploy2/newcode/"+modifiedjob,'r')
                         config=file.read()
                         file.close()
                         j.create_job(modifiedjob,config)
                         j.build_job(modifiedjob, {'buildNumber':env['build_number'],'BuildNo':env['build_number'], 'MGMT_SVR' : env['hostip'], 'BASEDIR':env['virtenvPath'], 'version':env['version'], 'BranchInfo':env['version'],\
                         'GitRepoUrl':env['repo_url'],'GitCommitId':env['commit_id'], 'CIRunStartDateTime':env['startTime'],'CIRunEndDateTime':time.strftime("%c"), 'WikiLinks':'https://cwiki.apache.org/confluence/display/CLOUDSTACK/Infrastructure%2CCI%2CSimulator%2CAutomation+Changes','hypervisor':cluster.hypervisor.lower(), 'HyperVisorInfo':cluster.hypervisor.lower(), 'zoneName':zone.name, 'BuildReport':"http://jenkins-ccp.citrix.com/job/"+jobname+"/1/testReport/",'token':'bharat'})
                         jobIdentifierList.append("report_"+zone.name)
                         jobDetails={"job_name":modifiedjob,"related_data_path":env['virtenvPath']}
                         self.resourceMgr.addJobDetails(jobDetails)
                         bugLoggerData.append({'hypervisor':cluster.hypervisor.lower(), 'branch':env['version'],'logname':cluster.hypervisor.lower()+'__Log_'+env['build_number'], 'type':'BVT'})
                         self.logger.info("bug logger data in zone looop %s"%bugLoggerData)
                         break
                     break
                 if (execOnOneZone):
                    #env['hypervisor':cluster.hypervisor.lower()]
                    break  
              # NOTE(review): logger.info is given a positional arg with no
              # %s placeholder in the format string — the logging module
              # will report a formatting error instead of the list; confirm.
              self.logger.info("job identifier list", jobIdentifierList)       
              self.waitForJobComplete(env['virtenvPath'],jobIdentifierList)
              #self.logger.info("deleting the reporter job on jenkins job_name=%s",jobname)
              #j.delete_job(jobname)
              self.logger.info("cleaning up the workspace")
              bash("rm -f /root/cloud-autodeploy2/newcode/%s"%modifiedjob)
              self.logger.info("running bug logger")
              #self.runBugLogger(bugLoggerData)
              #os.system("rm -rf %s"%(self.jenkinsWorkspace+"/"+jobname))
        except Exception, e:
               # NOTE(review): all failures are swallowed here and only logged.
               self.logger.error(e)