Example 1
 def createPoliciesFromJson(cls,
                            file,
                            serviceType,
                            sourceHiveServiceName="mycluster0_hive",
                            sourceHdfsServiceName="mycluster0_hadoop",
                            targetServiceName=None,
                            ambariWeburl=source_weburl,
                            updateIfExists=False,
                            polResource=None,
                            isOverRideTrue=True):
     if Xa.isArgusInstalled():
         servicesMapJson = Config.getEnv(
             'ARTIFACTS_DIR') + '/' + datetime.datetime.now().strftime(
                 "%Y%m%d%H%M%S") + 'service_mapping.json'
         serviceName = "hadoop" if serviceType == "hdfs" else serviceType
         if targetServiceName is None:
             targetServiceName = \
             Xa.findRepositories(nameRegex="^.*_" + serviceName + "$", type=serviceType, status=True,
                                 ambariWeburl=ambariWeburl)[0]['name']
         with open(servicesMapJson, 'w') as f:
             if serviceType == "hive":
                 f.write('{"' + sourceHiveServiceName + '":"' +
                         targetServiceName + '"}')
             elif serviceType == "hdfs":
                 f.write('{"' + sourceHdfsServiceName + '":"' +
                         targetServiceName + '"}')
         Xa.importPoliciesInJsonFile(file,
                                     serviceType,
                                     servicesMapJson=servicesMapJson,
                                     ambariWeburl=ambariWeburl,
                                     updateIfExists=updateIfExists,
                                     polResource=polResource,
                                     isOverRideTrue=isOverRideTrue)
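A minimal usage sketch for the helper above; the enclosing class name (BeaconRanger), the JSON path, and the keyword values are illustrative assumptions, not taken from the snippet:

# Hypothetical call: import previously exported Hive policies into the
# target cluster, remapping the source service name onto whatever Hive
# repo is active there.
BeaconRanger.createPoliciesFromJson('/tmp/exported_hive_policies.json',
                                    'hive',
                                    ambariWeburl=target_weburl,
                                    updateIfExists=True)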
Example 2
 def disableOrEnableenableDenyAndExceptionsInPolicies(
         cls, serviceList, enableenableDenyAndExceptionsInPolicies=True):
     Xa.disableOrEnableenableDenyAndExceptionsInPolicies(
         serviceList, enableenableDenyAndExceptionsInPolicies)
     Xa.disableOrEnableenableDenyAndExceptionsInPolicies(
         serviceList,
         enableenableDenyAndExceptionsInPolicies,
         weburl=target_weburl)
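A hedged usage sketch; the enclosing class name (BeaconRanger) is an assumption:

# Hypothetical call: enable deny and exception conditions for the Hive
# service on both the default and the target cluster.
BeaconRanger.disableOrEnableenableDenyAndExceptionsInPolicies(["hive"], True)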
Example 3
    def setupOpenRangerHivePolicy(cls):
        logger.info(
            "============================== %s.%s ============================="
            % (__name__, sys._getframe().f_code.co_name))
        logger.info("setupOpenRangerHivePolicy: Begin")
        repos = Xa.findRepositories(nameRegex="^.*_hive$",
                                    type="Hive",
                                    status=True)
        if len(repos) == 0:
            repo = {}
            repo['repositoryType'] = 'Hive'
            repo['name'] = "%s%d" % ('knox_test_hive_repo_', time.time())
            repo['description'] = 'Knox Test Hive Repo'
            repo['version'] = '0.4.0.2.2.2.0-2509'
            repo['isActive'] = True
            config = {}
            config['username'] = '******'
            config['password'] = '******'
            config['jdbc.driverClassName'] = 'org.apache.hive.jdbc.HiveDriver'
            config['jdbc.url'] = ('jdbc:hive2://ip-172-31-37-219.ec2.internal:10000/default;'
                                  'principal=hive/[email protected]')
            config['commonNameForCertificate'] = ''
            config['isencrypted'] = True
            repo = Xa.createPolicyRepository(repo, config)
        else:
            assert len(repos) == 1, \
                "Found wrong number of Hive Ranger policy repos. Expected 1, found %d." % len(repos)
            repo = repos[0]

        #print "REPO=" + jsonlib.dumps(repo,indent=4)
        t = time.time()
        policy = {}
        policy['repositoryName'] = repo['name']
        policy['repositoryType'] = repo['repositoryType']
        policy['policyName'] = "%s%s%d" % (repo['name'],
                                           '_open_public_test_policy_', t)
        policy['description'] = 'Open Knox Public Test Policy'
        policy['databases'] = '*, default'
        policy['tables'] = "*,%d" % t
        policy['columns'] = "*,%d" % t
        policy['isEnabled'] = True
        policy['isAuditEnabled'] = True
        policy['tableType'] = 'Inclusion'
        policy['columnType'] = 'Inclusion'
        policy['permMapList'] = [{
            'groupList': ['public'],
            'permList': [
                'select', 'update', 'create', 'drop', 'alter', 'index', 'lock',
                'all', 'admin'
            ]
        }]
        #print "CREATE=" + jsonlib.dumps(policy)
        result = Xa.createPolicy(policy)
        logger.info("setupOpenRangerHivePolicy: %s" % jsonlib.dumps(result))
        return result
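A short usage sketch, assuming the enclosing test class is called RangerPolicySetup (an assumption) and that Xa.createPolicy returns the created policy as a dict, as the logging above suggests:

# Hypothetical setup call at the start of a Knox/Hive test run:
result = RangerPolicySetup.setupOpenRangerHivePolicy()
assert result is not None, "open Hive policy was not created"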
Example 4
 def changePolicyInterval(cls):
     if not BeaconRanger.policyIntervalChanged:
         interval_for_source_cls = Ambari.getConfig(
             'ranger-hive-security', webURL=source_weburl
         )['ranger.plugin.hive.policy.pollIntervalMs']
         interval_for_target_cls = Ambari.getConfig(
             'ranger-hive-security', webURL=target_weburl
         )['ranger.plugin.hive.policy.pollIntervalMs']
         if interval_for_source_cls != "5000":
             Xa.changePolicyInterval("HIVE", "5000", webURL=source_weburl)
         if interval_for_target_cls != "5000":
             Xa.changePolicyInterval("HIVE", "5000", webURL=target_weburl)
         BeaconRanger.policyIntervalChanged = True
         BeaconRanger.policyActivationWaitTime = 6
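The same read-then-conditionally-update pattern, sketched for the HDFS plugin; the 'ranger-hdfs-security' config type and the property name are assumptions made by analogy with the Hive properties above:

# Hypothetical: shorten the HDFS plugin poll interval on the source cluster.
interval = Ambari.getConfig('ranger-hdfs-security',
                            webURL=source_weburl)['ranger.plugin.hdfs.policy.pollIntervalMs']
if interval != "5000":
    Xa.changePolicyInterval("HDFS", "5000", webURL=source_weburl)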
Example 5
    def setupOpenRangerKnoxPolicy(cls):
        logger.info(
            "============================== %s.%s ============================="
            % (__name__, sys._getframe().f_code.co_name))
        logger.info("setupOpenRangerKnoxPolicy: Begin")
        repos = Xa.findRepositories(nameRegex="^.*_knox$",
                                    type="Knox",
                                    status=True)
        if len(repos) == 0:
            repo = {}
            repo['repositoryType'] = 'Knox'
            repo['name'] = "%s%d" % ('knox_test_knox_repo_', time.time())
            repo['description'] = 'Knox Test Knox Repo'
            repo['version'] = '0.1.0'
            repo['isActive'] = True
            config = {}
            config['username'] = Knox.getAdminUsername()
            config['password'] = Knox.getAdminPassword()
            config['knox.url'] = 'https://%KNOX_HOST%:8443/gateway/admin/api/v1/topologies'
            config['commonNameForCertificate'] = ''
            repo = Xa.createPolicyRepository(repo, config)
        else:
            assert len(repos) == 1, \
                "Found wrong number of Knox Ranger policy repos. Expected 1, found %d." % len(repos)
            repo = repos[0]

        t = time.time()
        policy = {}
        policy['repositoryName'] = repo['name']
        policy['repositoryType'] = repo['repositoryType']
        policy['policyName'] = "%s%s%d" % (repo['name'],
                                           '_open_public_test_policy_', t)
        policy['description'] = 'Knox Open Public Test Policy'
        policy['topologies'] = "*,%d" % t
        policy['services'] = "*,%d" % t
        policy['isEnabled'] = True
        policy['isRecursive'] = True
        policy['isAuditEnabled'] = True
        policy['permMapList'] = [{
            'groupList': ['public'],
            'permList': ['allow']
        }]
        #print "CREATE=" + jsonlib.dumps(policy)
        result = Xa.createPolicy(policy)
        #print "CREATED=" + jsonlib.dumps(result)
        logger.info("setupOpenRangerKnoxProxy: %s" % jsonlib.dumps(result))
        return result
Example 6
 def disableEnableHiveInfraPolicies(cls, enablePolicy):
     policies_in_source_Cluster = Xa.getPolicy_api_v2("hive",
                                                      weburl=source_weburl)
     policies_in_target_Cluster = Xa.getPolicy_api_v2("hive",
                                                      weburl=target_weburl)
     for policy in policies_in_source_Cluster:
         if 'Qe-infra' in policy["name"]:
             policy["isEnabled"] = enablePolicy
             Xa.UpdatePolicy_api_v2(policy)
     for policy in policies_in_target_Cluster:
         if 'Qe-infra' in policy["name"]:
             policy["isEnabled"] = enablePolicy
             Xa.UpdatePolicy_api_v2(policy)
     # waiting for policy to be active
     time.sleep(30)
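A hedged sketch of the intended disable/restore bracket around a test; BeaconRanger and run_replication_test are assumptions:

# Hypothetical: run a test with the Qe-infra policies disabled, then restore them.
BeaconRanger.disableEnableHiveInfraPolicies(False)
try:
    run_replication_test()  # placeholder for the actual test body
finally:
    BeaconRanger.disableEnableHiveInfraPolicies(True)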
Example 7
    def downgrade_argus_admin(cls, latestVersion, config, currVersion):
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            '#### ranger-admin: downgrading to old version.. ####')
        # 'knox.crt'
        # 'cacertswithknox'
        node = Xa.getPolicyAdminHost()
        user = Machine.getAdminUser()
        xa_admin_stop_cmd = 'sudo service ranger-admin stop'
        xa_admin_start_cmd = 'sudo service ranger-admin start'

        UpgradePerNode.reportProgress(
            '#### ranger-admin: stopping with command %s ####' %
            xa_admin_stop_cmd)
        exit_code, stdout = Machine.runas(user,
                                          xa_admin_stop_cmd,
                                          host=node,
                                          logoutput=True)
        logger.info('****************** xa admin stop exit_code = ' +
                    str(exit_code))

        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        hdpSelect.changeVersion("ranger-admin", latestVersion, node)
        logger.info(
            '*************************** hdp-select to version %s done' %
            latestVersion)

        UpgradePerNode.reportProgress(
            '#### ranger-admin: starting with command %s ####' %
            xa_admin_start_cmd)
        exit_code, stdout = Machine.runas(user,
                                          xa_admin_start_cmd,
                                          host=node,
                                          logoutput=True)
        logger.info('****************** xa admin start exit_code = ' +
                    str(exit_code))
        logger.info('****************** xa admin start stdout = ' +
                    str(stdout))

        if exit_code == 0:
            UpgradePerNode.reportProgress(
                '#### ranger-admin: downgrade successful ####')
        else:
            UpgradePerNode.reportProgress(
                '#### ranger-admin: downgrade failed! startup exit_code=%d ####'
                % exit_code)
Example 8
 def getRangerConfigs(cls, ambariWeburl=None):
     if Xa.isArgusInstalled():
         logger.info("Ranger is ON")
         address = Xa.getPolicyAdminAddress(ambariWeburl=ambariWeburl)
         hadoop_repo = Xa.findRepositories(nameRegex="^.*_hadoop$",
                                           type="hdfs",
                                           status=True,
                                           ambariWeburl=ambariWeburl)[0]['name']
         hive_repo = Xa.findRepositories(nameRegex="^.*_hive$",
                                         type="hive",
                                         status=True,
                                         ambariWeburl=ambariWeburl)[0]['name']
         ranger_config = {
             "ranger_url": address,
             "hadoop_repo": hadoop_repo,
             "hive_repo": hive_repo
         }
         return ranger_config
     else:
         return None
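A minimal usage sketch; BeaconRanger as the enclosing class is an assumption:

# Hypothetical: log the Ranger endpoint and repo names for the source cluster.
src_cfg = BeaconRanger.getRangerConfigs(ambariWeburl=source_weburl)
if src_cfg is not None:
    logger.info("Ranger admin at %s, HDFS repo %s, Hive repo %s" %
                (src_cfg["ranger_url"], src_cfg["hadoop_repo"], src_cfg["hive_repo"]))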
Example 9
 def Xa_getUserSyncLogs(cls, logoutput=True):
     try:
         from beaver.component.xa import Xa
         return Xa.getUserSyncLogs(logoutput)
     except Exception:
         if logoutput:
             logger.error(
                 "Exception occurred during Xa_getUserSyncLogs() call")
             logger.error(traceback.format_exc())
         return None
Example 10
 def deleteRangerPolicyBasedOndDatabase(cls,
                                        serviceType,
                                        db,
                                        serviceName=None,
                                        weburl=None,
                                        deleteOnlyDenyPolicies=False):
     if serviceName is None:
         repos = Xa.findRepositories(nameRegex="^.*_" + serviceType + "$",
                                     type=serviceType,
                                     status=True,
                                     ambariWeburl=weburl)
         serviceName = repos[0]['name']
     policies_to_delete = Xa.getPoliciesForResources(serviceType,
                                                     serviceName,
                                                     database=db,
                                                     ambariWeburl=weburl)
     if policies_to_delete is not None:
         for policy in policies_to_delete["policies"]:
             if deleteOnlyDenyPolicies:
                 if policy["name"] == primaryCluster + "_beacon deny policy for " + db:
                     Xa.deletePolicy_by_id_api_v2(policy["id"], weburl=weburl)
                     break
             else:
                 Xa.deletePolicy_by_id_api_v2(policy["id"], weburl=weburl)
     #waiting for policy refresh after policies deletion
     time.sleep(30)
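A hedged usage sketch; the enclosing class name (BeaconRanger) and the database name are assumptions:

# Hypothetical: remove only the beacon deny policy that replication created
# for database 'testdb' on the target cluster.
BeaconRanger.deleteRangerPolicyBasedOndDatabase('hive',
                                                'testdb',
                                                weburl=target_weburl,
                                                deleteOnlyDenyPolicies=True)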
Example 11
 def verify_Policy_Exists_after_replication(
         cls,
         servicetype,
         verify_from_cluster=source_weburl,
         cluster_to_verify=target_weburl,
         database=None,
         path=None,
         NoPolicyInTarget=False,
         expectedDesc="created by beacon while importing from " +
     primaryCluster,
         preDenyPolicyStr=primaryCluster + "_beacon deny policy for "):
     if Xa.isArgusInstalled():
         serviceName = "hadoop" if servicetype == "hdfs" else servicetype
         serviceNameOfverify_from_cluster = \
             Xa.findRepositories(nameRegex="^.*_" + serviceName + "$", type=servicetype, status=True,
                                 ambariWeburl=verify_from_cluster)[0]['name']
         serviceNameOfverify_to_cluster = \
             Xa.findRepositories(nameRegex="^.*_" + serviceName + "$", type=servicetype, status=True,
                                 ambariWeburl=cluster_to_verify)[0]['name']
         logger.info("verifying if policy exist in target cluster")
         policies_in_source_Cluster = Xa.getPoliciesForResources(
             servicetype,
             serviceName=serviceNameOfverify_from_cluster,
             ambariWeburl=verify_from_cluster,
             database=database,
             path=path)
         policies_in_target_Cluster = Xa.getPoliciesForResources(
             servicetype,
             serviceName=serviceNameOfverify_to_cluster,
             ambariWeburl=cluster_to_verify,
             database=database,
             path=path)
         if not NoPolicyInTarget:
             assert len(policies_in_target_Cluster) != 0, \
                 "no policies were imported into the target cluster"
             BeaconRanger.setIdOfAllPolicyToZero(
                 policies_in_source_Cluster, policies_in_target_Cluster,
                 expectedDesc)
             logger.info("set of policies in target cluster: " +
                         str(policies_in_target_Cluster["policies"]))
             for policy in policies_in_source_Cluster["policies"]:
                 logger.info("policy is " + str(policy))
                 assert policy in policies_in_target_Cluster["policies"]
             logger.info(
                 "all policies are verified; now checking the deny policy if it is enabled"
             )
         isDenyPolicyTrue = Ambari.getConfig(
             'beacon-security-site',
             webURL=source_weburl)['beacon.ranger.plugin.create.denypolicy']
         all_policies_in_target_Cluster = Xa.getPolicy_api_v2(
             servicetype, weburl=target_weburl)
         if isDenyPolicyTrue == 'true':
             dataset = path if servicetype == "hdfs" else database
             BeaconRanger.denyPolicyValidation(
                 servicetype, dataset, all_policies_in_target_Cluster,
                 preDenyPolicyStr)
         else:
             assert len(policies_in_target_Cluster) == len(
                 policies_in_source_Cluster)
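A minimal usage sketch; BeaconRanger and the database name are assumptions:

# Hypothetical post-replication check: Hive policies for 'repl_db' should now
# exist on the target cluster (source/target URLs come from the module-level defaults).
BeaconRanger.verify_Policy_Exists_after_replication('hive', database='repl_db')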
Example 12
    def getComponnetsToTest(cls, compFile, depFile):
        '''
        Get the components that are being tested according to depFile
        '''
        # read in the config file
        conf = RuSetup.readJson(compFile)
        isStandalone = conf[RuSetup.CONF_STANDALONE]
        RuSetup._skipQueue = set(conf[RuSetup.CONF_SKIP_QUEUE])
        RuSetup._defaultQueue = conf[RuSetup.CONF_DEFAULT_QUEUE]
        returnSet = None
        if isStandalone:
            # get the components to test
            returnSet = set(conf[RuSetup.CONF_COMPONENTS_TEST])
        else:
            returnSet = set(RuSetup.getComponentsAffected(compFile, depFile))

        # skip tests according to cluster settings
        if not HDFS.isHAEnabled():
            logger.info("Skip HDFS since HA is not enabled")
            returnSet.discard("hdfs")

        # as discussed in Ru standup for 11/13, enabling storm-slider for non HA cluster and storm standalone for HA cluster
        if YARN.isHAEnabled():
            returnSet.discard("storm-slider")
        else:
            returnSet.discard("storm")

        if Hadoop.isEncrypted():
            returnSet.discard("knox")
            returnSet.discard("falcon")

        if Hadoop.isTez():
            logger.info("Add tez since Hadoop.isTez()")
            returnSet.add("tez")
        else:
            logger.info(
                "Make sure tez is not in the list since Hadoop.isTez() is false"
            )
            returnSet.discard("tez")
        # Note: component.xa is always available, even if xa is not installed
        # So this line should work even if the cluster does not have xa installed
        from beaver.component.xa import Xa
        if Xa.isArgusInstalled():
            logger.info("Add argus since argus is there")
            returnSet.add("argus")
        else:
            logger.info(
                "Make sure argus is not in the list since it's not available")
            returnSet.discard("argus")

        return list(returnSet)
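A hedged usage sketch; the JSON file names are illustrative assumptions:

# Hypothetical: compute the component list for this run from the suite's
# component and dependency config files.
components = RuSetup.getComponnetsToTest('components.json', 'dependencies.json')
logger.info("Components under test: %s" % components)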
Example 13
 def background_job_setup(cls,
                          components,
                          runSmokeTestSetup=True,
                          config=None):
     from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
     UpgradePerNode.reportProgress(
         '#### call for background-job setup for argus admin started ####')
     if runSmokeTestSetup:
         # componentList = ["hive","hbase","hdfs","knox","storm"]
         componentList = []
         if Xa.isHdfsInstalled():
             componentList.append('hdfs')
         if Xa.isHiveInstalled():
             componentList.append('hive')
         if Xa.isHBaseInstalled():
             componentList.append('hbase')
         if Xa.isKnoxInstalled():
             componentList.append('knox')
         if Xa.isStormInstalled():
             componentList.append('storm')
         cls.smoke_test_setup(componentList)
     UpgradePerNode.reportProgress(
         '#### call for background-job setup for argus admin done ####')
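A hedged usage sketch; the enclosing class name (ruArgusAdmin) is an assumption:

# Hypothetical call from the upgrade driver: prepare smoke tests for every
# Ranger-protected component that is actually installed.
ruArgusAdmin.background_job_setup(["hdfs", "hive", "hbase"],
                                  runSmokeTestSetup=True,
                                  config=None)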
Example 14
    def downgrade_master(cls, latestVersion, config, currVersion):
        from beaver.component.xa import Xa
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            '#### ranger-usersync: downgrading to old version.. ####')
        # 'knox.crt'
        # 'cacertswithknox'
        user = Machine.getAdminUser()
        xa_usersync_stop_cmd = 'sudo service ranger-usersync stop'
        xa_usersync_start_cmd = 'sudo service ranger-usersync start'

        node = Xa.getPolicyAdminHost()

        UpgradePerNode.reportProgress(
            '#### ranger-usersync: stopping with command %s ####' %
            xa_usersync_stop_cmd)
        exit_code, stdout = Machine.runas(user,
                                          xa_usersync_stop_cmd,
                                          host=node,
                                          logoutput=True)
        logger.info('****************** xa usersync stop exit_code = ' +
                    str(exit_code))

        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        hdpSelect.changeVersion("ranger-usersync", latestVersion, node)
        logger.info(
            '*************************** hdp-select to version %s done' %
            latestVersion)

        UpgradePerNode.reportProgress(
            '#### ranger-usersync: starting with command %s ####' %
            xa_usersync_start_cmd)
        exit_code, stdout = Machine.runas(user,
                                          xa_usersync_start_cmd,
                                          host=node,
                                          logoutput=True)
        logger.info('****************** xa usersync start exit_code = ' +
                    str(exit_code))
        logger.info('****************** xa usersync start stdout = ' +
                    str(stdout))

        if exit_code == 0 and 'UnixAuthenticationService has started successfully.' in stdout:
            UpgradePerNode.reportProgress(
                '#### ranger-usersync: downgrade successful ####')
        else:
            UpgradePerNode.reportProgress(
                '#### ranger-usersync: downgrade failed! startup exit_code=%d ####'
                % exit_code)
Example 15
    def setup_phoenix(self):
        from beaver.component.hadoop import Hadoop
        PHOENIX_TEST_TABLE = 'basicTable'
        try:
            logger.info("### Phoenix setup starting ###")
            from beaver.component.hbase import HBase
            from beaver.component.phoenix import Phoenix
            from beaver.component.xa import Xa

            # servers = HBase.getNumOfRegionServers()
            # if servers == -1:
            #     time.sleep(10)
            #     servers = HBase.getNumOfRegionServers()
            #
            # if servers == -1 or servers['running'] <= 0:
            #     #We restart all the regionServers
            #     HBase.startRegionServers(nodes=HBase.getRegionServers())

            if Hadoop.isSecure() and not Xa.isArgus():
                Phoenix.grantPermissionsToSystemTables()
            #We create a table to read through the upgrade process
            primaryKey = {'name': 'ID', 'type': 'BIGINT'}
            columns = [{
                'name': 'FirstName',
                'type': 'VARCHAR(30)'
            }, {
                'name': 'SecondName',
                'type': 'VARCHAR(30)'
            }, {
                'name': 'City',
                'type': 'VARCHAR(30)'
            }]
            exit_code, stdout = Phoenix.createTable(PHOENIX_TEST_TABLE,
                                                    primaryKey, columns)
            env = {}
            env['JAVA_HOME'] = Config.get('machine', 'JAVA_HOME')
            #We insert 10 rows into the table
            for i in range(10):
                Phoenix.runSQLLineCmds(
                    "UPSERT INTO %s VALUES (%s,'Name_%s','Surname_%s','City_%s')"
                    % (PHOENIX_TEST_TABLE, str(i), str(i), str(i), str(i)),
                    env)
        except Exception as phoenix_exception:
            logger.info("###   Phoenix setup failed ###")
            logger.info('Caused by: ' + str(phoenix_exception))
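A matching read-back sketch for the table created above; that Phoenix.runSQLLineCmds returns (exit_code, stdout) is an assumption made by analogy with Phoenix.createTable:

# Hypothetical post-upgrade verification of the Phoenix test table.
PHOENIX_TEST_TABLE = 'basicTable'
env = {'JAVA_HOME': Config.get('machine', 'JAVA_HOME')}
exit_code, stdout = Phoenix.runSQLLineCmds(
    "SELECT COUNT(*) FROM %s" % PHOENIX_TEST_TABLE, env)
assert exit_code == 0, "Phoenix read-back of %s failed" % PHOENIX_TEST_TABLE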
Example 16
    def hdp_downgrade(cls,
                      components,
                      currVersion,
                      latestVersion,
                      doTeardown=True):
        '''
        Downgrade HDP Stack With Per Node Method
        :param components: Components to be downgraded
        :param currVersion: Current version (Version V1)
        :param latestVersion: Version to be downgraded to (Version V0)
        '''
        UpgradePerNode.reportProgress(
            "###  Starting downgrade from %s to %s for components=%s ####" %
            (currVersion, latestVersion, components))
        DN = HDFS.getDatanodes()
        core_components = UpgradePerNode.find_existing_core_components(
            components)

        # Run setup for background Jobs for all components
        Rollingupgrade.background_job_setup(components, config=None)

        # Starts Long running background Jobs for all components
        numBackgroundJobs = Rollingupgrade.run_longRunning_Application(
            components, config=None)
        logger.info(
            "Total number of long running background jobs before starting upgrade is %s"
            % numBackgroundJobs)
        UpgradePerNode.reportProgress(
            "###  Just started %s background jobs  ###" % numBackgroundJobs)

        #### TODO - downgrade Hue and run Hue smoke test ####
        UpgradePerNode.reportProgress(
            "###  Starting downgrade of non-core components outside the cluster  ###"
        )
        ## Example : ##
        ## if "hue" in components:
        ##    Rollingupgrade.downgrade_master_and_smoketest(["hue"], latestVersion, config=None)
        ##    Rollingupgrade.downgrade_slave_and_smoketest(["hue"], latestVersion, node)

        #### TODO - downgrade storm and run smoke test ####

        ## Example : ##
        ## if "storm" in components:
        ##    Rollingupgrade.downgrade_master_and_smoketest(["storm"], latestVersion, config=None)
        ##    Rollingupgrade.downgrade_slave_and_smoketest(["storm"], latestVersion, node)

        #### TODO - downgrade Kafka and run smoke test ####

        ## Example : ##
        ## if "kafka" in components:
        ##    Rollingupgrade.downgrade_master_and_smoketest(["kafka"], latestVersion, config=None)
        ##    Rollingupgrade.downgrade_slave_and_smoketest(["kafka"], latestVersion, node)

        #### downgrade Flume ####
        if "flume" in components:
            Rollingupgrade.downgrade_master_and_smoketest(["flume"],
                                                          latestVersion,
                                                          config=None)

        #### downgrade Knox and run smoke test ####
        if "knox" in components:
            Rollingupgrade.downgrade_master_and_smoketest(["knox"],
                                                          latestVersion,
                                                          config=None)
        UpgradePerNode.reportProgress(
            "###  Finished downgrade of non-core components outside the cluster  ###"
        )

        UpgradePerNode.reportProgress(
            "###  Starting downgrade of slider apps ###")
        ### TODO- downgrade slider client and non rolling upgrade of slider-apps ####
        ### TODO- Stop storm-slider app, hbase-slider app, accumulo-slider app
        ### TODO- downgrade storm-slider client
        ### TODO- resubmit storm-slider app, hbase-slider app, accumulo-slider app
        UpgradePerNode.reportProgress(
            "###  Finished downgrade of slider apps ###")

        # Downgrade Non core components
        UpgradePerNode.reportProgress(
            "###  Starting downgrade clients %s inside the cluster ###" %
            components)
        ### TODO - Downgrade CLIENTS ####
        Rollingupgrade.downgrade_client_insideCluster_and_smoketest(
            components, latestVersion, config=None)
        UpgradePerNode.reportProgress(
            "###  Finished downgrade of clients %s inside the cluster ###" %
            components)

        #### TODO - Downgrade phoenix and Run phoenix smoke test ####
        UpgradePerNode.reportProgress(
            "###  started downgrade of non-core cluster components  ###")

        ## Example : ##
        ## if "phoenix" in components:
        ##    Rollingupgrade.downgrade_master_and_smoketest(["phoenix"], latestVersion, config=None)
        ##    Rollingupgrade.downgrade_slave_and_smoketest(["phoenix"], latestVersion, node)

        #### downgrade falcon and run smoke test ####

        if "falcon" in components:
            Rollingupgrade.downgrade_master_and_smoketest(["falcon"],
                                                          latestVersion,
                                                          config=None)

        # #### - downgrade oozie and run smoke test ####
        if "oozie" in components:
            Rollingupgrade.downgrade_master_and_smoketest(["oozie"],
                                                          latestVersion,
                                                          config=None)

        #### Downgrade Pig and run pig smoke test ####
        if "pig" in components:
            Rollingupgrade.downgrade_master_and_smoketest(["pig"],
                                                          latestVersion,
                                                          config=None)

        if "hive" in components:
            Rollingupgrade.downgrade_master_and_smoketest(["hive"],
                                                          latestVersion,
                                                          config=None)
        UpgradePerNode.reportProgress(
            "###  Finished downgrade of non-core cluster components  ###")

        # Downgrade Slave services of core-components (Hdfs, Yarn, hbase)
        UpgradePerNode.reportProgress(
            "###  Starting downgrade of core %s slaves  ###" % core_components)
        i = 0
        #### TODO - Downgrade Datanode, Nodemanager, Regionserver  ####
        for node in DN:
            i += 1
            logger.info("**** Downgrading slave number " + str(i) + ": " +
                        node + " ****")
            Rollingupgrade.downgrade_slave_and_smoketest(
                core_components, latestVersion, node, None)
            #check whether the background jobs are still running
            runningJobs = YARN.getNumOfRunningJobs()
            logger.info("Number of running background jobs = " +
                        str(runningJobs))
            #assert runningJobs == numBackgroundJobs, 'Long-running job ended too early; running jobs = ' + str(runningJobs)
        UpgradePerNode.reportProgress(
            "###  Finished downgrade of %d core %s slaves  ###" %
            (i, core_components))

        # run smoke tests after downgrading
        Rollingupgrade.run_smokeTests(components, config=None)

        #### TODO - Downgrade Namenode, Resourcemanager, Hbase master ####
        UpgradePerNode.reportProgress(
            "###  Starting downgrade of core %s masters  ###" %
            core_components)
        Rollingupgrade.downgrade_master_and_smoketest(core_components,
                                                      latestVersion,
                                                      config=None)

        #### TODO - Run Validation after All Master and slave services are down ####
        Rollingupgrade.testAfterAllMasterSlavesUpgraded(components)

        ### TODO - Downgrade Zookeeper ####
        #Rollingupgrade.downgrade_master_and_smoketest(["zookeeeper"], latestVersion, config=None)
        UpgradePerNode.reportProgress(
            "###  Finished downgrade of core %s masters  ###" %
            core_components)

        #### IF XA is enabled, downgrade XA services ####
        from beaver.component.xa import Xa
        if "argus" in components and Xa.isArgusInstalled():
            logger.info(
                'XA is Enabled in the cluster, setting up and downgrading the same'
            )
            Rollingupgrade.downgrade_master_and_smoketest(['argus'],
                                                          latestVersion,
                                                          config=None,
                                                          currVersion=None)

        #### TODO - Run all component Smoke tests ####
        Rollingupgrade.run_smokeTests(components, config=None)

        #TODO - this is common code with upgrade - move it to a function.   - but the slider part is different in downgrade; shouldn't be ---
        ### Need to stop HDFS Falcon,Yarn long runningJobs ####
        # create flagFile to kill HDFS background job
        ### Need to stop HDFS Falcon,Yarn long runningJobs ####
        TEST_USER = Config.get('hadoop', 'HADOOPQA_USER')
        createCmd = "dfs -touchz " + UpgradePerNode._HDFS_FLAG_FILE
        exit_code, output = HDFS.runas(TEST_USER, createCmd)

        ruYARN.stopYarnLongRunningJob()
        if "falcon" in components:
            from beaver.component.rollingupgrade.ruFalcon import ruFalcon
            ruFalcon.stopFalconLongRunningJob()
        if "storm-slider" in components:
            from beaver.component.rollingupgrade.ruStorm import ruStorm
            ruStorm.teardown_storm_slider_app()

        ## TODO - wait for long running jobs to finish
        isZero = YARN.waitForZeroRunningApps()
        ## Temporarily uncommenting to tune test
        #assert isZero, "all long running jobs are not finished"

        ## TODO - Validate long running jobs
        Rollingupgrade.verifyLongRunningJob(components)

        ## TODO - call Teardown for long running jobs
        Rollingupgrade.background_job_teardown(components, None)

        ## Finalize State
        Rollingupgrade.ru_finalize_state(components)
        UpgradePerNode.reportProgress(
            "###  Completed downgrade from %s to %s for components=%s ####" %
            (currVersion, latestVersion, components))
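A hedged invocation sketch; the class name (ruCluster) and the version strings are illustrative assumptions:

# Hypothetical: downgrade core plus selected non-core components from
# currVersion (V1) back to latestVersion (V0).
ruCluster.hdp_downgrade(["hdfs", "yarn", "hbase", "hive", "oozie"],
                        "2.3.0.0-2557", "2.2.9.0-3393")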
Example 17
    def hdp_upgrade_rollback(cls,
                             components,
                             currVersion,
                             latestVersion,
                             doTeardown=True,
                             finalize=True):
        '''
        Test rollback after a partial upgrade. Note that today only HDFS/HBase need state rollback,
        but we do a partial upgrade of core components without background tests and then roll back.

        Steps
        0) Create some state (e.g. file) that we will delete after upgrade
        1) Prepare and save component states.
        4) Partially Upgrade core components
          For each service, does:
          4a) Upgrade Masters, Upgrade 1 slave
          4b) Run smoke tests for all components.
        5) Delete state created in step 0
        6) Create new state
        7) rollback state
        7b) Run smoke tests
        7c) Validate that state create in 0 still exists but state create in step 6 does not

        :param components: list of Components to upgrade (Can only be HDFS, HBASE)
        :param currVersion: Current Version
        :param latestVersion: Version to be upgraded to
        :param doTeardown: Only Cleanup when required
        '''
        cls.reportProgress(
            "###  Starting upgrade from %s to %s for components=%s ####" %
            (currVersion, latestVersion, components))
        DN = HDFS.getDatanodes()

        # Find core components (HDFS, YARN, HBase) if exist.
        core_components = cls.find_existing_core_components(components)

        #TODO if there are any non-core components then print an error since they are not going to be upgraded.

        #TODO create some state (e.g. files) whose existence will be checked after the rollback
        if "hdfs" in components:
            ruHDFS.createState4Rollback1()
        #if "hdfs" in components:
        #    hbase.createState4Rollback1()
        if "yarn" in components:
            logger.info("Rollback doesn't make sense for YRAN")

        #Prepare and save state before upgrade
        Rollingupgrade.ru_prepare_save_state_for_upgrade(components)

        #upgrade the components in hierarchy
        cls.reportProgress("###  Starting upgrade of core %s masters  ###" %
                           core_components)
        #### IF XA is enabled, upgrade XA services ####
        from beaver.component.xa import Xa
        cls.reportProgress(
            "******************************* checking for argus to be installed *******************************"
        )
        if "argus" in components and Xa.isArgusInstalled():
            logger.info(
                '**************************************************** XA is Enabled in the cluster, setting up and upgrading the same ****************************************************'
            )
            Rollingupgrade.upgrade_master_and_smoketest(
                ['argus'], latestVersion, config=None, currVersion=currVersion)

        if "zookeeper" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["zookeeper"],
                                                        latestVersion,
                                                        config=None)

        # Upgrade Master services -
        # Namenode, Secondarynamenode, Resourcemanager, Application Timelineserver,
        # JobHistoryserver and HbaseMaster with new version

        AfterUpgradeBackGroundJobs = Rollingupgrade.upgrade_master_and_smoketest(
            core_components, latestVersion, config=None)
        cls.reportProgress("###  Finished upgrade of core %s masters  ###" %
                           core_components)

        # upgrade 1 slave service - Datanodes, Nodemanagers and Regionservers with new version
        cls.reportProgress("###  Starting upgrade of core %s slaves  ###" %
                           core_components)

        logger.info("**** Upgrading first  slave:" + DN[0] + " ****")
        Rollingupgrade.upgrade_slave_and_smoketest(core_components,
                                                   latestVersion, DN[0], None,
                                                   False)

        cls.reportProgress("###  Finished upgrade of 1 core %s slave  ###" %
                           (core_components))
        #### Run all component Smoke tests ####
        Rollingupgrade.run_smokeTests(components, config=None)

        #  Run Tests to verify components accessibility
        Rollingupgrade.testAfterAllMasterSlavesUpgraded(components)

        #### Run all component Smoke tests ####
        Rollingupgrade.run_smokeTests(components, config=None)

        # TODO - delete some state that was created before the prepare-save state
        # TODO - create some new state
        if "hdfs" in components:
            ruHDFS.createState4Rollback2()
        if "hbase" in components:
            from beaver.component.rollingupgrade.ruHbase import ruHbase
            ruHbase.createState4Rollback2()
        if "yarn" in components:
            logger.info("Rollback doesn't make sense for YRAN")

        #################### Now do the rollback ########################
        cls.reportProgress(
            "###  Starting  rollback from %s to %s for components=%s ####" %
            (latestVersion, currVersion, components))

        logger.info("**** Downgrading slave number 0 : " + DN[0] + " ****")
        Rollingupgrade.downgrade_slave_and_smoketest(core_components,
                                                     currVersion, DN[0], None)

        #### Downgrade Namenode, Resourcemanager, Hbase master ####
        cls.reportProgress("###  Starting downgrade of core %s masters  ###" %
                           core_components)
        Rollingupgrade.downgrade_master_and_smoketest(core_components,
                                                      currVersion,
                                                      config=None)

        ## rollback state TODO the rollback function does not exist yet.
        #Rollingupgrade.ru_rollback_state(components)
        if "hdfs" in components:
            ruHDFS.ru_rollback_state()
        #if "hbase" in components:
        #    hbase.ru_rollback_state()
        if "yarn" in components:
            logger.info("Rollback doesn't make sense for YRAN")

        # TODO now check that the deleted state exists and the newly create state does not
        if "hdfs" in components:
            ruHDFS.checkState4Rollback()
        if "hbase" in components:
            from beaver.component.rollingupgrade.ruHbase import ruHbase
            ruHbase.checkState4Rollback()
        if "yarn" in components:
            logger.info("Rollback doesn't make sense for YRAN")

        cls.reportProgress(
            "###  Completed rollback from %s to %s for components=%s ####" %
            (latestVersion, currVersion, components))
Example 18
def generateTestReportConf(infile, outfile, results, startTime, endTime):
    config = ConfigParser()
    config.optionxform = str
    config.read(infile)

    if config.has_section(SECTION):
        # set the version to 2.0 so new keys are processed
        config.set(SECTION, 'report_version', '2.0')
        # Stores the original component value, some testsuite runs like HiveServer2Concurr, Sqoop
        # change this for reporting, but we need to preserve for log archiving for uniqueness
        config.set(SECTION, "TESTSUITE_COMPONENT", config.get(SECTION, "COMPONENT"))
        for option, value in config.items(SECTION):
            try:
                if ((option != "SECURE" and value != "")
                        or (Config.getEnv("HDP_STACK_INSTALLED").lower() == "false" and value != "")):
                    continue
                elif option == "BUILD_ID":
                    # if REPO_URL is not set, set the BUILD_ID to 0
                    # otherwise get the BUILD_ID from the file
                    if config.get(SECTION, "REPO_URL") == "" or not config.has_option(SECTION, "REPO_URL"):
                        config.set(SECTION, option, 0)
                    else:
                        config.set(SECTION, option, getBuildId(config.get(SECTION, "REPO_URL")))
                elif option == "HOSTNAME":
                    config.set(SECTION, option, socket.getfqdn())
                elif option == "COMPONENT_VERSION":
                    if not config.has_option(SECTION, "COMPONENT") or config.get(SECTION, "COMPONENT") == "":
                        config.set(SECTION, "COMPONENT", "Hadoop")
                    if "ambarieu-hdf" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, getComponentVersion(config.get(SECTION, "COMPONENT")))
                    elif "ambari" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, getComponentVersion("Ambari"))
                    else:
                        config.set(SECTION, option, getComponentVersion(config.get(SECTION, "COMPONENT")))
                elif option == "OS":
                    if Machine.isWindows():
                        cmd = 'powershell (Get-WmiObject -class Win32_OperatingSystem).Caption'
                        _exit_code, stdout = Machine.runasDeprecated(
                            user=Machine.getAdminUser(), cmd=cmd, passwd=Machine.getAdminPasswd()
                        )
                        config.set(SECTION, option, stdout)
                        continue

                    osname = platform.dist()[0]
                    # hack to check for oracle os as there
                    # is no diff for python
                    if os.path.exists('/etc/oracle-release'):
                        osname = 'oracle'
                    ver = platform.dist()[1]
                    # Need a hack for SLES as python cannot determine 11.1 vs 11.3
                    if osname.lower() == 'suse':
                        # read the file /etc/SuSE-release and determine the patch version.
                        with open('/etc/SuSE-release', 'r') as f:
                            txt = f.read()
                        # get the patch level. For example
                        # PATCHLEVEL = 3
                        m = re.search('PATCHLEVEL = (.*)', txt, re.MULTILINE)
                        # if you find a match append to the version string
                        if m and m.group(1):
                            ver = '%s.%s' % (ver, m.group(1))

                    arch = platform.architecture()[0]

                    if os.path.exists('/etc/os-release'):
                        try:
                            with open('/etc/os-release', 'r') as f:
                                txt = f.read()
                            m = re.search('NAME="(.*)"', txt, re.MULTILINE)
                            if m and m.group(1):
                                if m.group(1) == "Amazon Linux":
                                    osname = "amazonlinux"
                                    m = re.search('VERSION="(.*)"', txt, re.MULTILINE)
                                    if m and m.group(1):
                                        ver = m.group(1)
                                        if "2 (2017.12)" in ver:
                                            ver = "2"
                                    # the amzn ami which qe team is using is of 64 bit
                                    arch = "64bit"
                        except Exception:
                            logger.error(traceback.format_exc())

                    config.set(SECTION, option, '%s-%s-%s' % (osname, ver, arch))
                elif option == "HDP_STACK":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        from beaver.component.ambari import Ambari
                        hdpVersion = Ambari.getHDPVersion()
                        if hdpVersion and hdpVersion[0] in ('1', '2'):
                            config.set(SECTION, option, "h" + hdpVersion[0])
                        else:
                            config.set(SECTION, option, 'h2')
                    else:
                        hadoopVersion = getComponentVersion("Hadoop")
                        if hadoopVersion and hadoopVersion[0] in ('1', '2'):
                            config.set(SECTION, option, "h" + hadoopVersion[0])
                elif option == "TDE":
                    from beaver.component.hadoop import HDFS2
                    if HDFS2.isKMSEnabled():
                        config.set(SECTION, option, "on")
                    else:
                        config.set(SECTION, option, "off")

                elif option == "SECURE":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        from beaver.component.ambari import Ambari

                        config.set(SECTION, option, str(Ambari.isSecure()).lower())
                        secure_str = str(Ambari.isSecure()).lower()
                    else:
                        from beaver.component.hadoop import Hadoop
                        secure_str = str(Hadoop.isSecure()).lower()
                        if config.get(SECTION, "COMPONENT") == "HiveServer2Concurr":
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-http-en"
                            else:
                                secure_str += "-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLDAP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ldap-en"
                            else:
                                secure_str += "-ldap"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "LDAP")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLDAPHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ldap-http-en"
                            else:
                                secure_str += "-ldap-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "LDAP")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrSSL":
                            if Hadoop.isEncrypted():
                                secure_str += "-ssl-en"
                            else:
                                secure_str += "-ssl"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "true")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrSSLHTTP":
                            if Hadoop.isEncrypted():
                                secure_str += "-ssl-http-en"
                            else:
                                secure_str += "-ssl-http"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "HTTP")
                            config.set(SECTION, "hs2_ssl", "true")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrTPUser":
                            if Hadoop.isEncrypted():
                                secure_str += "-tpuser-en"
                            else:
                                secure_str += "-tpuser"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            config.set(SECTION, "hs2_authentication", "Kerberos")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "true")
                        elif config.get(SECTION, "COMPONENT") == "HiveServer2ConcurrLongRunning":
                            if Hadoop.isEncrypted():
                                secure_str += "-longrun-en"
                            else:
                                secure_str += "-longrun"
                            config.set(SECTION, "hs2_authorization", "SQL Standard")
                            if Hadoop.isSecure():
                                config.set(SECTION, "hs2_authentication", "Kerberos")
                            else:
                                config.set(SECTION, "hs2_authentication", "Unsecure")
                            config.set(SECTION, "hs2_transport", "Binary")
                            config.set(SECTION, "hs2_ssl", "false")
                            config.set(SECTION, "hs2_trusted_proxy", "false")
                        elif config.get(SECTION, "COMPONENT") == "SqoopDb2":
                            config.set(SECTION, "COMPONENT", "Sqoop")
                        else:
                            if Hadoop.isEncrypted():
                                secure_str += '-en'
                        config.set(SECTION, option, secure_str)
                elif option == "BLOB":
                    pass
                elif option == "RAN":
                    # don't add skipped; just pass + fail + aborted
                    config.set(SECTION, option, results[0] + len(results[1]) + results[3])
                elif option == "PASS":
                    config.set(SECTION, option, results[0])
                elif option == "FAIL":
                    config.set(SECTION, option, len(results[1]))
                elif option == "SKIPPED":
                    config.set(SECTION, option, results[2])
                elif option == "ABORTED":
                    config.set(SECTION, option, results[3])
                elif option == "FAILED_TESTS":
                    failedTests = ",".join(results[1])
                    failureSummary = ReportHelper.getFailureSummary(failedTests)
                    config.set(SECTION, "FAILURE_SUMMARY", failureSummary)
                    tmpFailedTests = ReportHelper.getGroupedFailedTests(failedTests)
                    config.set(SECTION, option, ReportHelper.getMergedFailedTests(tmpFailedTests, failureSummary))
                elif option == "NUM_OF_DATANODES":
                    if "ambari" in config.get(SECTION, "COMPONENT"):
                        config.set(SECTION, option, "N/A")
                    else:
                        from beaver.component.hadoop import HDFS

                        config.set(SECTION, option, HDFS.getDatanodeCount())
                elif option == "BUILD_URL":
                    if 'BUILD_URL' in os.environ:
                        config.set(SECTION, option, os.environ['BUILD_URL'])
                elif option == "HDP_RELEASE":
                    # If RU/RB, we must override HDP_RELEASE
                    #   (we can't fix this with product front. Discussed in BUG-31369.)
                    if config.get(SECTION, "TESTSUITE_COMPONENT").lower() in ["rollingupgrade", "rollback",
                                                                              "rollingupgrade-ha", "rollback-ha"]:
                        config.set(SECTION, option, "dal")
                    else:
                        config.set(SECTION, option, getRepoId(config.get(SECTION, "REPO_URL")))
                elif option == "JDK":
                    config.set(SECTION, option, Machine.getJDK())
                elif option == "DB":
                    if not config.has_option(SECTION, "COMPONENT") or config.get(SECTION, "COMPONENT") == "":
                        config.set(SECTION, "COMPONENT", "Hadoop")
                    config.set(SECTION, option, getDatabaseFlavor(config.get(SECTION, "COMPONENT")))
            except Exception as error:
                logger.error("ERROR processing option: %s", option)
                logger.error("Exception: %s", error)
        # make sure Hadoop is installed before appending Tez to the component name
        if Config.getEnv("HDP_STACK_INSTALLED").lower() == "true" and config.has_option(SECTION, "COMPONENT"):
            if "ambari" in config.get(SECTION, "COMPONENT"):
                kerberos_server_type = 'n/a'
                from beaver.component.ambari import Ambari
                if Ambari.isSecure():
                    kerberos_server_type = 'mit'
                config.set(SECTION, 'kerberos_server_type', kerberos_server_type)
            else:
                from beaver.component.hadoop import Hadoop, HDFS
                from beaver.component.slider import Slider

                # set execution_framework. New columns for dashboard v2
                # TODO: This needs to be improved to be component specific.
                if Hadoop.isTez():
                    if Slider.isInstalled():
                        config.set(SECTION, 'execution_framework', 'tez-slider')
                    else:
                        config.set(SECTION, 'execution_framework', 'tez')
                else:
                    if Slider.isInstalled():
                        config.set(SECTION, 'execution_framework', 'mr-slider')
                    else:
                        config.set(SECTION, 'execution_framework', 'mr')
                # set wire_encryption
                # TODO: This needs to be improved to be component specific.
                if Hadoop.isEncrypted():
                    config.set(SECTION, 'wire_encryption', 'true')
                else:
                    config.set(SECTION, 'wire_encryption', 'false')
                # set kerberos_server_type
                kerberos_server_type = 'n/a'
                if Hadoop.isSecure():
                    kerberos_server_type = 'mit'
                    # add a check for AD
                    if Machine.isLinux():
                        gateway = Config.get("machine", "GATEWAY")
                        Machine.copyToLocal(Machine.getAdminUser(), gateway, '/etc/krb5.conf', '/tmp/krb5.conf')
                        f = open('/tmp/krb5.conf', 'r')
                        txt = f.read()
                        f.close()
                        # Find all admin_server entries in krb5.conf, with ports if present
                        p = re.compile('admin_server = ((?!FILE).*)')
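                        # matches lines such as (hypothetical host):
                        #   admin_server = kdc1.example.com:749
                        # the port, when present, is stripped off below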
                        admin_server_list_with_ports = p.findall(txt)
                        admin_server_list = []
                        for admin_server_with_port in admin_server_list_with_ports:
                            admin_server_list.append(admin_server_with_port.split(':')[0])
                        # If there is more than one admin server, first check whether one of them
                        #  is an AD host; then, to ensure that not all of them are AD hosts, check
                        #  whether at least one admin server is not in the AD hosts list.
                        if len(admin_server_list) > 1:
                            for ad_host in AD_HOSTS_LIST:
                                if ad_host in admin_server_list:
                                    for admin_server in admin_server_list:
                                        if admin_server not in AD_HOSTS_LIST:
                                            kerberos_server_type = 'ad+mit'
                                            break
                        else:
                            for ad_host in AD_HOSTS_LIST:
                                if ad_host in admin_server_list:
                                    kerberos_server_type = 'ad'
                                    break
                config.set(SECTION, 'kerberos_server_type', kerberos_server_type)

                try:
                    from beaver.component.xa import Xa
                    # set argus. New column for dashboard v2
                    if Xa.isArgus():
                        config.set(SECTION, 'argus', 'true')
                    else:
                        config.set(SECTION, 'argus', 'false')
                except Exception as error:
                    logger.error("ERROR processing argus")
                    logger.error("Exception: %s", error)

                # set TDE (transparent data encryption) based on whether KMS is enabled
                if HDFS.isKMSEnabled():
                    config.set(SECTION, 'tde', 'true')
                else:
                    config.set(SECTION, 'tde', 'false')

        config.set(SECTION, 'START_TIME', startTime)
        config.set(SECTION, 'END_TIME', endTime)
        coverage_summary_file = os.path.join(Config.getEnv('ARTIFACTS_DIR'), "coverage_summary.json")
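        # coverage_summary.json is expected to be a flat JSON object whose keys
        # become report options, e.g. (a hypothetical example):
        #   {"LINES_COVERED": "12345", "COVERAGE_PCT": "87"}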
        if os.path.exists(coverage_summary_file):
            with open(coverage_summary_file, "r") as fp:
                coverage_summary = json.load(fp)
            for key, value in coverage_summary.items():
                config.set(SECTION, key, value)
        with open(outfile, 'w') as f:
            config.write(f)

    elif config.has_section('SANDBOX'):
        out_config = ConfigParser()
        out_config.optionxform = str
        out_config.add_section(SECTION)

        sb_type = config.get('SANDBOX', 'vm_env')
        out_config.set(SECTION, 'BUILD_ID', '0')
        ova_uri = ''
        if sb_type == 'VBOX':
            ova_uri = config.get(sb_type, 'vbox_ova_uri')
        elif sb_type == 'FUSION':
            ova_uri = config.get(sb_type, 'fus_ova_uri')
        elif sb_type == 'HYPERV':
            ova_uri = config.get(sb_type, 'hyperv_ova_uri')
        out_config.set(SECTION, 'REPO_URL', ova_uri)
        sb_host = ''
        if os.name != 'nt':
            sb_host = os.popen("hostname -f").read().strip()
            sb_host = sb_host + '(' + os.popen("ifconfig en0 | grep 'inet ' | awk -F ' ' '{print $2}'"
                                               ).read().strip() + ')'
        else:
            sb_host = 'Kiev local host'
        out_config.set(SECTION, 'HOSTNAME', sb_host)

        out_config.set(SECTION, 'HDP_STACK', "h" + (config.get('VERSIONS', 'hadoop_version')[0]))
        out_config.set(SECTION, 'COMPONENT', 'SANDBOX')
        out_config.set(SECTION, 'TESTSUITE_COMPONENT', 'SANDBOX')

        if sb_type == 'HYPERV':
            sb_ver = 'hyper-v'
        else:
            tmp = ['%20', 'Hortonworks', 'VirtualBox', '.ova', 'VMware', '_']
            sb_ver = ova_uri.split('/')[5]
            for rstr in tmp:
                sb_ver = sb_ver.replace(rstr, '')

        out_config.set(SECTION, 'COMPONENT_VERSION', sb_ver)
        out_config.set(SECTION, 'CHECKSUM', 'N/A')

        ver_num = os.popen("sw_vers | grep 'ProductVersion:' | awk -F ' ' '{print $2}'").read().strip()
        if sb_type == 'HYPERV':
            out_config.set(SECTION, 'OS', 'Windows 8.1')
        else:
            out_config.set(SECTION, 'OS', 'MAC OS X ' + ver_num)
        out_config.set(SECTION, 'SECURE', 'false')
        out_config.set(SECTION, 'TYPE', 'System, UI')
        out_config.set(SECTION, 'BLOB', 'N/A')
        out_config.set(SECTION, 'PKG', 'OVA')
        if sb_type == 'VBOX':
            out_config.set(SECTION, 'INSTALLER', 'Oracle VirtualBox')
        elif sb_type == 'FUSION':
            out_config.set(SECTION, 'INSTALLER', 'VMWare Fusion')
        elif sb_type == 'HYPERV':
            out_config.set(SECTION, 'INSTALLER', 'Windows Hyper-V')
        out_config.set(SECTION, 'RAN', results[0] + len(results[1]) + results[3])
        out_config.set(SECTION, 'PASS', results[0])
        out_config.set(SECTION, 'FAIL', len(results[1]))
        out_config.set(SECTION, 'SKIPPED', results[2])
        out_config.set(SECTION, 'ABORTED', results[3])
        out_config.set(SECTION, 'FAILED_DEPENDENCY', 'N/A')
        out_config.set(SECTION, 'FAILED_TESTS', ",".join(results[1]))

        out_config.set(SECTION, 'NUM_OF_DATANODES', '1')
        out_config.set(SECTION, 'HDP_RELEASE', ova_uri.split('/')[4])
        out_config.set(SECTION, 'JDK', '1.6.0_51')
        out_config.set(SECTION, 'DB', 'N/A')
        out_config.set(SECTION, 'BROWSER', config.get('SANDBOX', 'test_browser'))

        with open(outfile, 'w') as f:
            out_config.write(f)
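
The report written above is a plain INI-style file, so it can be read back with ConfigParser. A minimal sketch of doing so, assuming a hypothetical report path (in the code above the path comes from outfile):

    from ConfigParser import ConfigParser

    report = ConfigParser()
    report.optionxform = str  # preserve option case, matching the writer above
    report.read('artifacts/test_report.conf')  # hypothetical value of outfile
    for section in report.sections():
        for option in report.options(section):
            print option, '=', report.get(section, option)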
Example no. 19
    def upgrade_argus_admin(cls, latestVersion, config, currVersion):
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress('#### install argus admin. ####')
        # knox.crt and cacertswithknox must be preserved across the upgrade
        node = Xa.getPolicyAdminHost()
        user = Machine.getAdminUser()
        xa_admin_stop_cmd = 'sudo service ranger-admin stop'
        xa_admin_start_cmd = 'sudo service ranger-admin start'
        ranger_old_version = '/usr/hdp/' + currVersion + '/ranger-admin/'
        ranger_new_version = '/usr/hdp/' + latestVersion + '/ranger-admin/'

        localTestWorkDir1 = os.path.join(Config.getEnv('ARTIFACTS_DIR'))
        knox_cert_files = '{knox.crt,cacertswithknox}'
        source_files_to_copy = os.path.join(ranger_old_version,
                                            knox_cert_files)
        command_copy_knoxcerts = 'cp -f %s %s' % (source_files_to_copy,
                                                  localTestWorkDir1)
        exit_code, stdout = Machine.runas(user,
                                          command_copy_knoxcerts,
                                          host=node,
                                          logoutput=True)
        logger.info(
            '*************************** admin copy command_copy_knoxcerts exit_code = '
            + str(exit_code))
        logger.info(
            '*************************** admin copy command_copy_knoxcerts stdout = '
            + str(stdout))

        exit_code, stdout = Machine.runas(user,
                                          xa_admin_stop_cmd,
                                          host=node,
                                          logoutput=True)
        logger.info('****************** xa admin stop exit_code = ' +
                    str(exit_code))

        source_properties_file = os.path.join(ranger_old_version,
                                              'install.properties')
        destination_properties_file = os.path.join(ranger_new_version,
                                                   'install.properties')
        command = 'cp -f %s %s' % (source_properties_file,
                                   destination_properties_file)
        exit_code, stdout = Machine.runas(user,
                                          command,
                                          host=node,
                                          logoutput=True)
        logger.info(
            '*************************** admin copy command exit_code = ' +
            str(exit_code))
        logger.info(
            '*************************** admin copy command stdout = ' +
            str(stdout))

        command = '(cd %s && export JAVA_HOME=%s && ./setup.sh)' % (
            ranger_new_version, Machine.getJavaHome())
        UpgradePerNode.reportProgress(
            '#### ranger-admin: installing new version with command %s ###' %
            command)
        exit_code, stdout = Machine.runas(user,
                                          command,
                                          host=node,
                                          logoutput=True)

        if exit_code == 0 and 'Installation of XASecure PolicyManager Web Application is completed.' in stdout:
            UpgradePerNode.reportProgress(
                '#### ranger-admin: installation successful ###')

            from beaver.component.rollingupgrade.ruCommon import hdpSelect
            hdpSelect.changeVersion("ranger-admin", latestVersion, node)
            logger.info(
                '*************************** ranger-admin: hdp-select to new version done'
            )

            source_files_to_copy = os.path.join(localTestWorkDir1,
                                                knox_cert_files)
            command_copy_knoxcerts = 'cp -f %s %s' % (source_files_to_copy,
                                                      ranger_new_version)
            exit_code, stdout = Machine.runas(user,
                                              command_copy_knoxcerts,
                                              host=node,
                                              logoutput=True)
            logger.info(
                '*************************** admin copy command_copy_knoxcerts back exit_code = '
                + str(exit_code))
            logger.info(
                '*************************** admin copy command_copy_knoxcerts back stdout = '
                + str(stdout))

            UpgradePerNode.reportProgress(
                '#### ranger-admin: starting new version with command %s ###' %
                xa_admin_start_cmd)
            exit_code, stdout = Machine.runas(user,
                                              xa_admin_start_cmd,
                                              host=node,
                                              logoutput=True)
            logger.info('****************** xa admin start exit_code = ' +
                        str(exit_code))
            logger.info('****************** xa admin start stdout = ' +
                        str(stdout))

            if exit_code == 0:
                UpgradePerNode.reportProgress(
                    '#### ranger-admin: new version started successfully ####')
            else:
                UpgradePerNode.reportProgress(
                    '#### ranger-admin: failed to start new version! exit_code=%d ####'
                    % exit_code)
        else:
            logger.info(
                '****************** setup.sh script failed for admin ******* ')
            UpgradePerNode.reportProgress(
                '#### ranger-admin: installation of new version failed! exit_code=%d ###'
                % exit_code)
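
A minimal invocation sketch for the step above, assuming the enclosing class is named ruArgus (a placeholder) and using hypothetical HDP build numbers; the real values come from the rolling-upgrade driver:

    # hypothetical version strings; config is accepted but unused here
    ruArgus.upgrade_argus_admin(latestVersion='2.2.4.2-2',
                                config=None,
                                currVersion='2.2.0.0-2041')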
Example no. 20
    def createRangerHivePolicy(cls,
                               database,
                               udf=None,
                               table=None,
                               column=None,
                               userList=None,
                               weburl=None):
        BeaconRanger.changePolicyInterval()
        logger.info('creating policy for %s' % database)
        users = [Config.getEnv('USER')] if userList is None else userList
        if table is not None:
            polItem = XaPolicy.createPolicyItem(
                userList=users, PermissionList=XaPolicy.HIVE_ALL_PERMISSIONS)
            if column is not None:
                policy = XaPolicy.getPolicyJson(policy_prefix + '_' +
                                                database + '_table_' + table +
                                                '_col_' + column,
                                                'hive', [polItem],
                                                ambariWeburl=weburl,
                                                database=database,
                                                table=table,
                                                column=column)
            else:
                policy = XaPolicy.getPolicyJson(policy_prefix + '_' +
                                                database + '_table_' + table,
                                                'hive', [polItem],
                                                ambariWeburl=weburl,
                                                database=database,
                                                table=table,
                                                column='*')
        elif udf is not None:
            polItem = XaPolicy.createPolicyItem(
                userList=users, PermissionList=XaPolicy.HIVE_ALL_PERMISSIONS)
            policy = XaPolicy.getPolicyJson(policy_prefix + '_' + database +
                                            '_udf_' + udf,
                                            'hive', [polItem],
                                            ambariWeburl=weburl,
                                            database=database,
                                            udf=udf)
        else:
            # repladmin check
            polItem = XaPolicy.createPolicyItem(
                userList=users,
                PermissionList=[
                    XaPolicy.PERMISSION_CREATE, XaPolicy.PERMISSION_SELECT,
                    XaPolicy.PERMISSION_DROP, XaPolicy.PERMISSION_REPL_ADMIN
                ])
            policy = XaPolicy.getPolicyJson(policy_prefix + '_' + database,
                                            'hive', [polItem],
                                            ambariWeburl=weburl,
                                            database=database,
                                            table='*',
                                            column='*')
        logger.info('going to create policy: ' + str(policy))
        status_code, response = Xa.createPolicy_api_v2(
            policy,
            policyActivationWaitTime=BeaconRanger.policyActivationWaitTime,
            weburl=weburl)

        if status_code == 200 and weburl != target_weburl:
            BeaconRanger.policiesAddedBeforeTest.append(policy)
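
A usage sketch for the helper above (hypothetical database/table names). Passing a table creates a policy with all Hive permissions; passing only a database takes the repladmin branch with CREATE/SELECT/DROP/REPL ADMIN permissions:

    # table-level policy for the current test user
    BeaconRanger.createRangerHivePolicy('beacon_test_db', table='beacon_test_table')
    # database-level policy (repladmin branch)
    BeaconRanger.createRangerHivePolicy('beacon_test_db')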
Example no. 21
    def upgrade_master(cls, latestVersion, config, currVersion):
        from beaver.component.xa import Xa
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress('#### install argus usersync. ####')
        user = Machine.getAdminUser()
        xa_usersync_stop_cmd = 'sudo service ranger-usersync stop'
        xa_usersync_start_cmd = 'sudo service ranger-usersync start'
        ranger_old_version = '/usr/hdp/' + currVersion + '/ranger-usersync/'
        ranger_new_version = '/usr/hdp/' + latestVersion + '/ranger-usersync/'

        exit_code, stdout = Machine.runas(user,
                                          xa_usersync_stop_cmd,
                                          host=Xa.getPolicyAdminHost(),
                                          logoutput=True)
        logger.info('****************** xa usersync stop exit_code = ' +
                    str(exit_code))

        source_properties_file = os.path.join(ranger_old_version,
                                              'install.properties')
        destination_properties_file = os.path.join(ranger_new_version,
                                                   'install.properties')
        command = 'cp -f %s %s' % (source_properties_file,
                                   destination_properties_file)
        exit_code, stdout = Machine.runas(user,
                                          command,
                                          host=Xa.getPolicyAdminHost(),
                                          logoutput=True)
        logger.info(
            '*************************** usersync copy command exit_code = ' +
            str(exit_code))
        logger.info(
            '*************************** usersync copy command stdout = ' +
            str(stdout))

        command = '(cd %s && export JAVA_HOME=%s && ./setup.sh)' % (
            ranger_new_version, Machine.getJavaHome())
        UpgradePerNode.reportProgress(
            '#### ranger-usersync: installing new version with command %s ###'
            % command)
        exit_code, stdout = Machine.runas(user,
                                          command,
                                          host=Xa.getPolicyAdminHost(),
                                          logoutput=True)

        if exit_code == 0:
            UpgradePerNode.reportProgress(
                '#### ranger-usersync: installation successful ###')

            node = Xa.getPolicyAdminHost()
            from beaver.component.rollingupgrade.ruCommon import hdpSelect
            hdpSelect.changeVersion("ranger-usersync", latestVersion, node)
            logger.info(
                '*************************** hdp-select to new version done'
            )

            UpgradePerNode.reportProgress(
                '#### ranger-usersync: starting new version with command %s ###'
                % xa_usersync_start_cmd)
            exit_code, stdout = Machine.runas(user,
                                              xa_usersync_start_cmd,
                                              host=node,
                                              logoutput=True)
            logger.info('****************** xa usersync start exit_code = ' +
                        str(exit_code))
            logger.info('****************** xa usersync start stdout = ' +
                        str(stdout))

            if exit_code == 0 and 'UnixAuthenticationService has started successfully.' in stdout:
                UpgradePerNode.reportProgress(
                    '#### ranger-usersync: new version started successfully ###'
                )
            else:
                UpgradePerNode.reportProgress(
                    '#### ranger-usersync: new version failed to start! exit_code=%d ###'
                    % exit_code)
        else:
            logger.info(
                '****************** setup.sh script failed for usersync ******* '
            )
            UpgradePerNode.reportProgress(
                '#### ranger-usersync: installation of new version failed! exit_code=%d ###'
                % exit_code)
Example no. 22
 def resetRanger(cls):
     policy = RANGER_KNOX_POLICY
     if policy is not None:
         Xa.deletePolicy(policy['id'])
         Knox.restartKnox()
Example no. 23
    def hdp_upgrade(cls,
                    components,
                    currVersion,
                    latestVersion,
                    doTeardown=True,
                    finalize=True):
        '''
        Upgrade the HDP stack with the per-node method.

        Steps
        1) Prepare and save component states.
        2) Set up prerequisites for background jobs.
        3) Start long-running background jobs for all components.
        4) Upgrade core components from bottom to top.
          For each service:
          4a) Upgrade service.
          4b) Run smoke tests for all components.
          4c) Check number of all background jobs.
        5) After all components are upgraded, run another set of tests.
        6) Repeat same process for non-core components.
        7) Upgrade clients of components which were upgraded earlier.
        8) Upgrade client-only components.
        9) After all components are upgraded, run smoke tests.
        10) Stop long running jobs.
        11) Look for failed and killed jobs.
        12) Verify outputs of successful jobs.
        13) Finalize all states.

        :param components: list of components to upgrade
        :param currVersion: current version
        :param latestVersion: version to upgrade to
        :param doTeardown: run background-job teardown after the upgrade
        :param finalize: finalize the saved upgrade state after the upgrade
        '''
        cls.reportProgress(
            "###  Starting upgrade from %s to %s for components=%s ####" %
            (currVersion, latestVersion, components))
        DN = HDFS.getDatanodes()

        # Find core components (HDFS, YARN, HBase) if exist.
        core_components = cls.find_existing_core_components(components)

        #Prepare and save state before upgrade
        Rollingupgrade.ru_prepare_save_state_for_upgrade(components)

        # Run setup for background Jobs for all components
        Rollingupgrade.background_job_setup(components, config=None)

        # Starts Long running background Jobs for all components
        numBackgroundJobs = Rollingupgrade.run_longRunning_Application(
            components, config=None)
        logger.info(
            "Total number of long running background jobs before starting upgrade is %s"
            % numBackgroundJobs)
        cls.reportProgress("###  Just started %s background jobs  ###" %
                           numBackgroundJobs)

        # upgrade the components in hierarchy order
        cls.reportProgress("###  Starting upgrade of core %s masters  ###" %
                           core_components)
        #### IF XA is enabled, upgrade XA services ####
        from beaver.component.xa import Xa
        cls.reportProgress(
            "******************************* checking for argus to be installed *******************************"
        )
        if "argus" in components and Xa.isArgusInstalled():
            logger.info(
                '**************************************************** XA is Enabled in the cluster, setting up and upgrading the same ****************************************************'
            )
            Rollingupgrade.upgrade_master_and_smoketest(
                ['argus'], latestVersion, config=None, currVersion=currVersion)

        ##### TODO - upgrade ZOOKEEPER ########
        if "zookeeper" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["zookeeper"],
                                                        latestVersion,
                                                        config=None)
        # Upgrade Master services - Namenode, Secondarynamenode, Resourcemanager, Application Timelineserver, JobHistoryserver and HbaseMaster with new version
        #### TODO - Application Timelineserver HbaseMaster ####
        AfterUpgradeBackGroundJobs = Rollingupgrade.upgrade_master_and_smoketest(
            core_components, latestVersion, config=None)
        cls.reportProgress("###  Finished upgrade of core %s masters  ###" %
                           core_components)
        numBackgroundJobs = numBackgroundJobs + AfterUpgradeBackGroundJobs
        logger.info(
            "Total number of long running background jobs after upgrading master services is %s"
            % numBackgroundJobs)

        # upgrade slave service - Datanodes, Nodemanagers and Regionservers with new version
        cls.reportProgress("###  Starting upgrade of core %s slaves  ###" %
                           core_components)
        i = 0
        #### TODO - upgrade Regionserver  ####
        for node in DN:
            i += 1
            logger.info("**** Upgrading slave number " + str(i) + ": " + node +
                        " ****")
            if i % 4 == 0:
                runSmoke = True
            else:
                runSmoke = False
            Rollingupgrade.upgrade_slave_and_smoketest(core_components,
                                                       latestVersion, node,
                                                       None, runSmoke)
            # check whether the background jobs are still running
            runningJobs = YARN.getNumOfRunningJobs()
            logger.info("Number of running background jobs = " + str(runningJobs))
            #assert runningJobs == numBackgroundJobs, 'Long-running job ended too early; running jobs = ' + str(runningJobs)

        cls.reportProgress("###  Finished upgrade of %d core %s slaves  ###" %
                           (i, core_components))
        #### Run all component Smoke tests ####
        Rollingupgrade.run_smokeTests(components, config=None)

        #  Run Tests to verify components accessibility
        Rollingupgrade.testAfterAllMasterSlavesUpgraded(components)

        #### Starting upgrade non core components ####
        cls.reportProgress(
            "###  Starting upgrade of non-core cluster components  ###")
        if "hive" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["hive"],
                                                        latestVersion,
                                                        config=None)

        #### TODO- upgrade pig to N+1 version ####

        #### TODO - Run pig smoke test ####
        #     ## Example : ##
        if "pig" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["pig"],
                                                        latestVersion,
                                                        config=None)
        # ##    Rollingupgrade.upgrade_slave_and_smoketest(["pig"], latestVersion, node)

        # #### TODO - upgrade oozie server to N+1 version ####

        # #### - Run oozie smoke test ####
        if "oozie" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["oozie"],
                                                        latestVersion,
                                                        config=None)

        #### upgrade falcon to N+1 version and run its smoke tests ####

        if "falcon" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["falcon"],
                                                        latestVersion,
                                                        config=None)

        #### TODO - upgrade phoenix to N+1 version ####

        #### TODO - Run phoenix smoke test ####
        if "phoenix" in components:
            ruPhoenix.run_smoke_test(ruPhoenix._smokeTestNum)

        #### TODO - upgrade sqoop to N+1 version ####
        #### TODO - Run sqoop smoke test ####

        cls.reportProgress(
            "###  Finished upgrade of non-core cluster components  ###")

        ## For storm-slider, we want to verify the topologies and kill the storm-slider app.
        if "storm-slider" in components:
            from beaver.component.rollingupgrade.ruStorm import ruStorm
            ruStorm.verify_and_stop_slider_app()

        #### TODO- upgrade clients for Argus, Zk, Hdfs, Yarn, MR, Tez, Hive, Pig, Hbase, Falcon, oozie, sqoop , phoenix, mahout ####
        cls.reportProgress(
            "###  Starting upgrade of clients %s inside the cluster ###" %
            components)
        Rollingupgrade.upgrade_client_insideCluster_and_smoketest(
            components, latestVersion, config=None)

        if "storm-slider" in components:
            from beaver.component.rollingupgrade.ruStorm import ruStorm
            ruStorm.start_slider_app_resubmit_topologies()
            time.sleep(120)  # Allow time for storm-slider topologies to run.

        cls.reportProgress("###  Starting upgrade of slider apps ###")
        ### TODO- upgrade slider client and non rolling upgrade of slider-apps ####
        ### TODO- Stop storm-slider app, hbase-slider app, accumulo-slider app
        ### TODO- Upgrade storm-slider client
        ### TODO- resubmit storm-slider app, hbase-slider app, accumulo-slider app
        cls.reportProgress("###  Finished upgrade of slider apps ###")

        #### Knox upgrade
        if "knox" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["knox"],
                                                        latestVersion,
                                                        config=None)

        #### upgrade Flume to N+1 version ####
        if "flume" in components:
            Rollingupgrade.upgrade_master_and_smoketest(["flume"],
                                                        latestVersion,
                                                        config=None)

        #### TODO - upgrade Kafka to N+1 version ####

        #### TODO - Run Kafka smoke test ####

        ## Example : ##
        ## if "kafka" in components:
        ##    Rollingupgrade.upgrade_master_and_smoketest(["kafka"], latestVersion, config=None)
        ##    Rollingupgrade.upgrade_slave_and_smoketest(["kafka"], latestVersion, node)

        #### TODO - upgrade Storm to N+1 version ####

        #### TODO - Run storm smoke test ####

        ## Example : ##
        ## if "storm" in components:
        ##    Rollingupgrade.upgrade_master_and_smoketest(["storm"], latestVersion, config=None)
        ##    Rollingupgrade.upgrade_slave_and_smoketest(["storm"], latestVersion, node)

        #### TODO - upgrade Hue to N+1 version ####

        #### TODO - Run Hue smoke test ####

        ## Example : ##
        ## if "hue" in components:
        ##    Rollingupgrade.upgrade_master_and_smoketest(["hue"], latestVersion, config=None)
        ##    Rollingupgrade.upgrade_slave_and_smoketest(["hue"], latestVersion, node)
        cls.reportProgress(
            "###  Finished upgrade of non-core components outside the cluster  ###"
        )

        #### TODO - Run all component Smoke tests ####
        Rollingupgrade.run_smokeTests(components, config=None)

        ### Need to stop HDFS, Falcon, and YARN long-running jobs ####
        # create flagFile to kill HDFS background job
        TEST_USER = Config.get('hadoop', 'HADOOPQA_USER')
        createCmd = "dfs -touchz " + cls._HDFS_FLAG_FILE
        exit_code, output = HDFS.runas(TEST_USER, createCmd)

        if "falcon" in components:
            from beaver.component.rollingupgrade.ruFalcon import ruFalcon
            ruFalcon.stopFalconLongRunningJob()
        if "yarn" in components:
            ruYARN.stopYarnLongRunningJob()
        if "slider" in components:
            ruSlider.stopSliderLongRunningJob()
        if "storm-slider" in components:
            from beaver.component.rollingupgrade.ruStorm import ruStorm
            ruStorm.teardown_storm_slider_app()

        ## TODO - wait for long running jobs to finish
        isZero = YARN.waitForZeroRunningApps()
        if isZero:
            cls.reportProgress("#### None apps are running. ####")
        else:
            cls.reportProgress(
                "#### Check Failed. some apps are running. ####")
        #assert isZero, "all long running jobs are not finished"

        ### List failed/killed applications ####
        Failed_Killed_apps = YARN.getFailedKilledAppList()
        cls.reportProgress(
            "### Listing Killed/Failed applications while performing upgrade ####"
        )
        for app in Failed_Killed_apps:
            queue = YARN.getQueueForApp(app)
            logger.info(" %s running on %s queue Failed/Killed." %
                        (app, queue))
            cls.reportProgress(
                "#### %s running on %s queue Failed/Killed. ####" %
                (app, queue))

        ## TODO - Validate long running jobs
        Rollingupgrade.verifyLongRunningJob(components)

        ## KILL APPLICATIONS ####
        YARN.killAllApplications(useYarnUser=True)

        ## TODO - call Finalize
        if finalize:
            Rollingupgrade.ru_finalize_state(components)

        ## TODO - call Teardown for long running jobs
        if doTeardown:
            Rollingupgrade.background_job_teardown(components, None)
        cls.reportProgress(
            "###  Completed upgrade from %s to %s for components=%s ####" %
            (currVersion, latestVersion, components))
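
A minimal driver sketch for the flow above, assuming the enclosing class is UpgradePerNode (a guess from the companion snippets; it calls cls.reportProgress) and using hypothetical component and version values:

    # hypothetical component list and HDP build numbers
    UpgradePerNode.hdp_upgrade(['argus', 'zookeeper', 'hdfs', 'yarn', 'hive'],
                               currVersion='2.2.0.0-2041',
                               latestVersion='2.2.4.2-2',
                               doTeardown=True,
                               finalize=True)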
Example no. 24
 def getComponentHosts(self, service, component):
     clust_name = Ambari.getClusterName()
     host_list = Ambari.getServiceHosts(service, component, cluster=clust_name, is_enc=Xa.isWireEncryptionOn())
     hostList = ','.join([str(i) for i in host_list])
     return hostList
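
For example (hypothetical service/component pair), the helper above returns the hosts as one comma-separated string:

    # e.g. returns 'host1.example.com,host2.example.com'
    datanode_hosts = self.getComponentHosts('HDFS', 'DATANODE')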
Example no. 25
 def setupRanger(cls):
     if Xa.isPolicyAdminRunning():
         # bind to the module-level name so resetRanger() can see the policy
         global RANGER_KNOX_POLICY
         RANGER_KNOX_POLICY = Knox.setupOpenRangerKnoxPolicy()
         Knox.restartKnox()