Example #1
    def resetNode(cls, nodename, hostlist, action):
        HADOOP_HOME = Config.get('hadoop', 'HADOOP_HOME')
        HADOOP_BIN = os.path.join(HADOOP_HOME, "bin")
        # Map each daemon type to the master/slave control script that manages it.
        nodetype = {'namenode': 'master',
                    'datanode': 'slave',
                    'jobtracker': 'master',
                    'tasktracker': 'slave',
                    }[nodename]

        MAPRED_USER = Config.get('hadoop', 'MAPRED_USER')
        HDFS_USER = Config.get('hadoop', 'HDFS_USER')

        user = {'namenode': HDFS_USER,
                'datanode': MAPRED_USER,
                'jobtracker': HDFS_USER,
                'tasktracker': MAPRED_USER,
                }[nodename]
        cmd = os.path.join(HADOOP_BIN, action + "-" + nodetype + ".cmd")

        # Resolve each host to an IPv4 address before dispatching the command.
        pattern = r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
        for host in hostlist:
            if not re.search(pattern, host):
                host = host.split(':')[0]
                host = socket.gethostbyname(host.replace('\n', '').strip())

            host = re.findall(pattern, host)[0]
            logger.info(action + " " + nodename + " on host " + host)
            Machine.runinbackgroundAs(user, cmd, host, HADOOP_BIN)
Example #2
def getTTHostForAttemptId(cls, attemptID):
    HADOOP_JOBTRACKER_LOG = Config.get('hadoop', 'HADOOP_JOBTRACKER_LOG')
    # Scan the JobTracker log for the task attempt and return the IPv4
    # address of the tasktracker that ran it.
    with open(HADOOP_JOBTRACKER_LOG, "r") as f:
        for line in f:
            searchFor = re.search(r".*" + attemptID + r".*tracker_.*/(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", line)
            if searchFor is not None:
                return searchFor.group(1)
    return None
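
A quick, self-contained check of the pattern above against a fabricated JobTracker log line (the attempt ID and tracker address are made up for illustration):

import re

attemptID = "attempt_201301010000_0001_m_000000_0"
line = ("2013-01-01 00:00:00,000 INFO org.apache.hadoop.mapred.JobTracker: "
        "Adding task " + attemptID + " to tip task_201301010000_0001_m_000000, "
        "for tracker 'tracker_host1:localhost/10.0.0.12:50060'")
m = re.search(r".*" + attemptID + r".*tracker_.*/(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", line)
print(m.group(1) if m else None)  # -> 10.0.0.12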
Example #3
def getDatanodeCount(cls):
    exit_code, output = Hadoop.runas(Config.get('hadoop', 'HDFS_USER'), "dfsadmin -report")
    if exit_code == 0:
        # re.DOTALL lets ".*" span the report lines that precede the summary.
        m = re.match(r".*Datanodes available: (\d+) \(", output, re.DOTALL)
        if m:
            return int(m.group(1))
    return 0
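
The summary line sits several lines into the report, which is why the pattern relies on re.DOTALL. A minimal check against fabricated `dfsadmin -report` output:

import re

output = """Configured Capacity: 1000000 (976.56 KB)
Present Capacity: 900000 (878.91 KB)
-------------------------------------------------
Datanodes available: 3 (3 total, 0 dead)
"""
m = re.match(r".*Datanodes available: (\d+) \(", output, re.DOTALL)
print(int(m.group(1)) if m else 0)  # -> 3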
Example #4
def triggerSleepJob(cls, numOfMaps, numOfReduce, mapsleeptime, reducesleeptime, numOfJobs, queue='', background=False):
    jobCounter = 0
    while jobCounter < numOfJobs:
        # The parameters are concatenated straight into the command string,
        # so callers are expected to pass the numeric values as strings.
        sleepCmd = " jar " + Config.get('hadoop', 'HADOOP_EXAMPLES_JAR') \
                   + " sleep " + queue + " -m " + numOfMaps + " -r " + numOfReduce \
                   + " -mt " + mapsleeptime + " -rt " + reducesleeptime
        if background:
            Hadoop.runInBackground(sleepCmd)
        else:
            Hadoop.run(sleepCmd)
        jobCounter += 1
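
A hedged usage sketch; the class that owns triggerSleepJob is not shown above, so the name `HadoopJobHelper` is hypothetical. Numeric arguments go in as strings, and `queue` as a pre-formatted generic option:

# HadoopJobHelper is a hypothetical name for the class owning triggerSleepJob.
HadoopJobHelper.triggerSleepJob("10", "5", "1000", "1000", 2,
                                queue="-Dmapred.job.queue.name=default",
                                background=True)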
Example #5
def generateTestReportConf(infile, outfile, results):
    config = ConfigParser()
    config.optionxform = str
    config.read(infile)
    if config.has_section(SECTION):
        for option, value in config.items(SECTION):
            if value != "": continue
            elif option == "BUILD_ID" and config.has_option(SECTION, "REPO_URL"):
                config.set(SECTION, option, getBuildId(config.get(SECTION, "REPO_URL")))
                config.remove_option(SECTION, "REPO_URL")
            elif option == "HOSTNAME":
                config.set(SECTION, option, socket.getfqdn())
            elif option == "COMPONENT_VERSION":
                if not config.has_option(SECTION, "COMPONENT") or config.get(SECTION, "COMPONENT") == "":
                    config.set(SECTION, "COMPONENT", "Hadoop")
                config.set(SECTION, option, getComponentVersion(config.get(SECTION, "COMPONENT")))
            elif option == "OS":
                config.set(SECTION, option, platform.platform())
            elif option == "SECURE" and Config.hasOption('hadoop', 'IS_SECURE'):
                config.set(SECTION, option, Config.get('hadoop', 'IS_SECURE').lower())
            elif option == "BLOB":
                pass
            elif option == "RAN":
                config.set(SECTION, option, results[0] + len(results[1]))
            elif option == "PASS":
                config.set(SECTION, option, results[0])
            elif option == "FAIL":
                config.set(SECTION, option, len(results[1]))
            elif option == "SKIPPED":
                config.set(SECTION, option, results[2])
            elif option == "ABORTED":
                config.set(SECTION, option, results[3])
            elif option == "FAILED_TESTS":
                config.set(SECTION, option, ",".join(results[1]))
            elif option == "SINGLE_NODE":
                from beaver.component.hadoop import HDFS
                if HDFS.getDatanodeCount() > 1:
                    config.set(SECTION, option, "false")
                else:
                    config.set(SECTION, option, "true")
    with open(outfile, 'w') as f:
        config.write(f)
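
A minimal, self-contained sketch of the fill-in behaviour above; SECTION is assumed to stand for the module's report-section constant, and the input file here is fabricated. Only options whose value is empty get computed and overwritten:

import os
import tempfile

try:
    from configparser import ConfigParser   # Python 3
except ImportError:
    from ConfigParser import ConfigParser   # Python 2, as in the code above

SECTION = "REPORT"   # hypothetical section name; the real constant lives elsewhere
path = os.path.join(tempfile.gettempdir(), "report.conf")
with open(path, "w") as f:
    f.write("[%s]\nHOSTNAME =\nOS = Linux\n" % SECTION)

config = ConfigParser()
config.optionxform = str   # keep option names case-sensitive ("HOSTNAME" stays upper-case)
config.read(path)
for option, value in config.items(SECTION):
    print("%s would be %s" % (option, "computed" if value == "" else "kept as %r" % value))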
Example #6
def getDruidUser(cls):
    '''
    Returns Druid user
    '''
    return Config.get('druid', 'DRUID_USER')

def getHadoopExamplesJar(cls):
    return Config.get('hadoop', 'HADOOP_EXAMPLES_JAR')
Example #8
def getConfigValue(cls, propertyValue, defaultValue=None):
    return util.getPropertyValueFromConfigXMLFile(
        os.path.join(Config.get('hadoop', 'HADOOP_CONF'), "core-site.xml"),
        propertyValue, defaultValue=defaultValue)

def getLocalDirInfo(host):
    # Note: the host argument is currently unused; the value comes from the
    # local mapred-site.xml.
    return util.getPropertyValueFromConfigXMLFile(
        os.path.join(Config.get('hadoop', 'HADOOP_CONF'), "mapred-site.xml"),
        "mapred.local.dir")
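
The util helper is not shown in these snippets; a plausible stand-in (an assumption, not the framework's actual code) simply walks the <property> entries of a Hadoop-style *-site.xml:

import xml.etree.ElementTree as ET

def getPropertyValueFromConfigXMLFile(xml_file, name, defaultValue=None):
    # *-site.xml files look like:
    # <configuration><property><name>k</name><value>v</value></property>...</configuration>
    for prop in ET.parse(xml_file).getroot().iter('property'):
        if prop.findtext('name') == name:
            return prop.findtext('value')
    return defaultValue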
Example #10
def exitSafemode(cls):
    exit_code, output = Hadoop.runas(Config.get('hadoop', 'HDFS_USER'), "dfsadmin -safemode leave")
    return re.search("Safe mode is OFF", output) is not None
Example #11
def runInBackgroundAs(cls, user, cmd, cwd=None, env=None):
    flume_cmd = Config.get('flume-ng', 'FLUME_CMD')
    flume_cmd += " " + cmd
    return Machine.runinbackgroundAs(user, flume_cmd, cwd=cwd, env=env)
Example #13
import logging
import os
import random
import ssl
import tempfile
import threading
import time
import urllib
import urllib2

from beaver.component.hadoop import Hadoop
from beaver.config import Config

logger = logging.getLogger(__name__)

KNOX_GUEST_USER = "******"
KNOX_GUEST_PWD = "guest-password"
KNOX_HOST = Config.get('knox', 'KNOX_HOST').split(',')[0]  # Just use the first Knox instance in the list for now.
KNOX_PORT = "8443"
HDFS_USER = Config.get('hadoop', 'HDFS_USER')
WEBHDFS_CONTEXT_ROOT = "/gateway/sandbox/webhdfs/v1"
if Hadoop.isAmbari():
    WEBHDFS_CONTEXT_ROOT = "/gateway/default/webhdfs/v1"


class KnoxWebHdfsLoadGenerator(object):
    """
  A class that generates WebHDFS load though the Knox gateway.
  Note: This currently does not support AD authentication.

  The following operations are currently performed in a loop by the load generator in each of the load threads.
  1. Mkdir
  2. Rename dir
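
The rest of the class is truncated in this example. As a hedged illustration of what one loop iteration's "Mkdir" step could look like (assumptions: the module constants above are reused, Knox runs with a self-signed certificate, and HTTP basic auth is sufficient):

import base64
import json

def knox_webhdfs_mkdirs(path, user=KNOX_GUEST_USER, pwd=KNOX_GUEST_PWD):
    url = "https://%s:%s%s%s?op=MKDIRS" % (KNOX_HOST, KNOX_PORT,
                                           WEBHDFS_CONTEXT_ROOT, path)
    request = urllib2.Request(url, data="")
    request.get_method = lambda: "PUT"   # WebHDFS MKDIRS is an HTTP PUT
    request.add_header("Authorization",
                       "Basic " + base64.b64encode("%s:%s" % (user, pwd)))
    context = ssl._create_unverified_context()   # test clusters: skip cert checks
    response = urllib2.urlopen(request, context=context)
    return json.loads(response.read())   # e.g. {"boolean": true}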
Example #15
class ruHive:
    _metastore_backup_file = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                          "metastore_backup.sql")
    _yarn_queue = "hive"
    _hdfs_smoketest_dir = "/tmp/hivetest"
    _smoketest_tbl = "smtesttbl"
    _hdfs_bgjtest_dir = "/tmp/hivelrtest"
    _bgjtest_tbl = "lrtesttbl"
    _hdfs_user = Config.get("hadoop", 'HDFS_USER')
    _num_of_webhcat_bgj = 1
    _num_of_rows_lr = 10000
    _num_of_rows_smoke = 100
    _max_bgjtest_duration = 72000000
    _hs2_live_ports = []
    _shortbgj_hive_process = None
    _shortbgj_bline_process = None

    @classmethod
    def background_job_setup(cls, runSmokeTestSetup=False, config=None):
        '''
        Setup for background long running job
        :param runSmokeTestSetup: Runs smoke test setup if set to true
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][BGJob] Long running test setup for Hive started")
        cls.doBackgroundJobSetup(cls._hdfs_bgjtest_dir)
        UpgradePerNode.reportProgress(
            "[INFO][Hive][BGJob] Long running test setup for Hive finished")
        if runSmokeTestSetup:
            logger.info("**** Running Hive Smoke Test Setup ****")
            cls.smoke_test_setup()

    @classmethod
    def smoke_test_setup(cls):
        '''
        Setup required to run Smoke test
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Smoke] Smoke test setup for Hive started")
        cls.doSetup(cls._hdfs_smoketest_dir, cls._smoketest_tbl,
                    cls._num_of_rows_smoke, "smoke")
        UpgradePerNode.reportProgress(
            "[INFO][Hive][Smoke] Smoke test setup for Hive finished")

    @classmethod
    def doBackgroundJobSetup(cls, hdfs_test_dir):

        from beaver.component.hive import Hive
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        logger.info("Preparing the test setup for Hive background job")
        udfjar = os.path.join(Config.getEnv('WORKSPACE'), "tests", "hive",
                              "hive-udf", "hive-udfs-0.1.jar")
        HDFS.createDirectory(hdfs_test_dir,
                             user=cls._hdfs_user,
                             perm='777',
                             force=True)
        HDFS.copyFromLocal(udfjar, hdfs_test_dir)
        query = "drop function sleep; create function sleep as 'org.apache.hive.udf.generic.GenericUDFSleep' using jar 'hdfs://%s/hive-udfs-0.1.jar';" % hdfs_test_dir
        exit_code, stdout = Hive.runQuery(query)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Setup] Long running failed due to exitcode = %d"
                % exit_code)
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][Setup] Long running finished successfully")

    @classmethod
    def doSetup(cls, hdfs_test_dir, tbl_name, num_of_rows, type):

        from beaver.component.hive import Hive
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        logger.info("Generating test table dataset with %d rows" % num_of_rows)
        test_data_file = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                      tbl_name + ".dat")
        f = open(test_data_file, 'w')
        userid = 100000
        for i in xrange(num_of_rows):
            for j in range(random.randint(3, 8)):
                f.write("%d|%d\n" % (userid + i, random.randint(10, 80)))
        f.close()

        hdfs_tbl_dir = hdfs_test_dir + "/" + tbl_name
        logger.info("Copying the test dataset to HDFS directory '%s'" %
                    hdfs_tbl_dir)
        HDFS.createDirectory(hdfs_test_dir,
                             user=cls._hdfs_user,
                             perm='777',
                             force=True)
        HDFS.createDirectory(hdfs_tbl_dir, perm='777')
        HDFS.copyFromLocal(test_data_file, hdfs_tbl_dir)
        HDFS.chmod(cls._hdfs_user, '777', hdfs_tbl_dir)

        logger.info("Creating table '%s' and verification tables" % tbl_name)
        query = "drop table if exists %s;\n" % tbl_name
        query += "create external table %s (userid string, age int) row format delimited fields terminated by '|' stored as textfile location '%s';\n" % (
            tbl_name, hdfs_tbl_dir)
        query += "drop table if exists %s_hive_verify;\n" % tbl_name
        query += "create table %s_hive_verify (userid string, age int);\n" % tbl_name
        if type == "Long running":
            for i in range(cls._num_of_webhcat_bgj):
                query += "drop table if exists %s_wh_%d;\n" % (tbl_name, i + 1)
                query += "create table %s_wh_%d (userid string, age int);\n" % (
                    tbl_name, i + 1)
        hivesetupfile = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                     "hivesetup.sql")
        util.writeToFile(query, hivesetupfile)
        exit_code, stdout = Hive.run("-f " + hivesetupfile, logoutput=False)
        if type:
            msg = "%s job setup for Hive component" % type
            if exit_code != 0:
                UpgradePerNode.reportProgress(
                    "[FAILED][Hive][Setup] %s failed due to exitcode = %d" %
                    (msg, exit_code))
            else:
                UpgradePerNode.reportProgress(
                    "[PASSED][Hive][Setup] %s finished successfully" % msg)

    @classmethod
    def run_background_job(cls, runSmokeTestSetup=False, config=None):
        '''
        Runs background long running Hive Job
        :param runSmokeTestSetup: Runs smoke test setup if set to true
        :param config: expected configuration location
        :return: Total number of long running jobs started
        '''
        from beaver.component.hive import Hive
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][BGJob] Long running job for Hive component started")

        setqueue = ""
        if Hive.isTezEnabled():
            setqueue = "set tez.queue.name=%s; " % cls._yarn_queue
        else:
            setqueue = "set mapred.job.queue.name=%s; " % cls._yarn_queue

        logger.info("**** Running Hive CLI Test ****")
        query = setqueue + " create table if not exists hive_cli_lr (a string); select sleep(%d, 2000, 'hdfs://%s/hive_cli_lr', 'hdfs://%s/END') from (select count(*) from hive_cli_lr) a;" % (
            cls._max_bgjtest_duration, cls._hdfs_bgjtest_dir,
            cls._hdfs_bgjtest_dir)
        Hive.runQuery(query, background=True)

        logger.info("**** Running Beeline CLI Test ****")
        # Create the sleep function within the same Beeline session.
        # Functions created outside of the HS2 instance are not picked up.
        query = setqueue + "\n"
        query += "drop function sleep2;\n"
        query += "create function sleep2 as 'org.apache.hive.udf.generic.GenericUDFSleep' using jar 'hdfs://%s/hive-udfs-0.1.jar';\n" % cls._hdfs_bgjtest_dir
        query += "create table if not exists bline_cli_lr (a string);\n"
        query += "select sleep2(%d, 2000, 'hdfs://%s/bline_cli_lr', 'hdfs://%s/END') from (select count(*) from bline_cli_lr) a;\n" % (
            cls._max_bgjtest_duration, cls._hdfs_bgjtest_dir,
            cls._hdfs_bgjtest_dir)
        Hive.runQueryOnBeeline(query, readFromFile=True, background=True)

        logger.info("**** Running WebHCat Test ****")
        webhcatHost = Config.get('templeton',
                                 'TEMPLETON_HOST',
                                 default=Machine.getfqdn())
        webhcatPort = Config.get('templeton',
                                 'TEMPLETON_PORT',
                                 default="50111")
        url = "http://%s:%s/templeton/v1/hive" % (webhcatHost, webhcatPort)
        query = setqueue + " set mapred.task.timeout=0; create table if not exists whcat_rest_lr (a string); select sleep(%d, 2000, 'hdfs://%s/whcat_rest_lr', 'hdfs://%s/END') from (select count(*) from whcat_rest_lr) a;" % (
            cls._max_bgjtest_duration, cls._hdfs_bgjtest_dir,
            cls._hdfs_bgjtest_dir)
        params = {'execute': query}
        status_code, stdout = util.curl(url, method='POST', params=params)
        retry = 0
        while status_code == 404 and retry < 3:
            time.sleep(15)
            status_code, stdout = util.curl(url, method='POST', params=params)
            retry += 1
        if status_code != 200:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJobSetup] Long running job for WebHCat failed due to status code = %d"
                % status_code)
            logger.error(
                "Webhcat request failed with the following error: %s\n" %
                stdout)

        if runSmokeTestSetup:
            logger.info("**** Running Hive Smoke Test Setup ****")
            cls.smoke_test_setup()
        return 3

    @classmethod
    def run_smoke_test(cls, smoketestnumber, config=None):
        '''
        Run smoke test for hive
        :param smoketestnumber: Used for unique output log location
        '''
        return cls.run_client_smoketest(config=config)

    @classmethod
    def background_job_teardown(cls):
        '''
        Cleanup for long running Hive jobs
        '''
        from beaver.component.hive import Hive

        logger.info(
            "Make sure to switch the HiveServer2 to use the default port")
        adminUser = Machine.getAdminUser()
        hiveHost = Hive.getHiveHost()
        for port in cls._hs2_live_ports:
            pid = Machine.getPIDByPort(port, host=hiveHost, user=adminUser)
            if pid:
                Machine.killProcessRemote(pid, host=hiveHost, user=adminUser)
                time.sleep(2)
        if len(cls._hs2_live_ports) > 0:
            Hive.startService(services=["hiveserver2"])

    @classmethod
    def stopHiveLongRunningJob(cls):
        '''
        Stop the long running background queries
        '''
        logger.info("Push the file to HDFS to signal END of queries")
        HDFS.touchz(cls._hdfs_bgjtest_dir + "/END")

    @classmethod
    def verifyLongRunningJob(cls):
        '''
        Validate long running background job after end of all component upgrade
        '''
        # Assumption is all the MR jobs for Hive are alive and running

        from beaver.component.hive import Hive
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        logger.info(
            "Verify the query triggered by Hive CLI is alive post upgrade")
        passed = cls.verifyLongRunningQuery("hive_cli_lr")
        if passed:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][BGJob] Long running test for Hive Metastore passed"
            )
        else:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Long running test for Hive Metastore failed"
            )

        logger.info(
            "Verify the query triggered by Beeline CLI is alive post upgrade")
        passed = cls.verifyLongRunningQuery("bline_cli_lr")
        if passed:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][BGJob] Long running test for HiveServer2 passed"
            )
        else:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Long running test for HiveServer2 failed"
            )

        logger.info(
            "Verify the query triggered by WebHCat is alive post upgrade")
        passed = cls.verifyLongRunningQuery("whcat_rest_lr")
        if passed:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][BGJob] Long running test for WebHCat passed")
        else:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Long running test for WebHCat failed")

    @classmethod
    def verifyLongRunningQuery(cls, file_to_verify):
        lfile = os.path.join(Config.getEnv('ARTIFACTS_DIR'), file_to_verify)
        exit_code, stdout = HDFS.copyToLocal(
            cls._hdfs_bgjtest_dir + "/" + file_to_verify, lfile)
        if exit_code != 0:
            logger.info("Error fetching the timestamp file from HDFS")
            return False
        with open(lfile, 'r') as f:
            lines = f.readlines()
        if len(lines) == 0:
            logger.info("Empty timestamp file")
            return False
        try:
            ts = int(lines[-1])
            # Shutdown gracefully
            if ts == -1:
                return True
            # The timestamp should be under 5 minutes old, indicating the
            # UDF wrote something at least once in the last 5 minutes
            timegap = time.time() - (ts / 1000)
            if timegap > 300:
                logger.info(
                    "Time gap is %d seconds, last line in the timestamp file was '%d'"
                    % (timegap, ts))
                return False
        except ValueError:
            logger.info("Error parsing last line in the timestamp file => '" +
                        lines[-1] + "'")
            return False
        return True

    @classmethod
    def background_job_when_master_upgrade(cls):
        '''
        Start a background application which runs while component master service gets upgraded
        :return:
        '''
        from beaver.component.hive import Hive
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][BGJob] Background Job test setup when upgrading Hive started"
        )

        logger.info("Creating hive tables for short background jobs")
        query = "drop table if exists shortlr_hive_verify;\n"
        query += "create table shortlr_hive_verify (userid string, age int);\n"
        query += "drop table if exists shortlr_bline_verify;\n"
        query += "create table shortlr_bline_verify (userid string, age int);\n"
        query += "drop table if exists shortlr_bline_verify;\n"
        query += "create table shortlr_bline_verify (userid string, age int);\n"
        short_bgjob_setupfile = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                             'shortlrsetup.sql')
        util.writeToFile(query, short_bgjob_setupfile)

        exit_code, stdout = Hive.run("-f " + short_bgjob_setupfile)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Background Job test setup when Hive upgrades failed due to exitcode = %d"
                % exit_code)

        logger.info("Running the Background Job when upgrading Hive")
        UpgradePerNode.reportProgress(
            "[INFO][Hive][BGJob] Long running job for Hive component upgrades started"
        )

        setqueue = ""
        if Hive.isTezEnabled():
            setqueue = "set tez.queue.name=%s; " % cls._yarn_queue
        else:
            setqueue = "set mapred.job.queue.name=%s; " % cls._yarn_queue

        logger.info("**** Running Hive CLI Test ****")
        query = setqueue + " insert overwrite table shortlr_hive_verify select userid, avg(age) from %s group by userid order by userid;" % cls._bgjtest_tbl
        cls._shortbgj_hive_process = Hive.runQuery(query, background=True)

        # Sleeping for 10 seconds to make sure that query initializes before Metastore is restarted
        time.sleep(10)

        logger.info("**** Running Beeline CLI Test ****")
        query = setqueue + "\ninsert overwrite table shortlr_bline_verify select userid, avg(age) from %s group by userid order by userid;" % cls._bgjtest_tbl
        cls._shortbgj_bline_process = Hive.runQueryOnBeeline(query,
                                                             readFromFile=True,
                                                             background=True)

        UpgradePerNode.reportProgress(
            "[INFO][Hive][BGJob] Background Job test setup when Hive upgrades finished"
        )

    @classmethod
    def background_job_teardown_when_master_upgrade(cls):
        '''
        Clean up for background job which started before upgrading master services
        :return:
        '''
        cls._shortbgj_hive_process = None
        cls._shortbgj_bline_process = None

    @classmethod
    def verify_background_job_when_master_upgrade(cls):
        '''
        Validate background job Succeeded when master got upgraded
        :return:
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        logger.info("Verify the successful completion of Hive CLI job")
        starttime = time.time()
        while (time.time() - starttime <
               300) and cls._shortbgj_hive_process.poll() is None:
            time.sleep(5)
        procpoll = cls._shortbgj_hive_process.poll()
        if procpoll is None:
            logger.info("Killing Hive CLI process '%d'" %
                        cls._shortbgj_hive_process.pid)
            Machine.killProcess(cls._shortbgj_hive_process.pid)
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Background job during master upgrade failed as Hive CLI failed to finish"
            )
        elif procpoll != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Background job during master upgrade failed as Hive CLI exited with '%d'"
                % procpoll)
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][BGJob] Background job for Hive CLI during master upgrade finished successfully"
            )

        logger.info("Verify the successful completion of Beeline CLI job")
        starttime = time.time()
        while (time.time() - starttime <
               120) and cls._shortbgj_bline_process.poll() is None:
            time.sleep(5)
        procpoll = cls._shortbgj_bline_process.poll()
        if procpoll is None:
            logger.info("Killing Beeline CLI process '%d'" %
                        cls._shortbgj_bline_process.pid)
            Machine.killProcess(cls._shortbgj_bline_process.pid)
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Background job during master upgrade failed as Beeline CLI failed to finish"
            )
        elif procpoll != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][BGJob] Background job during master upgrade failed as Beeline CLI exited with '%d'"
                % procpoll)
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][BGJob] Background job for Beeline CLI during master upgrade finished successfully"
            )

    @classmethod
    def upgrade_master(cls, version, config=None):
        '''
        Upgrades Master services:
        :param version: Version to be upgraded to
        :param config: Config location
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Upgrade] Hive master components upgrade started")

        cls.background_job_when_master_upgrade()
        cls.switch_master_version('upgrade', version, config=config)
        cls.verify_background_job_when_master_upgrade()
        cls.background_job_teardown_when_master_upgrade()

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Upgrade] Hive master components upgrade finished")

    @classmethod
    def upgrade_slave(cls, version, node, config=None):
        '''
        Upgrades slave services :
        :param version: Version to be upgraded to
        :param node: Slave Node
        :param config: Config location
        :return:
        '''
        logger.info("Hive does not have any slaves, so no slaves to upgrade")

    @classmethod
    def downgrade_master(cls, version, config=None):
        '''
        Downgrade Hive Master services
        :param version: Version to be downgraded to
        :param config: Configuration location
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Downgrade] Hive master components downgrade started")

        cls.background_job_when_master_upgrade()
        cls.switch_master_version('downgrade', version, config=config)
        cls.verify_background_job_when_master_upgrade()
        cls.background_job_teardown_when_master_upgrade()

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Downgrade] Hive master components downgrade finished"
        )

    @classmethod
    def downgrade_slave(cls, version, node, config=None):
        '''
        Downgrade Hive slave services
        :param version: version to be downgraded to
        :param config: Configuration location
        '''
        logger.info("Hive does not have any slaves, so no slaves to downgrade")

    @classmethod
    def switch_master_version(cls, action, version, config=None):
        '''
        Switches Hive master services' version
        :param action: Whether to "upgrade" or "downgrade"
        :param version: Version to be switched to
        :param config: Configuration location
        '''
        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        from beaver.component.hive import Hive

        currentHiveVersion = Hive.getVersion()

        if action == 'upgrade':
            # Backup the database used by the Hive Metastore
            logger.info(
                "Performing backup of the Hive Metastore DB before starting the upgrade"
            )
            Hive.backupMetastoreDB(cls._metastore_backup_file)

        node = Hive.getHiveHost()

        # Stop the old Hive Metastore
        logger.info("Stopping the Hive Metastore")
        Hive.stopService(services=["metastore"])

        # Upgrade Hive Metastore servers to new version
        hdpSelect.changeVersion("hive-metastore", version, node)

        if action == 'upgrade':
            logger.info("Upgrading the Hive metastore schema")
            Hive.upgradeSchema()

        # Restart Hive Metastore servers one at a time
        logger.info("Restarting the Hive Metastore")
        Hive.startService(services=["metastore"])

        # Start new Hive Server 2 instance
        confHS2Port = Hive.getHiveserver2ThriftPort()
        hs2port = util.getNextAvailablePort(node, confHS2Port)

        hdpSelect.changeVersion("hive-server2", version, node)

        Hive.modifyConfig(config,
                          services=['hiveserver2'],
                          restartService=False)
        logger.info(
            "Starting a new HiveServer2 at port '%d' for assisting rolling-upgrade"
            % hs2port)
        if hs2port != confHS2Port:
            changes = {'hive-site.xml': {'hive.server2.thrift.port': hs2port}}
            Hive.modifyConfig(changes,
                              services=["hiveserver2"],
                              restartService=False)
        Hive.startService(services=["hiveserver2"])
        cls._hs2_live_ports = [Hive.getHiveserver2ThriftPort(), hs2port]

        # Deregister the old Hive Server 2 instances
        logger.info("Deregistering the HiveServer2 on version '%s'" %
                    currentHiveVersion)
        Hive.deregisterHiveServer2(version=currentHiveVersion)

        from beaver.component.hcatalog import Hcatalog

        # Resolve the WebHCat host and port
        node = Config.get('templeton',
                          'TEMPLETON_HOST',
                          default=Machine.getfqdn())
        webhcatPort = Config.get('templeton',
                                 'TEMPLETON_PORT',
                                 default="50111")
        # Stop the old WebHCat server
        logger.info("Stopping the WebHCat server")
        Hcatalog.stop(node)

        # Upgrade WebHCat to the new version
        hdpSelect.changeVersion("hive-webhcat", version, node)

        # Start the WebHCat server
        logger.info("Restarting the WebHCat server")
        newConfDir = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                  'localWebhcatConf')
        if os.path.exists(newConfDir):
            Hcatalog.start(node, hcat_confdir=newConfDir)
        else:
            Hcatalog.start(node)

    @classmethod
    def run_client_smoketest(cls, config=None, env=None):
        '''
        Run Smoke test after upgrading Client
        :param config: Configuration location
        :param env: Set Environment variables
        '''
        from beaver.component.hive import Hive
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Smoke] Smoke test for Hive component started")

        setqueue = ""
        if Hive.isTezEnabled():
            setqueue = "set tez.queue.name=%s; " % cls._yarn_queue
        else:
            setqueue = "set mapred.job.queue.name=%s; " % cls._yarn_queue

        logger.info("**** Running Hive CLI Test ****")
        query = setqueue + " insert overwrite table %s_hive_verify select userid, avg(age) from %s group by userid order by userid; " % (
            cls._smoketest_tbl, cls._smoketest_tbl)
        query += "select count(*) from %s_hive_verify;" % cls._smoketest_tbl
        exit_code, stdout, stderr = Hive.runQuery(query,
                                                  stderr_as_stdout=False)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for Hive Metastore failed with exit code '%d'"
                % exit_code)
            logger.error(
                "Smoke test for Hive failed with the following error: " +
                stderr)
        elif stdout.find("%d" % cls._num_of_rows_smoke) == -1:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for Hive Metastore failed to verify number of rows in output"
            )
            logger.error(
                "Smoke test for Hive failed to find [%d] in output [%s]" %
                (cls._num_of_rows_smoke, stdout))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][Smoke] Smoke test for Hive Metastore succeeded"
            )
            logger.info("Smoke test for Hive Metastore succeeded")

        logger.info("**** Running Beeline CLI Test ****")
        query = setqueue + "\ndrop table if exists %s_bline_verify;\n" % cls._smoketest_tbl
        query += "create table %s_bline_verify (userid string, age int);\n" % cls._smoketest_tbl
        query += "insert overwrite table %s_bline_verify select userid, avg(age) from %s group by userid order by userid;\n" % (
            cls._smoketest_tbl, cls._smoketest_tbl)
        query += "select count(*) from %s_bline_verify;\n" % cls._smoketest_tbl
        exit_code, stdout, stderr = Hive.runQueryOnBeeline(query,
                                                           readFromFile=True)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for HiveServer2 failed with exit code '%d'"
                % exit_code)
            logger.error(
                "Smoke test for HiveServer2 failed with the following error: "
                + stderr)
        elif stdout.find("%d" % cls._num_of_rows_smoke) == -1:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for HiveServer2 failed to verify number of rows in output"
            )
            logger.error(
                "Smoke test for HiveServer2 failed to find [%d] in output [%s]"
                % (cls._num_of_rows_smoke, stdout))
        else:
            logger.info("Smoke test for HiveServer2 succeeded")

        logger.info("**** Running WebHCat Smoke Test ****")
        query = "show tables;"
        webhcatHost = Config.get('templeton',
                                 'TEMPLETON_HOST',
                                 default=Machine.getfqdn())
        webhcatPort = Config.get('templeton',
                                 'TEMPLETON_PORT',
                                 default="50111")
        url = "http://%s:%s/templeton/v1/ddl" % (webhcatHost, webhcatPort)
        params = {'exec': query}
        status_code, stdout = util.curl(url, method='POST', params=params)
        if status_code != 200:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for WebHCat failed due to status code = %d"
                % status_code)
        else:
            logger.info("Smoke test for WebHCat succeeded")

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Smoke] Smoke test for Hive component finished")

    @classmethod
    def testAfterAllSlavesRestarted(cls):
        '''
        Test that the upgrade was performed properly after all masters and slaves are upgraded
        :return:
        '''
        logger.info("Hive does not have any slaves, so nothing to do here")
Example #16
    def downgrade_master(cls, version, config=None):
        '''
        Downgrade HDFS Master services
        :param version: Version to be downgraded to
        :param config: Configuration location
        '''
        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        if HDFS.isHAEnabled():
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Namenode Downgrade Started ")
            nodes = []
            nodes.append(HDFS.getNamenodeByState('standby'))
            nodes.append(HDFS.getNamenodeByState('active'))
            for node in nodes:
                HDFS.resetNamenode('stop', host=node)
                HDFS.resetZkfc('stop', hosts=node.split())
                # BUG-25534: There is no package for zkfc. So just updating the NN is enough.
                hdpSelect.changeVersion("hadoop-hdfs-namenode", version, node)
                HDFS.resetNamenode('start', config=config, host=node)
                HDFS.resetZkfc('start', hosts=node.split())
                # let's make sure the NN is out of safemode before we proceed to the next namenode
                HDFS.waitForNNOutOfSafemode(options='-fs hdfs://%s:8020' %
                                            node)

            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Namenode Downgrade Finished ")
            jn_nodes = HDFS.getJournalNodes()
            if len(jn_nodes) < 3:
                UpgradePerNode.reportProgress(
                    "[INFO][HDFS][Downgrade] Less than three Journal Nodes. Not going to do Downgrade "
                )
                return

            hdfs_user = Config.get('hadoop', 'HDFS_USER')
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Journal Node Downgrade Started")
            cls.ensure_all_jns_are_up(jn_nodes)
            # Loop through all the JNs and stop, flip, and start them one at a time
            for node in jn_nodes:
                Hadoop.resetService(hdfs_user,
                                    node,
                                    "journalnode",
                                    'stop',
                                    binFolder="sbin")
                hdpSelect.changeVersion("hadoop-hdfs-journalnode", version,
                                        node)
                Hadoop.resetService(hdfs_user,
                                    node,
                                    "journalnode",
                                    'start',
                                    binFolder="sbin")
                time.sleep(5)
                cls.ensure_all_jns_are_up(jn_nodes)

            cls.ensure_nn_is_active()
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Journal Node Downgrade Finished ")
        else:
            # Non-HA path: downgrade the Namenode, then the Secondary Namenode
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Namenode Downgrade Started ")
            node = HDFS.getNamenode()
            HDFS.stopNamenode()
            hdpSelect.changeVersion("hadoop-hdfs-namenode", version, node)
            HDFS.startNamenode(config=config)
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Namenode Downgrade Finished ")
            # downgrade the Secondary Namenode
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Secondary Namenode Downgrade Started ")
            node = HDFS.getSecondaryNamenode()
            HDFS.stopSecondaryNamenode()
            hdpSelect.changeVersion("hadoop-hdfs-secondarynamenode", version,
                                    node)
            HDFS.startSecondaryNamenode(config=config)
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Secondary Namenode Downgrade Finished"
            )
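
ensure_all_jns_are_up and ensure_nn_is_active are referenced above but not included in the snippet. A plausible stand-in for the first (an assumption, not the suite's actual helper) polls each JournalNode's RPC port, 8485 by default, until it accepts connections:

import socket
import time

def ensure_all_jns_are_up(jn_nodes, port=8485, timeout=300):
    deadline = time.time() + timeout
    pending = list(jn_nodes)
    while pending and time.time() < deadline:
        try:
            socket.create_connection((pending[0], port), 5).close()
            pending.pop(0)      # this JournalNode answers; move to the next
        except socket.error:
            time.sleep(5)       # not up yet; retry until the deadline
    assert not pending, "JournalNodes still down: %s" % pending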
Example #17
            xmlDict["yarn.scheduler.capacity.root.%s.acl_administer_jobs" %
                    component] = "*"
        util.dumpTextString(xmlDict, "====== PLANNED QUEUES ======",
                            "==================")
        master_capacity_file = os.path.join(Config.getEnv("WORKSPACE"),
                                            "tests", "rolling_upgrade", "yarn",
                                            "data", "capacity-scheduler.xml")
        modified_capacity_file = os.path.join(Config.getEnv("ARTIFACTS_DIR"),
                                              "capacity-scheduler.xml")
        Machine.copy(master_capacity_file, modified_capacity_file)
        util.writePropertiesToConfigXMLFile(modified_capacity_file,
                                            modified_capacity_file, xmlDict)
        #util.dumpText(modified_capacity_file, "====== capacity-scheduler.xml ======", "==================")
        if RuSetup._defaultQueue:
            components.remove("default")
        return modified_capacity_file


__localTestDir = os.path.join(Config.getEnv("WORKSPACE"), "tests",
                              "rolling_upgrade")
__confFile = __localTestDir + '/conf.json'
__depFile = __localTestDir + '/dep.json'
# Public: all components to run the test
COMPONENTS_TO_TEST = RuSetup.getComponnetsToTest(__confFile, __depFile)
# Public: all components to flip binary
COMPONENTS_TO_FLIP = RuSetup.getComponentsToFlip(__confFile, __depFile)
# Public: components that are affected by the tested components
COMPONENTS_AFFECTED = RuSetup.getComponentsAffected(__confFile, __depFile)
# Public: components that we should import
COMPONENTS_TO_IMPORT = list(set(COMPONENTS_TO_TEST) | set(COMPONENTS_TO_FLIP))
Example #18
class ruFalcon:

    _base_falcon_dir = None
    _local_workspace = Config.getEnv("WORKSPACE")
    _job_user = Config.getEnv("USER")
    _falcon_user = Config.get("falcon", "FALCON_USER")
    _job_start_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%MZ")

    @classmethod
    def background_job_setup(cls, runSmokeTestSetup=True, config=None):
        '''
        Upload Data to HDFS before Upgrade starts
        Creates /user/hrt_qa/falcon/ dir on HDFS
        Upload demo files to /user/hrt_qa/falcon
        '''
        logger.info("Falcon - starting background job setup")
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][BGJobSetup] starting Falcon background job setup")
        from beaver.component.hadoop import HDFS
        if not cls._base_falcon_dir:
            cls._base_falcon_dir = '/user/%s/falcon' % cls._job_user
        exit_code, stdout = HDFS.createDirectory(cls._base_falcon_dir,
                                                 user=cls._job_user,
                                                 perm=777,
                                                 force=True)
        ruAssert("Falcon", exit_code == 0,
                 '[BGJobSetup] could not create dir on hdfs.')
        HDFS.copyFromLocal(os.path.join(cls._local_workspace, "tests",
                                        "rolling_upgrade", "falcon", "demo"),
                           cls._base_falcon_dir,
                           user=cls._job_user)

        ## Create dirs for falcon clusters
        exit_code, stdout = HDFS.createDirectory(
            "/apps/falcon/primaryCluster/staging",
            user=cls._falcon_user,
            perm=777,
            force=True)
        ruAssert("Falcon", exit_code == 0,
                 '[BGJobSetup] could not create staging dir on hdfs ')
        exit_code, stdout = HDFS.createDirectory(
            "/apps/falcon/primaryCluster/working",
            user=cls._falcon_user,
            perm=755,
            force=True)
        ruAssert("Falcon", exit_code == 0,
                 '[BGJobSetup] could not create dir on hdfs.')
        exit_code, stdout = HDFS.createDirectory(
            "/apps/falcon/backupCluster/staging",
            user=cls._falcon_user,
            perm=777,
            force=True)
        ruAssert("Falcon", exit_code == 0,
                 '[BGJobSetup] could not create dir on hdfs.')
        exit_code, stdout = HDFS.createDirectory(
            "/apps/falcon/backupCluster/working",
            user=cls._falcon_user,
            perm=755,
            force=True)
        ruAssert("Falcon", exit_code == 0,
                 '[BGJobSetup] could not create dir on hdfs.')

        ## Create cluster entities.
        cls.createClusterEntities("USWestOregon", "oregonHadoopCluster",
                                  "primaryCluster")
        cls.createClusterEntities("USEastVirginia", "virginiaHadoopCluster",
                                  "backupCluster")

        if runSmokeTestSetup:
            logger.info("**** Running Falcon Smoke Test Setup ****")
            cls.smoke_test_setup()

        logger.info("Falcon - completed background job setup")
        return

    @classmethod
    def smoke_test_setup(cls):
        '''
        Setup required to run Smoke test
        '''
        logger.info(
            "Falcon smoke test setup is same as background job setup. Nothing to do"
        )
        return

    @classmethod
    def run_background_job(cls, runSmokeTestSetup=True, config=None):
        '''
        Runs background long running Falcon Job
        :param runSmokeTestSetup: Runs smoke test setup if set to true
        :param config: expected configuration location
        :return: Total number of long running jobs started
        '''
        logger.info("Falcon - start running background job")

        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[PASSED][Falcon][BGJob] starting Falcon background jobs")

        BACKGROUND_JOB_DIR = os.path.join(cls._local_workspace, "tests",
                                          "rolling_upgrade", "falcon",
                                          "falconChurnDemo")

        # Add clusters
        clusterXml = os.path.join(cls._local_workspace, "primaryCluster.xml")
        logger.info("Content of cluster xml %s" % clusterXml)
        with open(clusterXml, 'r') as f:
            logger.info(f.read())
        CMD = " entity -type cluster -submit -file " + clusterXml
        cls.runCommand(cls._job_user, CMD)
        clusterXml = os.path.join(cls._local_workspace, "backupCluster.xml")
        CMD = " entity -type cluster -submit -file " + clusterXml
        cls.runCommand(cls._job_user, CMD)

        # Add feed
        feedXml = os.path.join(BACKGROUND_JOB_DIR, "rawEmailFeed.xml")
        logger.info("Content of feed xml %s" % feedXml)
        with open(feedXml, 'r') as f:
            logger.info(f.read())
        CMD = " entity -type feed -submit -file " + feedXml
        cls.runCommand(cls._job_user, CMD)

        # Add Process
        processXml = os.path.join(BACKGROUND_JOB_DIR, "emailIngestProcess.xml")
        logger.info("Content of process xml %s" % processXml)
        with open(processXml, 'r') as f:
            logger.info(f.read())
        CMD = " entity -type process -submit -file " + processXml
        cls.runCommand(cls._job_user, CMD)

        # Schedule Feed and Process
        CMD = " entity -type feed -schedule -name rawEmailFeed"
        cls.runCommand(cls._job_user, CMD)
        CMD = " entity -type process -schedule -name rawEmailIngestProcess"
        cls.runCommand(cls._job_user, CMD)

        cls._job_start_time = datetime.datetime.utcnow().strftime(
            "%Y-%m-%dT%H:%MZ")
        logger.info("Falcon - completed running background job")

        return 1
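
    @classmethod
    def runCommand(cls, user, cmd, ignoreError=False):
        # Editor's sketch: runCommand is used throughout this class but is not
        # part of the snippet. This stand-in assumes a FALCON_CMD entry in the
        # config and a Machine.runas helper returning (exit_code, output);
        # both are assumptions, not the suite's actual code.
        falcon_cmd = Config.get('falcon', 'FALCON_CMD') + cmd
        exit_code, output = Machine.runas(user, falcon_cmd)
        if not ignoreError:
            ruAssert("Falcon", exit_code == 0,
                     "[RunCommand] command '%s' failed" % cmd)
        return exit_code, output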

    @classmethod
    def run_smoke_test(cls, smoketestnumber, config=None):
        '''
        Run smoke test for falcon
        :param smoketestnumber: Used for unique output log location
        '''
        logger.info("Falcon - start running smoke test")
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][Smoke] starting Falcon smoke test")

        cls.runCommand(cls._falcon_user, "admin -version")
        # QE-4347: Do not run the list call as smoke is run before long running job is called.
        # cls.runCommand(cls._job_user,
        #                   " instance -type process -name rawEmailIngestProcess -list ")
        logger.info("Falcon - finished running smoke test")
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][Smoke] finishing Falcon smoke test")
        return

    @classmethod
    def stopFalconLongRunningJob(cls):
        logger.info("Falcon - Begin function stopFalconLongRunningJob")
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][BGJobStop]Stopped/deleting Falcon background jobs started"
        )

        CMD = " instance -type process -name rawEmailIngestProcess " \
              " -list -start 2016-07-25T00:00Z -end 2018-07-25T00:00Z " \
              " -filterBy STARTEDAFTER:" + cls._job_start_time + " -numResults 1000"
        exitcode, output = cls.runCommand(cls._job_user, CMD)

        success_count = output.count("SUCCEEDED")
        fail_count = output.count("KILLED")

        ruAssert(
            "Falcon", success_count > 0 and fail_count == 0,
            "[BGJobCheck] Long running job failed with " + str(fail_count) +
            " KILLED instances")
        try:
            from beaver.component.falcon import Falcon
        except ImportError:
            ## Import fails when Falcon is not installed on this machine. Nothing to do
            return

        # Delete entities
        cls.runCommand(
            cls._job_user,
            " entity -type process -delete -name rawEmailIngestProcess")
        cls.runCommand(cls._job_user,
                       " entity -type feed -delete -name rawEmailFeed")
        cls.runCommand(cls._job_user,
                       " entity -type cluster -delete -name primaryCluster")
        cls.runCommand(cls._job_user,
                       " entity -type cluster -delete -name backupCluster")

        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][BGJobStop] Stopped/deleted Falcon background jobs")
        logger.info("Falcon - End function stopFalconLongRunningJob")

        return
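
The SUCCEEDED/KILLED bookkeeping above is a plain substring count over the CLI output; an illustrative helper (not part of the framework) that tallies all instance states in one pass might look like:

import re

def count_instance_states(output):
    # Tally Falcon instance states found in 'instance -list' output.
    counts = {}
    for state in re.findall(r"\b(SUCCEEDED|RUNNING|KILLED|FAILED)\b", output):
        counts[state] = counts.get(state, 0) + 1
    return counts
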

    @classmethod
    def background_job_teardown(cls):
        '''
        Cleanup for long running Falcon job
        '''
        logger.info(
            "Teardown happens when the long running job is stopped. Nothing to do here"
        )

        return

    @classmethod
    def falcon_teardown(cls):
        '''
        Clean up entities from Falcon
        '''
        # Delete entities
        logger.info("Falcon - Begin function falcon_teardown")
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][BGJobTeardown] Falcon background jobs cleanup started"
        )
        cls.runCommand(
            cls._job_user,
            " entity -type process -delete -name rawEmailIngestProcess",
            ignoreError=True)
        cls.runCommand(cls._job_user,
                       " entity -type feed -delete -name rawEmailFeed",
                       ignoreError=True)
        cls.runCommand(cls._job_user,
                       " entity -type cluster -delete -name primaryCluster",
                       ignoreError=True)
        cls.runCommand(cls._job_user,
                       " entity -type cluster -delete -name backupCluster",
                       ignoreError=True)

        logger.info("Falcon - End function falcon_teardown")

    @classmethod
    def verifyLongRunningJob(cls):
        '''
        Validate long running background job after end of all component upgrade
        '''
        logger.info("Verified when stopping Long Running Job ")
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][BGJobCheck] Verify Falcon Background Application")
        cls.runCommand(cls._falcon_user, "admin -version")

    @classmethod
    def background_job_when_master_upgrade(cls):
        '''
        Start a background application which runs while component master service gets upgraded
        :return:
        '''
        logger.info("TODO - short backgroundjob when master upgrade/downgrade")

    @classmethod
    def background_job_teardown_when_master_upgrade(cls):
        '''
        Clean up for background job  which started before upgrading master services
        :return:
        '''
        logger.info(
            "TODO - short background job teardown during master upgrade/downgrade"
        )

    @classmethod
    def verify_background_job_when_master_upgrade(cls):
        '''
        Validate background job Succeeded when master got upgraded
        :return:
        '''
        logger.info(
            "Falcon - Begin function verify_background_job_when_master_upgrade"
        )
        cls.verifyLongRunningJob()

    @classmethod
    def upgrade_master(cls, version, config=None):
        '''
        Upgrades Master services:
        :param version: Version to be upgraded to
        :param config: Config location
        '''
        logger.info("Falcon - Begin function upgrade_master")
        cls.background_job_when_master_upgrade()
        cls.change_master_version(version, config)
        cls.verify_background_job_when_master_upgrade()
        cls.background_job_teardown_when_master_upgrade()

    @classmethod
    def upgrade_slave(cls, version, node, config=None):
        '''
        Upgrades slave services :
        :param version: Version to be upgraded to
        :param node: Slave Node
        :param config: Config location
        :return:
        '''
        logger.info("Falcon has only a master, No slave to upgrade.")

    @classmethod
    def downgrade_master(cls, version, config=None):
        '''
        Exactly same as upgrade master. There is no difference in steps.
        :param version: Version to be downgraded to
        :param config: Config location
        :return:
        '''
        logger.info("Falcon - Begin function downgrade_master ")
        cls.change_master_version(version, config, logText="Downgrade")

    @classmethod
    def change_master_version(cls, version, config=None, logText="Upgrade"):
        '''
        Changes Master services:
        :param version: Version to be changed to
        :param config: Config location
        '''
        logger.info("Falcon - Begin function change_master_version ")

        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        from time import sleep
        try:
            from beaver.component.falcon import Falcon
        except ImportError:
            ## Import fails when Falcon is not installed on this machine. Nothing to do
            return

        node = Falcon.get_falcon_server()
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][%s] Falcon server for node %s to version %s started"
            % (logText, node, version))

        Falcon.stop(cls._falcon_user, node)
        sleep(30)
        hdpSelect.changeVersion("falcon-server", version, node)
        hdpSelect.changeVersion("falcon-client", version, node)

        Falcon.start(cls._falcon_user, node)
        sleep(60)
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][%s] Falcon server for node %s to version %s finished"
            % (logText, node, version))
        return

    @classmethod
    def downgrade_slave(cls, version, node, config=None):
        '''
        Downgrade slave services
        :param version: version to be downgraded to
        :param config: Configuration location
        '''
        logger.info("Falcon runs in standalone mode. Nothing to do.")

    @classmethod
    def run_client_smoketest(cls, config=None, env=None):
        '''
        Run Smoke test after upgrading Client
        :param config: Configuration location
        :param env: Set Environment variables
        '''
        logger.info("Falcon - Begin function run_client_smoketest ")
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][Smoke] starting Falcon client smoke test")
        cls.run_smoke_test("123", config=None)

    @classmethod
    def testAfterAllSlavesRestarted(cls):
        '''
        Test that the upgrade completed properly after all masters and slaves for HDFS, YARN and HBase have been upgraded
        :return:
        '''
        logger.info("Falcon - Begin function testAfterAllSlavesRestarted ")
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][Falcon][AFTER SLAVE PASSED] starting Falcon smoke test")
        cls.runCommand(cls._falcon_user, "admin -version")
        cls.runCommand(
            cls._job_user,
            " instance -type process -name rawEmailIngestProcess -list -orderBy startTime"
        )

    @classmethod
    def runCommand(cls, user, cmd, ignoreError=True):
        """
        Function to run falcon commands cmd as a certain user
        :param user:
        :param cmd:
        :return:
        """
        try:
            from beaver.component.falcon import Falcon
        except ImportError:
            ## Import fails when Falcon is not installed on this machine. Nothing to do
            return 0, ""

        exit_code, output = Falcon.runas(user, cmd)
        if ignoreError:
            if exit_code != 0:
                #logger.info("Warning (ignoreError=True): Non-zero exit code when running command " + cmd + " as user " + user)
                from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
                UpgradePerNode.reportProgress(
                    "[FAILED][Falcon] Warning (ignoreError=True): Non-zero exit code when running command %s as user %s "
                    % (cmd, user))
        else:
            ruAssert(
                "Falcon", exit_code == 0,
                "[RunCommand] Could not run command " + cmd + " as user " +
                user)
        return exit_code, output
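
Illustrative usage of runCommand, assuming the enclosing class is named ruFalcon (by analogy with ruPig later in this listing) and the hrt_qa job user; both names are assumptions, not from the source:

# Ignored failure: a non-zero exit code is only logged via reportProgress.
exit_code, output = ruFalcon.runCommand(
    'hrt_qa', " entity -type feed -delete -name rawEmailFeed", ignoreError=True)

# Strict mode: a non-zero exit code trips ruAssert and fails the run.
exit_code, output = ruFalcon.runCommand('hrt_qa', "admin -version", ignoreError=False)
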

    @classmethod
    def createClusterEntities(cls, colo, desc, name):
        try:
            from beaver.component.falcon import Falcon
        except ImportError:
            ## Import fails when Falcon is not installed on this machine. Nothing to do
            return

        from beaver.component.hadoop import Hadoop, HDFS, YARN
        write_endpoint = Hadoop.getFSDefaultValue()
        webhdfs_scheme = 'webhdfs'
        if HDFS.isHttpsEnabled():
            webhdfs_scheme = 'swebhdfs'
        read_endpoint = '%s://%s:%s' % (
            webhdfs_scheme, write_endpoint.split('/')[2].split(':')[0],
            HDFS.getNNWebPort())
        execute_endpoint = YARN.getResourceManager()
        falconNode = Falcon.get_falcon_server()

        from beaver.component.oozie import Oozie
        oozieUrl = Oozie.getOozieUrl()
        entityText = "<?xml version=\"1.0\"?>" \
                     "<cluster colo=\"" + colo + "\" description=\"" + desc + "\" name=\"" + name + "\" " \
                     "xmlns=\"uri:falcon:cluster:0.1\"> " \
                        "<interfaces> " \
                            "<interface type=\"readonly\" endpoint=\""+read_endpoint+"\" version=\"0.20.2\"/> " \
                            "<interface type=\"write\" endpoint=\""+write_endpoint+"\" version=\"0.20.2\"/> " \
                            "<interface type=\"execute\" endpoint=\"" + execute_endpoint + "\" version=\"0.20.2\"/> " \
                            "<interface type=\"workflow\" endpoint=\"" + oozieUrl + "\" version=\"3.1\"/>" \
                            "<interface type=\"messaging\" endpoint=\"" \
                                "tcp://" + falconNode + ":61616?daemon=true\" version=\"5.1.6\"/>" \
                        "</interfaces>" \
                        "<locations>" \
                            "<location name=\"staging\" path=\"/apps/falcon/" + name + "/staging\" />" \
                            "<location name=\"temp\" path=\"/tmp\" />" \
                            "<location name=\"working\" path=\"/apps/falcon/" + name + "/working\" />" \
                        "</locations>" \
                        "<ACL owner=\"" + cls._job_user + "\" group=\"users\" permission=\"0755\"/>"
        if Hadoop.isSecure():
            realm = HDFS.getConfigValue(
                'dfs.namenode.kerberos.principal').split('@')[1]
            entityText += "<properties> <property name=\"dfs.namenode.kerberos.principal\" value=\"nn/_HOST@" + realm + "\"/> </properties>"
        entityText += "</cluster>"
        textFile = open(os.path.join(cls._local_workspace, name + ".xml"), "w")
        textFile.write("%s" % entityText)
        textFile.close()

        return
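
With hypothetical endpoint values substituted in, the entity text assembled above renders roughly as follows (pretty-printed here for readability; the code writes it out as a single unindented string, and on secure clusters a <properties> block with dfs.namenode.kerberos.principal is appended before the closing tag):

<?xml version="1.0"?>
<cluster colo="default" description="primary" name="primaryCluster" xmlns="uri:falcon:cluster:0.1">
  <interfaces>
    <interface type="readonly" endpoint="webhdfs://nn.example.com:50070" version="0.20.2"/>
    <interface type="write" endpoint="hdfs://nn.example.com:8020" version="0.20.2"/>
    <interface type="execute" endpoint="rm.example.com:8050" version="0.20.2"/>
    <interface type="workflow" endpoint="http://oozie.example.com:11000/oozie" version="3.1"/>
    <interface type="messaging" endpoint="tcp://falcon.example.com:61616?daemon=true" version="5.1.6"/>
  </interfaces>
  <locations>
    <location name="staging" path="/apps/falcon/primaryCluster/staging"/>
    <location name="temp" path="/tmp"/>
    <location name="working" path="/apps/falcon/primaryCluster/working"/>
  </locations>
  <ACL owner="hrt_qa" group="users" permission="0755"/>
</cluster>
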
Ejemplo n.º 19
0
    def getHadoopConfDir(cls):
        return Config.get('hadoop', 'HADOOP_CONF')
Ejemplo n.º 20
0
    def run_client_smoketest(cls, config=None, env=None):
        '''
        Run Smoke test after upgrading Client
        :param config: Configuration location
        :param env: Set Environment variables
        '''
        from beaver.component.hive import Hive
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Smoke] Smoke test for Hive component started")

        setqueue = ""
        if Hive.isTezEnabled():
            setqueue = "set tez.queue.name=%s; " % cls._yarn_queue
        else:
            setqueue = "set mapred.job.queue.name=%s; " % cls._yarn_queue

        logger.info("**** Running Hive CLI Test ****")
        query = setqueue + " insert overwrite table %s_hive_verify select userid, avg(age) from %s group by userid order by userid; " % (
            cls._smoketest_tbl, cls._smoketest_tbl)
        query += "select count(*) from %s_hive_verify;" % cls._smoketest_tbl
        exit_code, stdout, stderr = Hive.runQuery(query,
                                                  stderr_as_stdout=False)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for Hive Metastore failed with exit code '%d'"
                % exit_code)
            logger.error(
                "Smoke test for Hive failed with the following error: " +
                stderr)
        elif stdout.find("%d" % cls._num_of_rows_smoke) == -1:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for Hive Metastore failed to verify number of rows in output"
            )
            logger.error(
                "Smoke test for Hive failed to find [%d] in output [%s]" %
                (cls._num_of_rows_smoke, stdout))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][Hive][Smoke] Smoke test for Hive Metastore succeeded"
            )
            logger.info("Smoke test for Hive Metastore succeeded")

        logger.info("**** Running Beeline CLI Test ****")
        query = setqueue + "\ndrop table if exists %s_bline_verify;\n" % cls._smoketest_tbl
        query += "create table %s_bline_verify (userid string, age int);\n" % cls._smoketest_tbl
        query += "insert overwrite table %s_bline_verify select userid, avg(age) from %s group by userid order by userid;\n" % (
            cls._smoketest_tbl, cls._smoketest_tbl)
        query += "select count(*) from %s_bline_verify;\n" % cls._smoketest_tbl
        exit_code, stdout, stderr = Hive.runQueryOnBeeline(query,
                                                           readFromFile=True)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for HiveServer2 failed with exit code '%d'"
                % exit_code)
            logger.error(
                "Smoke test for HiveServer2 failed with the following error: "
                + stderr)
        elif stdout.find("%d" % cls._num_of_rows_smoke) == -1:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for HiveServer2 failed to verify number of rows in output"
            )
            logger.error(
                "Smoke test for HiveServer2 failed to find [%d] in output [%s]"
                % (cls._num_of_rows_smoke, stdout))
        else:
            logger.info("Smoke test for HiveServer2 succeeded")

        logger.info("**** Running WebHCat Smoke Test ****")
        query = "show tables;"
        webhcatHost = Config.get('templeton',
                                 'TEMPLETON_HOST',
                                 default=Machine.getfqdn())
        webhcatPort = Config.get('templeton',
                                 'TEMPLETON_PORT',
                                 default="50111")
        url = "http://%s:%s/templeton/v1/ddl" % (webhcatHost, webhcatPort)
        params = {'exec': query}
        status_code, stdout = util.curl(url, method='POST', params=params)
        if status_code != 200:
            UpgradePerNode.reportProgress(
                "[FAILED][Hive][Smoke] Smoke test for WebHCat failed due to status code = %d"
                % status_code)
        else:
            logger.info("Smoke test for WebHCat succeeded")

        UpgradePerNode.reportProgress(
            "[INFO][Hive][Smoke] Smoke test for Hive component finished")
Ejemplo n.º 21
0
    def getMapredUser(cls):
        return Config.get('hadoop', 'MAPRED_USER')
Ejemplo n.º 22
0

import datetime
import logging
import time

from beaver.component.ambari import Ambari
from beaver.component.xa import Xa, XaPolicy
from beaver.config import Config

logger = logging.getLogger(__name__)
ARTIFACTS_DIR = Config.getEnv('ARTIFACTS_DIR')
_ambari_host_1 = Config.get("multicluster", "AMBARI_GATEWAY1")
_ambari_host_2 = Config.get("multicluster", "AMBARI_GATEWAY2")
source_weburl = Ambari.getWebUrl(hostname=_ambari_host_1)
target_weburl = Ambari.getWebUrl(hostname=_ambari_host_2)
source_datacenter = target_datacenter = "default"

primaryCluster = source_datacenter + "$" + Ambari.getClusterName(
    weburl=source_weburl)
backupCluster = target_datacenter + "$" + Ambari.getClusterName(
    weburl=target_weburl)
policy_prefix = 'pol_for_'


class BeaconRanger:
    def __init__(self):
Ejemplo n.º 23
0
    def runAgent(cls, name, conffile, user=None, cwd=None, env=None):
        flume_conf = Config.get('flume-ng', 'FLUME_CONF')
        cmd = "agent -n %s -c %s -f %s" % (name, flume_conf, conffile)
        return cls.runInBackgroundAs(user, cmd, cwd=cwd, env=env)
Ejemplo n.º 24
0
    def createRangerHivePolicy(cls,
                               database,
                               udf=None,
                               table=None,
                               column=None,
                               userList=None,
                               weburl=None):
        BeaconRanger.changePolicyInterval()
        logger.info('creating policy for %s' % database)
        users = [Config.getEnv('USER')] if userList is None else userList
        if table is not None:
            polItem = XaPolicy.createPolicyItem(
                userList=users, PermissionList=XaPolicy.HIVE_ALL_PERMISSIONS)
            if column is not None:
                policy = XaPolicy.getPolicyJson(policy_prefix + '_' +
                                                database + '_table_' + table +
                                                '_col_' + column,
                                                'hive', [polItem],
                                                ambariWeburl=weburl,
                                                database=database,
                                                table=table,
                                                column=column)
            else:
                policy = XaPolicy.getPolicyJson(policy_prefix + '_' +
                                                database + '_table_' + table,
                                                'hive', [polItem],
                                                ambariWeburl=weburl,
                                                database=database,
                                                table=table,
                                                column='*')
        elif udf is not None:
            polItem = XaPolicy.createPolicyItem(
                userList=users, PermissionList=XaPolicy.HIVE_ALL_PERMISSIONS)
            policy = XaPolicy.getPolicyJson(policy_prefix + '_' + database +
                                            '_udf_' + udf,
                                            'hive', [polItem],
                                            ambariWeburl=weburl,
                                            database=database,
                                            udf=udf)
        else:
            # repladmin check
            polItem = XaPolicy.createPolicyItem(
                userList=users,
                PermissionList=[
                    XaPolicy.PERMISSION_CREATE, XaPolicy.PERMISSION_SELECT,
                    XaPolicy.PERMISSION_DROP, XaPolicy.PERMISSION_REPL_ADMIN
                ])
            policy = XaPolicy.getPolicyJson(policy_prefix + '_' + database,
                                            'hive', [polItem],
                                            ambariWeburl=weburl,
                                            database=database,
                                            table='*',
                                            column='*')
        logger.info('going to create policy: ' + str(policy))
        status_code, response = Xa.createPolicy_api_v2(
            policy,
            policyActivationWaitTime=BeaconRanger.policyActivationWaitTime,
            weburl=weburl)

        if status_code == 200 and weburl != target_weburl:
            BeaconRanger.policiesAddedBeforeTest.append(policy)
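
Illustrative calls, assuming the method is exposed as a classmethod on BeaconRanger (the database and table names here are hypothetical):

# Table-level policy granting all Hive permissions to the default test user.
BeaconRanger.createRangerHivePolicy('beacon_db', table='emp')

# Database-level policy (the repladmin branch), targeting the source cluster's Ranger.
BeaconRanger.createRangerHivePolicy('beacon_db', weburl=source_weburl)
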
Ejemplo n.º 25
0
    def runAdminReport(cls):
        exit_code, output = Hadoop.runas(Config.get('hadoop', 'HDFS_USER'), "dfsadmin -report")
        return output
Ejemplo n.º 26
0
import os, re, string, time, socket, logging, platform, urllib2, collections, datetime, json
import urllib, sys
from beaver.component.hadoop import Hadoop, HDFS, MAPRED, YARN
from beaver.component import HadoopJobHelper
from beaver.machine import Machine
from beaver.config import Config
from beaver import util
from beaver import configUtils
import pytest
import random

logger = logging.getLogger(__name__)

HADOOPQA_USER = Config.get('hadoop', 'HADOOPQA_USER')

INPUT_CSV_FILE = 'inputCSV.csv'
TEST_FOLDER = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                           'ru_phoenix_testFolder')
TEST_TABLE = 'testTable'


HOST = None

PHOENIX_TEST_TABLE = 'basicTable'

EXAMPLE_ROWS = [
    '1,John,Snow,The Wall\n'
    '2,Jaime,Lanninster,Kings Landing\n'
Ejemplo n.º 27
0
    def runas(cls, user, cmd, cwd=None, env=None, logoutput=True):
        flume_cmd = Config.get('flume-ng', 'FLUME_CMD')
        flume_cmd += " " + cmd
        return Machine.runas(user, flume_cmd, cwd=cwd, env=env, logoutput=logoutput)
Ejemplo n.º 28
0
    def run(cls, cmd, logoutput=True):
        return Machine.run(Config.get('hbase', 'HBASE_CMD') + " " + cmd, logoutput=logoutput)
Ejemplo n.º 29
0
    def getDatanodes(cls):
        slaveFile = os.path.join(Config.get('hadoop', 'HADOOP_CONF'), "slaves")
        f = open(slaveFile, "r")
        datanodes = f.readlines()
        f.close()
        return datanodes
Ejemplo n.º 30
0
    def getVersion(cls):
        return Config.get('hadoop', 'VERSION')
Ejemplo n.º 31
0
    def perform_express_upgrade(self):
        COMPONENT = str(self.COMPONENT)
        STACK_TYPE = str(self.STACK_TYPE)

        env = {}
        env['DISPLAY'] = self.DISPLAY

        # Update pom.xml of uifrm with Markers - applies for tests that involve a combination of API and UI tests run within the same split
        ambariutils.update_pom_xml_with_markers(self.LOCAL_WORK_DIR)

        SRC_DIR = os.path.join(Config.getEnv('WORKSPACE'), 'apitestframework')
        print "SRC_DIR = ", SRC_DIR

        # Change the current directory for api test code
        LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework')

        # Copy the ambari api test code to artifacts dir
        shutil.copytree(SRC_DIR, LOCAL_WORK_DIR)
        # Change the permission of openstack-keypairfile
        os.chmod(os.path.join(LOCAL_WORK_DIR, 'src/resources/openstack-keypair'), 0400)

        # populate the config properties file for api test code
        configPropChanges = ambariutils.updateProperties()

        # Change the current directory for api test code
        LOCAL_WORK_DIR = os.path.join(
            Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework', 'src', 'resources'
        )
        util.writePropertiesToFile(
            os.path.join(LOCAL_WORK_DIR, 'config.properties'), os.path.join(LOCAL_WORK_DIR, 'config.properties'),
            configPropChanges
        )

        # Change the current directory for api test code
        LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework')

        testSuiteLocation = "src/test/suites"

        # Check what is the STACK_TYPE. Based on the stack type we will decide which test suite to select
        logger.info(
            "STACK TYPE is " + STACK_TYPE + ". So going into " + STACK_TYPE + " if block for test suite "
            "selection"
        )

        if STACK_TYPE == "HDF":
            # Setup for HDF EU. We can now add an else method to handle ru
            if 'ambarieu-hdf-downgrade' in COMPONENT:
                testSuiteFile = "ambarieu-hdf-downgrade.suite"
            elif 'ambarieu-hdf' in COMPONENT:
                testSuiteFile = "ambarieu-hdf.suite"
            logger.info("Setting HDF testsuite file to " + testSuiteFile)
        elif STACK_TYPE == "HDP" and "ambarieu-hdp-hdf" in COMPONENT:
            testSuiteFile = "ambarieu-hdp-hdf.suite"
        else:
            if 'e2e' not in COMPONENT:
                # These are system tests so use suite filename sent from json file
                testSuiteFile = COMPONENT + ".suite"
            else:
                if 'dwngd' in COMPONENT:  # For EU E2E downgrade runs
                    testSuiteFile = "ambari-expressupgrade-downgrade-e2e.suite"
                elif 'mlevel' in COMPONENT:  # For EU E2E multi-level paths runs
                    testSuiteFile = "ambari-expressupgrade-mlevel-e2e.suite"
                elif 'denygpl' in COMPONENT:  # For Deny GPL tests in EU E2E
                    testSuiteFile = "ambarieu-denygpl.suite"
                elif 'wkflow' in COMPONENT:  # For EU E2E workflow suite runs
                    testSuiteFile = "ambari-expressupgrade-wkflow-e2e.suite"
                elif 'iopmigration' in COMPONENT:  # Added for IOP migration E2E tests
                    testSuiteFile = "ambari-iopmigration-e2e.suite"
                elif 'iopintg' in COMPONENT:  # Added for IOP integration tests
                    testSuiteFile = "ambarieu-iopintg-e2e.suite"
                elif 'patchupgradeintg-ru' in COMPONENT:  # Added for RU PU integration tests
                    testSuiteFile = "ambarieu-patchupgradeintg-ru-e2e.suite"
                elif 'patchupgradeintg-thirddigit' in COMPONENT:  # Added for EU PU 3rd digit integration tests
                    testSuiteFile = "ambarieu-patchupgradeintg-thirddigit-e2e.suite"
                elif 'patchupgradeintg-revert' in COMPONENT:  # Added for EU PU integration tests with revert
                    testSuiteFile = "ambarieu-patchupgradeintg-revert-e2e.suite"
                elif 'patchupgradeintg' in COMPONENT:  # Added for EU PU integration tests
                    testSuiteFile = "ambarieu-patchupgradeintg-e2e.suite"
                elif 'experiment' in COMPONENT:  # Added for full EU integration tests
                    testSuiteFile = "ambari-expressupgrade-experiment-e2e.suite"
                else:  # Default for EU E2E runs
                    testSuiteFile = "ambari-expressupgrade-upgrade-e2e.suite"

        logger.info("Opening test suite file : " + testSuiteFile + " for test execution")

        file = open(os.path.join(LOCAL_WORK_DIR, testSuiteLocation, testSuiteFile))
        testSuite = json.load(file)
        file.close()

        # magic word to use as key in suite file
        magic = "split" + self.splitNumStr
        print "magic key is : ", magic

        if 'experiment' in COMPONENT or 'patchupgradeintg' in COMPONENT or 'iopintg' in COMPONENT:
            magic = "split1"
            print "magic key for experiment EU/PU run is : ", magic

        # Update pom.xml for API framework with Markers info
        ambariutils.update_pom_xml_with_markers(LOCAL_WORK_DIR)

        upgrade_test_results = {}
        # Iterate over the list of all test classes in the split and execute them
        logger.info("=====Starting Express Upgrade tests=========")

        if testSuite.has_key(magic):
            for testName in testSuite[magic]:
                if not ambariutils.isTestClassPresent(testName, LOCAL_WORK_DIR):
                    LOCAL_WORK_DIR = ambariutils.switchDirectory(LOCAL_WORK_DIR, COMPONENT)

                logger.info('LOCAL_WORK_DIR %s ', LOCAL_WORK_DIR)
                logger.info('================Running %s with maven===============' % (testName))
                self.LOCAL_WORK_DIR = LOCAL_WORK_DIR
                exit_code, stdout = self.Maven2runas(
                    ' -Dtest=%s -DfailIfNoTests=false test' % testName, cwd=self.LOCAL_WORK_DIR, env=env, user='******'
                )
                UpgradeLogger.reportProgress('================Finished %s ========================' % (testName), True)
                logger.info('Exit code of the test: %s ' % (exit_code))

                if exit_code != 0:
                    upgrade_test_results.update(UpgradeLogger.get_stack_trace(testName, LOCAL_WORK_DIR))
                    UpgradeLogger.reportProgress("Test failure encountered: %s" % (testName), False)
                    # Do not run any further tests if Upgrade itself has failed
                    if self.is_upgrade_executed():
                        if not self.is_stack_upgrade_success():
                            UpgradeLogger.reportProgress('Express Upgrade failed, aborting rest of the tests', False)
                            break
                    else:
                        UpgradeLogger.reportProgress(
                            'Error(s) in steps before starting Upgrade, aborting rest of the tests', False
                        )
                        break
        else:
            print "================Split key not found in test suite file========================"

        if len(upgrade_test_results) > 0:
            UpgradeLogger.reportProgress("=====List of failed test(s)=====\n", False)
            for key, value in upgrade_test_results.items():
                UpgradeLogger.reportProgress(key, False)

            UpgradeLogger.reportProgress("=====Error details for failed test(s)=====", False)
            for key, value in upgrade_test_results.items():
                UpgradeLogger.reportProgress("Test:%s AND Failure details:\n %s" % (key, value), False)
                UpgradeLogger.reportProgress("=======================================================", True)
        else:
            UpgradeLogger.reportProgress("=====Express Upgrade test(s) completed successfully=========", True)

        # Gather reports for tests executed from apitestframework dir
        LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT, 'apitestframework')

        Machine.runas('root', 'chmod -R 777 ' + LOCAL_WORK_DIR)

        uifrmReportDirectory = os.path.join(LOCAL_WORK_DIR, '..', 'target/surefire-reports')
        if not os.path.exists(uifrmReportDirectory):
            Machine.runas('root', 'mkdir -p ' + uifrmReportDirectory)
            Machine.runas('root', 'chmod -R 777 ' + uifrmReportDirectory)
            logger.info('Created path for reporting')

        Machine.runas('root', 'chmod -R 777 ' + os.path.join(LOCAL_WORK_DIR, '..', 'target'))

        apiReportDirectory = os.path.join(LOCAL_WORK_DIR, 'target', 'surefire-reports', 'junitreports')
        if os.path.exists(apiReportDirectory):
            files = os.listdir(os.path.join(LOCAL_WORK_DIR, 'target', 'surefire-reports', 'junitreports'))
            for file in files:
                shutil.copy(
                    os.path.join(LOCAL_WORK_DIR, 'target', 'surefire-reports', 'junitreports', file),
                    os.path.join(LOCAL_WORK_DIR, '..', 'target', 'surefire-reports')
                )

        # Switch back to uifrm dir for reporting purposes
        LOCAL_WORK_DIR = os.path.join(Config.getEnv('ARTIFACTS_DIR'), COMPONENT)
        self.LOCAL_WORK_DIR = LOCAL_WORK_DIR
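
The suite files consumed above are JSON maps from split keys to lists of test class names; a minimal hypothetical example of such a file (class names are made up for illustration):

{
    "split1": ["TestExpressUpgradePrep", "TestExpressUpgrade"],
    "split2": ["TestPostUpgradeValidation"]
}
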
Ejemplo n.º 32
0
    def getDruidHome(cls):
        '''
        Returns DRUID_HOME
        '''
        return Config.get('druid', 'DRUID_HOME')
Ejemplo n.º 33
0
    def verifySchemaFunctionality(cls):
        '''
          We verify that the system can operate with SCHEMA functionality.
        '''
        from beaver.component.phoenix import Phoenix
        from beaver.component.hbase import HBase
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode

        HBASE_HOME = Config.get('hbase', 'HBASE_HOME')
        HBASE_CONF_DIR = os.path.join(HBASE_HOME, "conf")
        #We verify the schema functionality.
        HBASE_CHANGES = {}
        HBASE_CHANGES['hbase-site.xml'] = {}
        HBASE_CHANGES['hbase-site.xml'][
            'phoenix.schema.isNamespaceMappingEnabled'] = 'true'
        HBASE_CHANGES['hbase-site.xml'][
            'phoenix.schema.mapSystemTablesToNamespace'] = 'true'

        TEST_TABLE_A = 'Table_A'

        SCHEMA_1 = 'SCHEMA_1'

        masterNodes = HBase.getAllMasterNodes()
        regionNodes = HBase.getRegionServers()

        hbase_allnodes = masterNodes + regionNodes
        gateway_node = Machine.getfqdn()
        if gateway_node not in hbase_allnodes:
            hbase_allnodes.append(gateway_node)

        HBase.stopHBaseCluster()

        HBase.modifyConfig(changes=HBASE_CHANGES,
                           nodeSelection={'nodes': hbase_allnodes})

        util.copy_back_to_original_config(HBase.getModifiedConfigPath(),
                                          HBASE_CONF_DIR,
                                          file_list=["hbase-site.xml"],
                                          node_list=hbase_allnodes)

        HBase.startHBaseCluster(HBase.getModifiedConfigPath())

        #We grant permissions to all tables.
        Phoenix.grantPermissionsToSystemTables(schemaFunctionalityEnabled=True)

        #We check that we can still query the original table.
        cls.verifyBasicTable()

        #We check that we can create/query schemas.
        exit_code, stdout = Phoenix.runSQLLineCmds(
            'CREATE SCHEMA IF NOT EXISTS %s;' % SCHEMA_1)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Creation of schema %s failed due to exitcode = %s "
                % (SCHEMA_1, exit_code))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Schema creation %s succeeded." %
                (SCHEMA_1))

        #we create tables inside that schema
        primaryKey = {'name': 'ID', 'type': 'BIGINT'}
        columns = [{
            'name': 'FirstName',
            'type': 'VARCHAR(30)'
        }, {
            'name': 'SecondName',
            'type': 'VARCHAR(30)'
        }, {
            'name': 'City',
            'type': 'VARCHAR(30)'
        }]
        exit_code, stdout = Phoenix.createTable(SCHEMA_1 + '.' + TEST_TABLE_A,
                                                primaryKey, columns)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Table creation %s on schema %s failed due to exitcode = %s "
                % (TEST_TABLE_A, SCHEMA_1, exit_code))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Table creation %s on schema %s succeeded."
                % (TEST_TABLE_A, SCHEMA_1))

        #We insert some data into the table through upsert.
        for i in range(0, 5):
            exit_code, stdout = Phoenix.runSQLLineCmds(
                'UPSERT INTO %s VALUES (%s, "name_%s","secondName_%s","city_%s");'
                % (SCHEMA_1 + '.' + TEST_TABLE_A, str(i), str(i), str(i),
                   str(i)))
            if exit_code != 0:
                UpgradePerNode.reportProgress(
                    "[FAILED][PHOENIX][Smoke] Table UPSERT %s on schema %s failed due to exitcode = %s "
                    % (TEST_TABLE_A, SCHEMA_1, exit_code))
            else:
                UpgradePerNode.reportProgress(
                    "[PASSED][PHOENIX][Smoke] Table UPSERT %s on schema %s succeeded."
                    % (TEST_TABLE_A, SCHEMA_1))

        #We verify that the data has been correctly inserted
        exit_code, stdout = Phoenix.runSQLLineCmds(
            'SELECT * FROM %s WHERE ID=3;' % (SCHEMA_1 + '.' + TEST_TABLE_A))
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Table SELECT %s on schema %s failed due to exitcode = %s "
                % (TEST_TABLE_A, SCHEMA_1, exit_code))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Table SELECT %s on schema %s succeeded."
                % (TEST_TABLE_A, SCHEMA_1))

        if stdout.find('name_3') == -1 or stdout.find(
                'secondName_3') == -1 or stdout.find('city_3') == -1:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Table SELECT %s on schema %s returned the wrong results: %s"
                % (TEST_TABLE_A, SCHEMA_1, stdout))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Table SELECT %s on schema %s succeeded."
                % (TEST_TABLE_A, SCHEMA_1))

        #We verify that we can drop the schemas with tables on it.
        exit_code, stdout = Phoenix.runSQLLineCmds('DROP SCHEMA %s;' %
                                                   SCHEMA_1)
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Schema drop failed due to exitcode = %s "
                % (exit_code))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Schema drop succeeded.")

        #We verify that the schema has been dropped.
        exit_code, stdout = Phoenix.runSQLLineCmds(
            'SELECT TABLE_NAME FROM SYSTEM.CATALOG WHERE SCHEMA = %s' %
            SCHEMA_1,
            outputFormat='xmlattr')
        if exit_code != 0:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Schema drop verification query failed due to exitcode = %s "
                % (exit_code))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Schema drop verification query succeeded.")
        # find() returns -1 when the table name is absent, i.e. the drop worked.
        if stdout.find(TEST_TABLE_A) != -1:
            UpgradePerNode.reportProgress(
                "[FAILED][PHOENIX][Smoke] Table %s did not drop on drop schema command "
                % (TEST_TABLE_A))
        else:
            UpgradePerNode.reportProgress(
                "[PASSED][PHOENIX][Smoke] Table %s successfully dropped." %
                TEST_TABLE_A)
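
For reference, the two properties toggled through HBASE_CHANGES above correspond to this hbase-site.xml fragment:

<property>
  <name>phoenix.schema.isNamespaceMappingEnabled</name>
  <value>true</value>
</property>
<property>
  <name>phoenix.schema.mapSystemTablesToNamespace</name>
  <value>true</value>
</property>
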
Ejemplo n.º 34
0
# Copyright  (c) 2011-2017, Hortonworks Inc.  All rights reserved.
#
# Except as expressly permitted in a written agreement between your
# company and Hortonworks, Inc, any use, reproduction, modification,
# redistribution, sharing, lending or other exploitation of all or
# any part of the contents of this file is strictly prohibited.
#
#

import logging, requests, json, random, time
from beaver.config import Config
import beaver.component.dataplane.coreApi.DP_CONSTANTS as DP_CONSTANTS
from beaver.component.ambari import Ambari

logger = logging.getLogger(__name__)
DP_IP_ADDR = Config.get("dataplane", "DP_HOST_ADDR")
DP_HTTP_URL = ("https://%s/" % DP_IP_ADDR)
DP_KNOX_URL = (
    "https://%s:8443/gateway/knoxsso/api/v1/websso?originalUrl=https://%s/" %
    (DP_IP_ADDR, DP_IP_ADDR))
TEST_AMBARI_HOST1 = Config.get("multicluster", "AMBARI_GATEWAY1")
TEST_AMBARI_HOST2 = Config.get("multicluster", "AMBARI_GATEWAY2")
TEST_AMBARI_WEB_URL1 = Ambari.getWebUrl(hostname=TEST_AMBARI_HOST1)
TEST_AMBARI_WEB_URL2 = Ambari.getWebUrl(hostname=TEST_AMBARI_HOST2)


class DP_Api(object):
    def __init__(self):
        self.session = requests.session()
        self.session.headers['Content-Type'] = 'application/json'
        self.session.verify = False
Ejemplo n.º 35
0
def getTempFilepath():
    return os.path.join(Config.getEnv('ARTIFACTS_DIR'), 'tmp-%d' % int(999999*random.random()))
Ejemplo n.º 36
0
from taskreporter.taskreporter import TaskReporter

import logging
import os
import time, datetime
from selenium import webdriver

from beaver.machine import Machine
from beaver.config import Config
from beaver.seleniumHDP import Selenium
import beaver.component.hdfsUI.commonUtils as commonUtils
from beaver.component.hdfsUI.knoxLoginPage import KnoxLoginPage
# HDFS and YARN are referenced by BasePage below
from beaver.component.hadoop import HDFS, YARN

logger = logging.getLogger(__name__)

USER_NAME = Config.get('hadoop', 'HADOOPQA_USER')
PASSWORD = "******"


class BasePage(object):
    KNOX_HOST = Config.get('knox', 'KNOX_HOST').split(',')[0]
    KNOX_PORT = '8443'
    PROTOCOL = "https://"
    TOPOLOGY = "ui"
    if HDFS.isFederated():
        TOPOLOGY = "ui_ns1"
    URL_APPEND = "/gateway/%s/yarnuiv2/" % TOPOLOGY
    PROXY_URL = PROTOCOL + KNOX_HOST + ":" + KNOX_PORT + URL_APPEND

    driver = None
    DEFAULT_TIMEOUT = 10
Ejemplo n.º 37
0
    def getArtifactsDir(cls):
        return Config.getEnv('ARTIFACTS_DIR')
Ejemplo n.º 38
0
from selenium.common.exceptions import NoSuchElementException  # used by findElement below

class BasePage(object):
    KNOX_HOST = Config.get('knox', 'KNOX_HOST').split(',')[0]
    KNOX_PORT = '8443'
    PROTOCOL = "https://"
    TOPOLOGY = "ui"
    if HDFS.isFederated():
        TOPOLOGY = "ui_ns1"
    URL_APPEND = "/gateway/%s/yarnuiv2/" % TOPOLOGY
    PROXY_URL = PROTOCOL + KNOX_HOST + ":" + KNOX_PORT + URL_APPEND

    driver = None
    DEFAULT_TIMEOUT = 10
    proxy = 'true'
    __timeout = DEFAULT_TIMEOUT

    def __init__(self, driver=None, proxy='true'):
        self.proxy = proxy
        if driver:
            self.driver = driver
        else:
            self.driver = self.instantiateWebdriver()

            assert self.driver, "Could not initialize selenium webdriver"

    @TaskReporter.report_test()
    def getBaseUrl(self):
        if self.proxy == 'true' and YARN.isKnoxProxySet():
            return self.PROXY_URL
        else:
            return YARN.getRMUrl() + "/ui2/#/cluster-overview"

    def loginToknoxSSO(self):
        loginPage = KnoxLoginPage(self)
        assert loginPage.isLoginPage()

        # Also assert that URL contains SSO Login URL
        currentURL = loginPage.getCurrentURL()
        assert commonUtils.getSSOLoginURL() in currentURL

        # Do Login
        loginPage.doLogin(USER_NAME, PASSWORD)

    def getCurrentURL(self):
        return self.driver.current_url

    @TaskReporter.report_test()
    def checkIfCurrentURLIsProxyURL(self):

        url = self.getCurrentURL()
        if not YARN.isKnoxProxySet() and self.PROXY_URL not in url:
            return True
        if self.PROXY_URL in url:
            return True

        return False

    def deleteSession(self):
        self.quitWebdriver()

    def goToBasePage(self):
        self.driver.get(self.getBaseUrl())

    def navigateToPage(self, url):
        self.driver.get(url)

    def refreshCurrentPage(self):
        self.driver.get(self.driver.current_url)

    def getElement(self, locatorName, returnLocatorName=False):
        return locatorName if returnLocatorName else self.findElement(*self.locator_dictionary[locatorName])

    def getElements(self, locatorName, returnLocatorName=False):
        """
        Returns all the occurences of a matching pattern available on the web page
        """
        return locatorName if returnLocatorName else self.findElements(*self.locator_dictionary[locatorName])

    @TaskReporter.report_test()
    def findElements(self, *loc):
        try:
            return self.driver.find_elements(*loc)
        except Exception as e:
            logger.error("Caught exception: %s" % e)
            return loc

    @TaskReporter.report_test()
    def findElement(self, *loc):
        try:
            return self.driver.find_element(*loc)
        except (NoSuchElementException):
            logger.error("Element not found")
            logger.error(loc)
            return loc

    @TaskReporter.report_test()
    def quitWebdriver(self):
        try:
            Selenium.quitWebDriver(self.driver)
        except Exception, e:
            self.driver = None
            logger.warn("Ignoring webdriver quit failure: %s" % e)
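
A sketch of driving this page object through the Knox proxy (assumes Knox SSO is configured and a webdriver can be instantiated; the flow is illustrative, not a test from the framework):

page = BasePage(proxy='true')
page.goToBasePage()
# If Knox intercepts with its SSO form, authenticate first.
if commonUtils.getSSOLoginURL() in page.getCurrentURL():
    page.loginToknoxSSO()
assert page.checkIfCurrentURLIsProxyURL()
page.deleteSession()
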
Ejemplo n.º 39
0
    def getHadoopQAUser(cls):
        return Config.get('hadoop', 'HADOOPQA_USER')
Ejemplo n.º 40
0
class ruPig:

    _job_user = Config.getEnv("USER")
    _base_hdfs_dir = '/user/%s/ru-pig' % _job_user
    _SmokeInputDir = None
    _queue = 'pig'
    _local_workspace = Config.getEnv("WORKSPACE")
    _pig_script = os.path.join(_local_workspace, 'tests', 'rolling_upgrade',
                               'pig', 'script.pig')
    _golden_src_file = os.path.join(_local_workspace, 'tests',
                                    'rolling_upgrade', 'pig', 'input.txt')
    _artifacts_dir = Config.getEnv('ARTIFACTS_DIR')
    _hdfs_input_dir = _base_hdfs_dir + '/input'
    _hdfs_input_path = _hdfs_input_dir + '/input.txt'
    _hdfs_smoke_input_path = _hdfs_input_dir + '/smoke_input.txt'
    _hdfs_output_dir = _base_hdfs_dir + '/output'
    _hdfs_smoke_output_dir = _base_hdfs_dir + '/smoke_output'
    _hdfs_success_filepath = _hdfs_output_dir + "/_SUCCESS"
    _process = None
    _long_running_job_timeout_secs = 600

    @classmethod
    def background_job_setup(cls, runSmokeTestSetup=True, config=None):
        '''
        Setup for background long running job
        Upload Data to HDFS before Upgrade starts
        Creates /user/hrt_qa/ru-pig dir on HDFS
        Creates and Upload large data file to /user/hrt_qa/ru-pig/input/
        :param runSmokeTestSetup: Runs smoke test setup if set to true
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Running Pig BackGround Job Setup ####")
        HDFS.deleteDirectory(cls._base_hdfs_dir)
        exit_code, stdout = HDFS.createDirectory(cls._base_hdfs_dir,
                                                 user=cls._job_user,
                                                 perm=777,
                                                 force=True)
        ruAssert("Pig", exit_code == 0,
                 '[BGJobSetup] could not create dir on hdfs.')

        HDFS.createDirectory(cls._hdfs_input_dir, force=True)
        srcFile = os.path.join(cls._artifacts_dir, 'pig-ru-input.txt')
        if os.path.exists(srcFile):
            os.remove(srcFile)
        tmpFile = os.path.join(cls._artifacts_dir, 'pig-ru-tmp-input.txt')
        if os.path.exists(tmpFile):
            os.remove(tmpFile)
        util.copyFileToAnotherFile(cls._golden_src_file, srcFile)
        util.copyFileToAnotherFile(srcFile, tmpFile)
        itr = 12
        if Machine.isFlubber():
            itr = 16
        for i in range(itr):
            util.copyFileToAnotherFile(srcFile, tmpFile)
            util.copyFileToAnotherFile(tmpFile, srcFile)
        exit_code, stdout = HDFS.copyFromLocal(srcFile, cls._hdfs_input_path)
        ruAssert("Pig", exit_code == 0, '[BGJobSetup] Data Load failed')

        if runSmokeTestSetup:
            cls.smoke_test_setup()

    @classmethod
    def smoke_test_setup(cls):
        '''
        Setup required to run Smoke test
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress("### Running Pig Smoke Test Setup ####")
        exit_code, stdout = HDFS.copyFromLocal(cls._golden_src_file,
                                               cls._hdfs_smoke_input_path)
        ruAssert("Pig", exit_code == 0,
                 '[SmokeSetup] Smoke Test Data Load failed')

    @classmethod
    def run_background_job(cls, runSmokeTestSetup=True, config=None):
        '''
        Runs background long running Pig Job
        :param runSmokeTestSetup: Runs smoke test setup if set to true
        :param config: expected configuration location
        :return: Total number of long running jobs started
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Background application for Pig started ####")
        runCmd = '-p \"INPUT_PATH=%s\" -p \"OUTPUT_PATH=%s\" -p \"QUEUE=%s\" %s' % (
            cls._hdfs_input_path, cls._hdfs_output_dir, cls._queue,
            cls._pig_script)
        #If Tez is enabled, add it in runCmd
        TEZ_ENABLED = Hadoop.isTez(True, False)
        if TEZ_ENABLED:
            runCmd = '-x \"tez\" %s' % runCmd
        cls.runCommand(cls._job_user, runCmd, runInBackground=True)
        return 1

    @classmethod
    def run_smoke_test(cls, smoketestnumber, config=None):
        '''
        Run smoke test for Pig
        :param smoketestnumber: Used for unique output log location
        '''
        return cls.run_client_smoketest(config=config)

    @classmethod
    def background_job_teardown(cls):
        '''
        Cleanup for long running Pig job
        '''
        logger.info("TODO")

    @classmethod
    def verifyLongRunningJob(cls):
        '''
        Validate long running background job after end of all component upgrade
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Verifying Long running job for Pig ####")
        # Check if the Long Running process is not None
        if cls._process is not None:
            # If process poll() returns None, it means the process has not finished yet.
            if cls._process.poll() is None:
                UpgradePerNode.reportProgress(
                    "### Long running job for Pig has not Finished yet. Waiting for it to complete ####"
                )
                # Wait up to _long_running_job_timeout_secs for the job to complete
                starttime = time.time()
                while (time.time() - starttime <
                       cls._long_running_job_timeout_secs
                       ) and cls._process.poll() is None:
                    time.sleep(5)

            exit_code = cls._process.poll()
            if exit_code is None:
                logger.info("Killing Pig Long running job process '%d'" %
                            cls._process.pid)
                Machine.killProcess(cls._process.pid)
                UpgradePerNode.reportProgress(
                    "### [FAILED][Pig][BGJob] Long running job for Pig Failed to finish ####"
                )
            elif exit_code != 0:
                UpgradePerNode.reportProgress(
                    "### [FAILED][Pig][BGJob] Long running job for Pig Failed and Exited with '%d' ####"
                    % exit_code)
            else:
                UpgradePerNode.reportProgress(
                    "### Long running job for Pig Finished ####")

            #Check for _SUCCESS file in HDFS Path
            if HDFS.fileExists(cls._hdfs_success_filepath, cls._job_user):
                UpgradePerNode.reportProgress(
                    "### [PASSED][Pig][BGJob] Found _SUCCESS file in HDFS for Pig Long running job ####"
                )
            else:
                UpgradePerNode.reportProgress(
                    "### [FAILED][Pig][BGJob] Not Found _SUCCESS file in HDFS for Pig Long running job. ####"
                )
        else:
            UpgradePerNode.reportProgress(
                "### [FAILED][Pig][BGJob] Long Running Pig Job Failed. No Process found ####"
            )
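
The wait-and-poll logic above can be factored into a small bounded-wait helper; an illustrative sketch (not part of the framework):

import time

def wait_for_exit(process, timeout_secs, poll_interval=5):
    # Poll a subprocess until it exits or the timeout elapses.
    # Returns the exit code, or None if the process is still running.
    deadline = time.time() + timeout_secs
    while time.time() < deadline and process.poll() is None:
        time.sleep(poll_interval)
    return process.poll()
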

    @classmethod
    def upgrade_master(cls, version, config=None):
        '''
        Upgrades Master services:
        :param version: Version to be upgraded to
        :param config: Config location
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Pig does not have any master, so no master to upgrade ####")

    @classmethod
    def upgrade_slave(cls, version, node, config=None):
        '''
        Upgrades slave services :
        :param version: Version to be upgraded to
        :param node: Slave Node
        :param config: Config location
        :return:
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Pig does not have any slaves, so no slaves to upgrade ####")

    @classmethod
    def downgrade_master(cls, version, config=None):
        '''
        Downgrade Master services
        :param version: Version to be downgraded to
        :param config: Configuration location
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Pig does not have any master, so no master to downgrade ####")

    @classmethod
    def downgrade_slave(cls, version, node, config=None):
        '''
        Downgrade slave services
        :param version: version to be downgraded to
        :param config: Configuration location
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Pig does not have any slaves, so no slaves to downgrade ####")

    @classmethod
    def run_client_smoketest(cls, config=None, env=None):
        '''
        Run Smoke test after upgrading Client
        :param config: Configuration location
        :param env: Set Environment variables
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress("### Running Pig Smoke test ####")
        runCmd = '-p \"INPUT_PATH=%s\" -p \"OUTPUT_PATH=%s\" -p \"QUEUE=%s\" %s' % (
            cls._hdfs_smoke_input_path, cls._hdfs_smoke_output_dir, cls._queue,
            cls._pig_script)
        #If Tez is enabled, add it in runCmd
        TEZ_ENABLED = Hadoop.isTez(True, False)
        if TEZ_ENABLED:
            runCmd = '-x \"tez\" %s' % runCmd
        cls.runCommand(cls._job_user, runCmd, runInBackground=False)
        UpgradePerNode.reportProgress("### Pig Smoke test Finished ####")

    @classmethod
    def testAfterAllSlavesRestarted(cls):
        '''
        Test that the upgrade completed properly after all masters and slaves for HDFS, YARN and HBase have been upgraded
        :return:
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "### Pig does not have any slaves, so nothing to do here ####")

    @classmethod
    def runCommand(cls, user, cmd, runInBackground=False):
        """
        Function to run pig cmd as a certain user
        :param user:
        :param cmd:
        :return:
        """
        try:
            from beaver.component.pig import Pig
        except ImportError:
            ## Import fails when Pig is not installed on this machine. Nothing to do
            return 0, ""

        if runInBackground:
            cls._process = Pig.runas(user, cmd, runInBackground=True)
        else:
            exit_code, stdout = Pig.runas(user, cmd, runInBackground=False)
            from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
            if exit_code == 0:
                UpgradePerNode.reportProgress(
                    "### [PASSED][Pig][Smoke] Pig Smoke test passed ####")
            else:
                UpgradePerNode.reportProgress(
                    "### [FAILED][Pig][Smoke] Pig Smoke test Failed ####")
Ejemplo n.º 41
0
    def getHDFSUser(cls):
        return Config.get('hadoop', 'HDFS_USER')
Ejemplo n.º 42
0
class Java2:
    #set the java version
    _version = '1.8.0_112'
    _workspace = Config.getEnv('WORKSPACE')
    #determine tools path
    _tools_path = os.path.join(_workspace, 'tools')
    #determine java home
    _java_home = Config.get('machine', 'QA_CODE_JAVA_HOME')
    _jdk_download_url = Config.get('machine', 'JDK8_64_URL')
    _jdk_download_url_7 = Config.get('machine', 'JDK7_80_URL')
    _java_cmd = os.path.join(_java_home, 'bin', 'java')

    #method to setup java
    @classmethod
    def setupJava(cls):
        # if dir exists return as its already setup
        if os.path.isdir(cls._java_home):
            logger.info("JDK already installed, skipping setup of JDK")
            return cls._java_home
        # check if tarball exists or not before downloading it
        tarName = "jdk-8u112-linux-x64.tar.gz"
        tarballPath = os.path.join(cls._tools_path, tarName)
        if not os.path.isfile(tarballPath):
            # download java
            assert util.downloadUrl(cls._jdk_download_url, tarballPath)
        # now install java
        Machine.run('chmod 755 ' + tarballPath)
        Machine.run('cd ' + cls._tools_path + '; tar zxvf ' + tarballPath)
        #Machine.run('echo A | .' + tarballPath + ' -noregister 2>&1')
        #Machine.tarExtractAll(filepath=tarballPath, outpath = cls._tools_path, mode='r:gz')
        print cls._java_home
        assert os.path.isfile(cls._java_cmd)
        return cls._java_home

    @classmethod
    def setupJava7(cls):
        tarName = "jdk-7u80-linux-x64.tar.gz"
        jdk7_folder_name = "jdk1.7.0_80"
        jdk7_java_home = os.path.join(cls._tools_path, jdk7_folder_name)
        # if the directory exists, JDK 7 is already set up
        if os.path.isdir(jdk7_java_home):
            logger.info("JDK 7 already installed, skipping setup of JDK 7")
            return jdk7_java_home
        # check whether the tarball exists before downloading it
        tarballPath = os.path.join(cls._tools_path, tarName)
        if not os.path.isfile(tarballPath):
            # download java
            logger.info("JDK 7 downloading")
            assert util.downloadUrl(cls._jdk_download_url_7, tarballPath)
        # now install java
        Machine.run('chmod 755 ' + tarballPath)
        Machine.run('cd ' + cls._tools_path + '; tar zxvf ' + tarballPath)
        return jdk7_java_home

    @classmethod
    def runJar(cls, jarName, workingDir=None):
        # resolve the working directory per call; a default of os.getcwd() in the
        # signature would be evaluated only once, at import time
        if workingDir is None:
            workingDir = os.getcwd()
        (exit_code, output) = Machine.run(cls._java_cmd + " -jar " + jarName,
                                          cwd=workingDir,
                                          env=None,
                                          logoutput=False)
        return (exit_code, output)
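
A short usage sketch for Java2; the jar name and working directory are placeholders:

    # Hypothetical usage of Java2 (placeholder jar name and directory).
    java_home = Java2.setupJava()  # downloads and unpacks JDK 8 if missing
    exit_code, output = Java2.runJar("my-tool.jar", workingDir="/tmp")
    assert exit_code == 0, "jar failed: %s" % output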
 def getCluster(cls):
     return Config.get('hadoop', 'CLUSTER') 
Ejemplo n.º 44
0
class Maven2:
    # set the maven version
    _version = '3.0.4'
    # determine java home
    _java_home = Java2._java_home
    _workspace = Config.getEnv('WORKSPACE')
    # determine tools path
    _tools_path = os.path.join(_workspace, 'tools')
    # determine maven home
    _maven_home = Config.get('machine', 'MAVEN_HOME')
    _maven_cmd = os.path.join(_maven_home, 'bin', 'mvn')
    # what url to download maven from
    _maven_download_url = Config.get('machine', 'MAVEN_URL')

    #method to run maven cmd
    @classmethod
    def run(cls,
            cmd,
            cwd=None,
            env=None,
            mavenOpts=None,
            logoutput=True,
            user=None):
        # make sure maven is set up before it is run
        cls.setupMaven()

        # initialize env
        if not env:
            env = {}

        # determine if MAVEN_OPTS need to be set
        if mavenOpts:
            opts = os.environ.get('MAVEN_OPTS')
            if not opts:
                opts = mavenOpts
            else:
                opts = ' '.join([opts, mavenOpts])
            env['MAVEN_OPTS'] = opts

        env['JAVA_HOME'] = cls._java_home
        env['M2_HOME'] = cls._maven_home

        # print the env so we can see what we are setting
        logger.info('Env for mvn cmd')
        logger.info(env)

        maven_cmd = "%s %s" % (cls._maven_cmd, cmd)
        exit_code, stdout = Machine.run(maven_cmd,
                                        cwd=cwd,
                                        env=env,
                                        logoutput=logoutput)
        return exit_code, stdout

    # method to setup maven
    @classmethod
    def setupMaven(cls):
        # MAVEN_HOME must be configured; bail out if it is not
        if not cls._maven_home:
            logger.error(
                "MAVEN_HOME parameter not found. It seems like maven is not installed!"
            )
            sys.exit(-1)
        else:
            logger.info("_maven_home : %s" % cls._maven_home)
            logger.info("Maven already present at %s. Skipping install" %
                        cls._maven_home)

    # Method to update pom.xml entries with the specified annotations - both include and exclude
    @classmethod
    def setProjectCategories(cls, path, groupsValue, excludedGroupsValue):
        try:
            fh, abs_path = mkstemp()
            with open(abs_path, 'w') as new_file:
                with open(path) as old_file:
                    for line in old_file:
                        new_file.write(
                            line.replace("${testcase.include.groups}",
                                         str(groupsValue)).replace(
                                             "${testcase.exclude.groups}",
                                             str(excludedGroupsValue)))
            close(fh)
            remove(path)
            move(abs_path, path)
        except Exception:
            logger.error(
                "Exception occurred during setProjectCategories to update pom.xml"
            )
            logger.error(traceback.format_exc())
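
A usage sketch for Maven2, assuming a checked-out project at /tmp/project whose pom.xml contains the ${testcase.include.groups} and ${testcase.exclude.groups} placeholders; the path and category names are illustrative:

    # Hypothetical Maven2 usage; paths and category names are placeholders.
    Maven2.setProjectCategories("/tmp/project/pom.xml",
                                "com.example.SmokeTests",  # include groups
                                "com.example.SlowTests")   # exclude groups
    exit_code, stdout = Maven2.run("clean test", cwd="/tmp/project", mavenOpts="-Xmx2g")
    # Any MAVEN_OPTS already in the environment is preserved and "-Xmx2g" is appended.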
Ejemplo n.º 45
0
 def get_java_home(cls, refresh=False):
     if not cls._java_home or refresh:
         zookeeper_env = os.path.join(Config.get('zookeeper', 'ZK_HOME'), "conf", "zookeeper-env.sh")
         cls._java_home = util.getPropertyValueFromFile(zookeeper_env, "JAVA_HOME")
     return cls._java_home
Ejemplo n.º 46
0
from functools import wraps

###############################################################################
logger = logging.getLogger(__name__)

###############################################################################
SETUP_TIME = 60
SHORT_TEST_TIMEOUT = SETUP_TIME + 1 * 60
MEDIUM_TEST_TIMEOUT = SETUP_TIME + 3 * 60
LONG_TEST_TIMEOUT = SETUP_TIME + 9 * 60

CONF = {}
CONF['KNOX_GUEST_USERNAME'] = "******"
CONF['KNOX_GUEST_PASSWORD'] = "******"
CONF['KNOX_PROTO'] = "https"
CONF['KNOX_HOST'] = Config.get('knox', 'KNOX_HOST').split(',')[0]
CONF['AMBARI_HOST'] = Config.get("machine", "GATEWAY")
CONF['KNOX_PORT'] = 8443
CONF['AMBARI_PORT'] = 8080
CONF['KNOX_TOPO'] = "sandbox"
CONF['KNOX_BASE_URL'] = "%s://%s:%s/gateway" % (
    CONF['KNOX_PROTO'], CONF['KNOX_HOST'], CONF['KNOX_PORT'])
CONF['KNOX_TOPO_URL'] = "%s/%s" % (CONF['KNOX_BASE_URL'], CONF['KNOX_TOPO'])
CONF['KNOX_WEBHDFS_URL'] = "%s/%s/webhdfs/v1/" % (CONF['KNOX_BASE_URL'],
                                                  CONF['KNOX_TOPO'])
CONF['DIRECT_WEBHDFS_URL'] = "http://%s/webhdfs/v1/" % (
    HDFS.getNamenodeHttpAddress())
CONF['RANGER_KNOX_POLICY'] = None

#CONF['SRC_DIR'] = os.path.join(Config.getEnv('WORKSPACE'), 'tests', 'knox', 'knox_2')
knox_host = CONF['KNOX_HOST']
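
Given the CONF dictionary above, a WebHDFS call through the Knox gateway might look like the sketch below, using the requests library; the target path and the disabled certificate verification are assumptions for a test gateway with a self-signed certificate:

    # Hypothetical LISTSTATUS call through the Knox gateway built from CONF above.
    import requests
    resp = requests.get(CONF['KNOX_WEBHDFS_URL'] + "tmp",
                        params={"op": "LISTSTATUS"},
                        auth=(CONF['KNOX_GUEST_USERNAME'], CONF['KNOX_GUEST_PASSWORD']),
                        verify=False)  # self-signed gateway cert assumed
    assert resp.status_code == 200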
Ejemplo n.º 47
0
 def runas(cls, user, cmd, logoutput=True):
     hadoop_cmd = Config.get('hadoop', 'HADOOP_CMD')
     if Config.get('hadoop', 'HADOOP_CONF_EXCLUDE') == 'False':
         hadoop_cmd += " --config " + Config.get('hadoop', 'HADOOP_CONF')
     hadoop_cmd += " " + cmd
     return Machine.runas(user, hadoop_cmd, logoutput=logoutput)
Ejemplo n.º 48
0
def isRangerInstalled():
    return Config.get('xasecure', 'XA_INSTALLED') == 'yes'
Ejemplo n.º 49
0
 def waitForNNOutOfSafemode(cls):
     exit_code, output = Hadoop.runas(Config.get('hadoop', 'HDFS_USER'), "dfsadmin -safemode get")
     while (re.search(".*Safe mode is ON",output) != None):
         exit_code, output = Hadoop.runas(Config.get('hadoop', 'HDFS_USER'), "dfsadmin -safemode get")
         time.sleep(20)
     return True
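
The loop above polls forever if the NameNode never leaves safemode. A bounded variant in the same fragment style is sketched below; the timeout and polling interval are assumptions, not part of the original:

 def waitForNNOutOfSafemodeBounded(cls, timeout=600, interval=20):
     # Hedged sketch: same check as above, but gives up after `timeout` seconds.
     hdfs_user = Config.get('hadoop', 'HDFS_USER')
     waited = 0
     while waited < timeout:
         exit_code, output = Hadoop.runas(hdfs_user, "dfsadmin -safemode get")
         if re.search(".*Safe mode is ON", output) is None:
             return True
         time.sleep(interval)
         waited += interval
     return False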
Ejemplo n.º 50
0
class UpgradeLogger:
    UPGRADE_STATUS_LOG_FILE = os.path.join(Config.getEnv('ARTIFACTS_DIR'), 'upgrade_progress.log')

    # This method returns the name of the upgrade log file
    @classmethod
    def get_progress_local_file(cls):
        return cls.UPGRADE_STATUS_LOG_FILE

    # This method writes info/error messages to the log file
    @classmethod
    def reportProgress(cls, message, is_info_message):
        message = message + "\n"
        timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S,%f')[:-3]
        if is_info_message:
            logger.info(message)
            util.writeToFile(
                timestamp + "|INFO|" + message.encode('utf-8'), cls.UPGRADE_STATUS_LOG_FILE, isAppend=True
            )
        else:
            logger.error(message)
            util.writeToFile(
                timestamp + "|ERROR|" + message.encode('utf-8'), cls.UPGRADE_STATUS_LOG_FILE, isAppend=True
            )

    # This method returns the last 100 lines of the error text; if there are fewer than 100 lines, all are returned
    @classmethod
    def get_error_text(cls, error_text):
        error_messages = error_text.split("\n")
        error_lines = "======================================\n"
        max_lines_to_print = 100
        total_lines = len(error_messages)

        if total_lines > max_lines_to_print:
            for error_message_index in range(total_lines - max_lines_to_print, total_lines):
                error_lines = error_lines + error_messages[error_message_index].decode('utf-8') + '\n'
        else:
            for error_message_index in range(total_lines):
                error_lines = error_lines + error_messages[error_message_index].decode('utf-8') + '\n'

        error_lines += "\n======================================"

        return error_lines

    # This method returns testname and associated failure message
    @classmethod
    def get_stack_trace(cls, test_name, current_dir):
        apiReportDirectory = os.path.join(current_dir, 'target', 'surefire-reports', 'junitreports')
        uifrmReportDirectory = os.path.join(current_dir, 'target', 'surefire-reports')

        pattern = '*' + UpgradeLogger.get_testclass_name(test_name) + '*.xml'

        testResultFiles = util.findMatchingFiles(
            apiReportDirectory, pattern
        )  # First search for test result xml in api framework dir
        if len(testResultFiles) == 0:
            testResultFiles = util.findMatchingFiles(
                uifrmReportDirectory, pattern
            )  # Now search for test result xml in uifrm dir
            Machine.runas(
                'root', 'chmod -R 755 ' + uifrmReportDirectory
            )  # Provide read permissions to everyone in uifrm junit report xml directory
        else:
            Machine.runas(
                'root', 'chmod -R 755 ' + apiReportDirectory
            )  # Provide read permissions to everyone in api framework report xml directory

        testresult = {}
        for resultFile in testResultFiles:
            testresult.update(util.parseJUnitXMLResult(resultFile))

        testOutput = {}

        for key, value in testresult.items():
            m = re.search("([^\.]*)$", key)  # Extract test name
            print "key : %s " % key
            print "value : %s " % value
            key = m.group(0)
            fail_message = value['failure']
            print "Final key: %s" % key
            print "Final fail_msg: %s" % fail_message

            if fail_message:
                testOutput[str(key)] = fail_message

        print testOutput

        return testOutput

    @classmethod
    def get_testclass_name(cls, test_name):
        if "#" in test_name:
            return test_name.split("#")[0]
        else:
            return test_name
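
The tail-of-log logic in get_error_text can be expressed more compactly with a negative list slice; below is a sketch of an equivalent method that could sit alongside it on UpgradeLogger (same 100-line cap and framing):

    # Hedged sketch: a slice handles both the >100-line and <=100-line cases.
    @classmethod
    def get_error_text_sliced(cls, error_text, max_lines_to_print=100):
        tail = error_text.split("\n")[-max_lines_to_print:]
        return ("======================================\n"
                + "".join(line.decode('utf-8') + '\n' for line in tail)
                + "\n======================================")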
Ejemplo n.º 51
0
 def refreshDatanodes(cls):
     exit_code, output = Hadoop.runas(Config.get('hadoop', 'HDFS_USER'), "dfsadmin -refreshNodes")
     return output
Ejemplo n.º 52
0
class Java(object):
    _JAVA_HOME = Config.get('machine', 'JAVA_HOME')
    _JAVA_CMD = os.path.join(_JAVA_HOME, 'bin', 'java')
    _JAVAC_CMD = os.path.join(_JAVA_HOME, 'bin', 'javac')
    _KEYTOOL_CMD = os.path.join(_JAVA_HOME, 'bin', 'keytool')
    _BEAVER_JAVA_BASE_DIR = os.path.join(Config.getEnv('WORKSPACE'), 'beaver',
                                         'java')
    _BEAVER_JAVA_COMMON_SRC_DIR = os.path.join(_BEAVER_JAVA_BASE_DIR, 'common',
                                               'src')
    _BEAVER_JAVA_TEZ_EXAMPLES_DIR = os.path.join(_BEAVER_JAVA_BASE_DIR, 'tez')
    _QE_UTILS_SRC_DIR = os.path.join(_BEAVER_JAVA_COMMON_SRC_DIR, 'com',
                                     'hortonworks', 'qe', 'utils')

    def __init__(self):
        pass

    @classmethod
    def getBeaverJavaCommonSrcDir(cls):
        return cls._BEAVER_JAVA_COMMON_SRC_DIR

    @classmethod
    def getBeaverJavaTezExamplesDir(cls):
        '''
        Returns WORKSPACE/beaver/java/tez
        '''
        return cls._BEAVER_JAVA_TEZ_EXAMPLES_DIR

    @classmethod
    def getQEUtilsSrcDir(cls):
        return cls._QE_UTILS_SRC_DIR

    @classmethod
    def runJavac(cls, sourceFiles, targetDir, classPath=None):
        '''
        Runs javac to compile source files with current user. Class files will be in targetDir.
        Returns (exit_code, stdout)
        '''
        if classPath is None:
            classPathStr = ""
        else:
            classPathStr = " -classpath " + classPath
        Machine.makedirs(user=None, host=None, filepath=targetDir, passwd=None)
        (exit_code,
         output) = Machine.run(cls._JAVAC_CMD + classPathStr + " -d %s %s" %
                               (targetDir, sourceFiles))
        return (exit_code, output)

    @classmethod
    def runJava(cls, workingDir, fqClassName, classPath=None, cmdArgs=None):
        if not cmdArgs:
            cmdArgs = []

        if classPath is None:
            classPathStr = ""
        else:
            classPathStr = " -classpath " + classPath
        cmdArgsStr = ''
        for cmdArg in cmdArgs:
            cmdArgsStr += ' %s' % cmdArg
        (exit_code, output) = Machine.run(cls._JAVA_CMD + classPathStr +
                                          " %s %s" % (fqClassName, cmdArgsStr),
                                          cwd=workingDir)
        return (exit_code, output)

    @classmethod
    def runKeyTool(cls, command, options=None):
        if not options:
            options = {}

        cmd = " -%s" % command
        for key, value in options.items():
            cmd += " %s %s" % (key, value)
        (exit_code, output) = Machine.run(cls._KEYTOOL_CMD + cmd)
        return (exit_code, output)

    @classmethod
    def runOpenSslTool(cls, command, options=None):
        if not options:
            options = {}

        cmd = " %s" % command
        for key, value in options.items():
            cmd += " %s %s" % (key, value)
        (exit_code, output) = Machine.run('openssl' + cmd)
        return (exit_code, output)

    @classmethod
    def createKeyStores(cls, storedir, hosts=None, certify_key=False):
        if not hosts:
            hosts = ['localhost']
        if certify_key:
            certFile = os.path.join(storedir, 'ca-cert')
            keyFile = os.path.join(storedir, 'ca-key')
            exit_code, _output = cls.runOpenSslTool(
                "req", {
                    "-nodes": "",
                    "-new": "",
                    "-newkey": "rsa:2048",
                    "-x509": "",
                    "-keyout": keyFile,
                    "-out": certFile,
                    "-days": 365,
                    "-subj": "'/CN=myclusterxxx.hwx.site/OU=HWX/O=QE/L=Santa Clara/ST=CA/C=US'"
                })
            assert exit_code == 0, "Failed to generate CA"

            clientTruststoreFile = os.path.join(storedir, "truststore.jks")
            exit_code, _output = cls.runKeyTool(
                "import", {
                    "-keystore": clientTruststoreFile,
                    "-alias": "CARoot",
                    "-file": certFile,
                    "-storepass": "******",
                    "-noprompt": ""
                })
            assert exit_code == 0, "Failed to add CA to clients truststore"

            serverTruststoreFile = os.path.join(storedir,
                                                "server.truststore.jks")
            exit_code, _output = cls.runKeyTool(
                "import", {
                    "-keystore": serverTruststoreFile,
                    "-alias": "CARoot",
                    "-file": certFile,
                    "-storepass": "******",
                    "-noprompt": ""
                })
            assert exit_code == 0, "Failed to add CA to servers truststore"

        for host in hosts:
            keystoreFile = os.path.join(storedir, "keystore_%s.jks" % host)
            exit_code, _output = cls.runKeyTool(
                "genkey", {
                    "-alias": "example.com_%s" % host,
                    "-keyalg": "RSA",
                    "-storetype": "JKS",
                    "-keysize": "2048",
                    "-storepass": "******",
                    "-keypass": "******",
                    "-keystore": keystoreFile,
                    "-dname": "\"CN=%s, OU=HWX, O=QE, L=Santa Clara, ST=CA, C=US\"" % host
                })
            assert exit_code == 0, "Failed to generate key"
            if certify_key:
                certReqFile = os.path.join(storedir, '%s-cert-file' % host)
                exit_code, _output = cls.runKeyTool(
                    "certreq", {
                        "-keystore": keystoreFile,
                        "-alias": "example.com_%s" % host,
                        "-file": certReqFile,
                        "-storepass": "******",
                        "-keypass": "******"
                    })
                assert exit_code == 0, "Failed to generate certificate signing request"

                certOutFile = os.path.join(storedir, '%s-cert-signed' % host)
                exit_code, _output = cls.runOpenSslTool(
                    "x509", {
                        "-req": "",
                        "-CA": certFile,
                        "-CAkey": keyFile,
                        "-in": certReqFile,
                        "-out": certOutFile,
                        "-days": 10,
                        "-CAcreateserial": "",
                        "-passin": "pass:password"
                    })

                exit_code, _output = cls.runKeyTool(
                    "import", {
                        "-keystore": keystoreFile,
                        "-alias": "CARoot",
                        "-file": certFile,
                        "-storepass": "******",
                        "-noprompt": ""
                    })
                assert exit_code == 0, "Failed to import CA"

                exit_code, _output = cls.runKeyTool(
                    "import", {
                        "-keystore": keystoreFile,
                        "-alias": "example.com_%s" % host,
                        "-file": certOutFile,
                        "-storepass": "******",
                        "-noprompt": ""
                    })
                assert exit_code == 0, "Failed to import certificate"
            else:
                certFile = os.path.join(storedir, "%s.crt" % host)
                exit_code, _output = cls.runKeyTool(
                    "export", {
                        "-alias": "example.com_%s" % host,
                        "-storepass": "******",
                        "-keystore": keystoreFile,
                        "-file": certFile
                    })
                assert exit_code == 0, "Failed to export certificate"
                truststoreFile = os.path.join(storedir, "truststore.jks")
                exit_code, _output = cls.runKeyTool(
                    "import", {
                        "-trustcacerts": "",
                        "-alias": "example.com_%s" % host,
                        "-noprompt": "",
                        "-storepass": "******",
                        "-keystore": truststoreFile,
                        "-file": certFile
                    })
                assert exit_code == 0, "Failed to import certificate"
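
A usage sketch for the keystore helper above; the store directory and hostnames are placeholders:

    # Hypothetical invocation; store_dir and the host list are placeholders.
    store_dir = "/tmp/ssl-stores"
    Machine.makedirs(user=None, host=None, filepath=store_dir, passwd=None)
    Java.createKeyStores(store_dir,
                         hosts=["host1.example.com", "host2.example.com"],
                         certify_key=True)
    # Produces a keystore_<host>.jks per host signed by the generated CA, plus
    # truststore.jks and server.truststore.jks containing the CA certificate.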
Ejemplo n.º 53
0
 def runInBackgroundAs(cls, user, cmd):
     hadoop_cmd = Config.get('hadoop', 'HADOOP_CMD')
     if Config.get('hadoop', 'HADOOP_CONF_EXCLUDE') == 'False':
         hadoop_cmd += " --config " + Config.get('hadoop', 'HADOOP_CONF')
     hadoop_cmd += " " + cmd
     return Machine.runinbackgroundAs(user, hadoop_cmd)
Ejemplo n.º 54
0
def updateProperties():
    if CommonLib.is_ambari_security_enabled(Config.get('ambari', 'AMBARI_SERVER_HTTPS')) \
            or CommonLib.is_ambari_security_enabled(Config.get('ambari', 'WIRE_ENCRYPTION')):
        port = 8443
        https = "yes"
    else:
        port = 8080
        https = "no"

    DEFAULT_TEST_TIMEOUT = 45  # default of 45 minutes; may later be accepted as input through ADDITIONAL_AMBARI_PROPS

    #populate the config properties file for api test code
    configPropChanges = {}
    configPropChanges['HOST'] = Config.get('ambari', 'HOST')
    configPropChanges['PORT'] = port

    # The check below is only done for the API framework post upgrade, so it is safe to set STACKNAME to HDP (the calls are made after upgrading from BigInsights to HDP)
    if 'BigInsights'.lower() in Config.get('ambari', 'STACK_TYPE').lower():
        logger.info("STACK_TYPE = BigInsights, changing to HDP for post upgrade operations")
        configPropChanges['STACKNAME'] = 'HDP'
    else:
        configPropChanges['STACKNAME'] = Config.get('ambari', 'STACK_TYPE')

    configPropChanges['STACKVERSION_TO_UPGRADE'] = Config.get('ambari', 'STACK_UPGRADE_TO')
    configPropChanges['BUILDNUMBER_TO_UPGRADE'] = Config.get('ambari', 'STACK_UPGRADE_TO')
    configPropChanges['MOTD_ENABLE'] = Config.get('ambari', 'MOTD_ENABLE')
    configPropChanges['AMBARI_DB'] = Config.get('ambari', 'AMBARI_DB')
    configPropChanges['AMBARI_SERVER_HTTPS'] = https

    configPropChanges['SECURITY'] = Config.get('machine', 'IS_SECURE')
    configPropChanges['KERBEROS_SERVER_TYPE'] = Config.get('ambari', 'KERBEROS_SERVER_TYPE')
    configPropChanges['REALM'] = Config.get('ambari', 'REALM')
    configPropChanges['USER_KERBEROS_SERVER_TYPE'] = Config.get('ambari', 'USER_KERBEROS_SERVER_TYPE')
    configPropChanges['AD_SERVER_HOST'] = Config.get('ambari', 'AD_SERVER_HOST')
    configPropChanges['USER_REALM'] = Config.get('ambari', 'USER_REALM')
    configPropChanges['CUSTOMIZED_SERVICES_USERS'] = Config.get('ambari', 'CUSTOMIZED_SERVICES_USERS')
    configPropChanges['AMBARI_2WAY_SSL'] = Config.get('ambari', 'AMBARI_2WAY_SSL')
    configPropChanges['WIRE_ENCRYPTION'] = Config.get('ambari', 'WIRE_ENCRYPTION')
    configPropChanges['DEFAULT_TESTCASE_TIMEOUT'] = DEFAULT_TEST_TIMEOUT
    configPropChanges['RUN_INSTALLER'] = Config.get('machine', 'RUN_INSTALLER')

    # Read additional Ambari props, especially Patch Upgrade related properties such as SERVICES_TO_UPGRADE and UPGRADE_TYPE
    logger.info("Checking if any ADDITIONAL_AMBARI_PROPS need to be updated")
    ADDITIONAL_AMBARI_PROPS = Config.get('ambari', 'ADDITIONAL_AMBARI_PROPS')
    if ADDITIONAL_AMBARI_PROPS:
        parameter_map = ADDITIONAL_AMBARI_PROPS.split(",")
        for parameter in parameter_map:
            key_value = parameter.split("=", 1)  # split on the first '=' only, so values may contain '='
            key = key_value[0]
            value = key_value[1]
            print "Reading key :%s = value :%s" % (key, value)
            configPropChanges[key] = value

    return configPropChanges
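
The same comma-separated key=value parsing appears again in read_job_properties below; a shared helper (a sketch, not part of the original) would remove the duplication:

    # Hedged sketch of a shared parser for ADDITIONAL_AMBARI_PROPS strings.
    def parse_additional_props(raw):
        props = {}
        if raw:
            for parameter in raw.split(","):
                key, value = parameter.split("=", 1)  # first '=' only
                props[key] = value
        return props

    # e.g. parse_additional_props("UPGRADE_TYPE=EXPRESS,SERVICES_TO_UPGRADE=HDFS")
    #      -> {'UPGRADE_TYPE': 'EXPRESS', 'SERVICES_TO_UPGRADE': 'HDFS'}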
Ejemplo n.º 55
0
 def refreshTasktrackers(cls):
     exit_code, output = Hadoop.runas(Config.get('hadoop', 'HDFS_USER'), "mradmin -refreshNodes")
     return output
Ejemplo n.º 56
0
    def read_job_properties(cls, local_work_dir):
        propChanges = {}

        COMPONENT = Config.get('ambari', 'COMPONENT')

        if 'deploytest' in COMPONENT:
            propChanges['INSTALL_HDP'] = "true"
            propChanges['MR_FRAMEWORK'] = Config.get('ambari', 'MR_FRAMEWORK')

        propChanges['HOST'] = Config.get('ambari', 'HOST') + ":8080"
        propChanges['CLUSTER_NAME'] = Config.get('ambari', 'CLUSTER_NAME')
        propChanges['STACK'] = Config.get('ambari', 'STACK')
        propChanges['UMASK'] = Config.get('ambari', 'UMASK')
        propChanges['AMBARI_CUSTOM_USER'] = Config.get('ambari', 'AMBARI_CUSTOM_USER')
        propChanges['AMBARI_AGENT_USER'] = Config.get('ambari', 'AMBARI_AGENT_USER')
        propChanges['PWD_ENCRYPT'] = Config.get('ambari', 'PWD_ENCRYPT')
        propChanges['CUSTOMIZED_SERVICES_USERS'] = Config.get('ambari', 'CUSTOMIZED_SERVICES_USERS')
        propChanges['CUSTOM_PIDS'] = Config.get('ambari', 'CUSTOM_PIDS')
        propChanges['AMBARI_2WAY_SSL'] = Config.get('ambari', 'AMBARI_2WAY_SSL')
        propChanges['AMBARI_SERVER_HTTPS'] = Config.get('ambari', 'AMBARI_SERVER_HTTPS')
        propChanges['IS_TMP_NOEXEC'] = Config.get('ambari', 'IS_TMP_NOEXEC')
        propChanges['NUMBER_OF_BASE_HOSTS'] = Config.get('ambari', 'NUMBER_OF_BASE_HOSTS')
        propChanges['DN_NONROOT'] = Config.get('ambari', 'DN_NONROOT')
        propChanges['MOTD_ENABLE'] = Config.get('ambari', 'MOTD_ENABLE')
        propChanges['HDP_REPO_BASEURL'] = Config.get('ambari', 'HDP_REPO_BASEURL')
        propChanges['HDP_UTILS_REPO_BASEURL'] = Config.get('ambari', 'HDP_UTILS_REPO_BASEURL')
        propChanges['AMBARI_DB'] = Config.get('ambari', 'AMBARI_DB')
        propChanges['JDK_VERSION'] = Config.get('ambari', 'JDK_VERSION')
        propChanges['IS_SECURE'] = Config.get('machine', 'IS_SECURE')
        propChanges['CLIENT'] = Config.get('ambari', 'CLIENT')
        propChanges['CLIENT_OS'] = Config.get('ambari', 'CLIENT_OS')
        propChanges['CLIENT_PORT'] = Config.get('ambari', 'CLIENT_PORT')
        propChanges['UPGRADE_TO'] = Config.get('ambari', 'UPGRADE_TO')
        propChanges['STACK_UPGRADE_TO'] = Config.get('ambari', 'STACK_UPGRADE_TO')
        propChanges['VIDEO_RECORDING'] = Config.get('ambari', 'VIDEO_RECORDING')
        propChanges['BROWSER'] = Config.get('ambari', 'BROWSER')
        propChanges['AMBARI_RPM_URL'] = Config.get('ambari', 'AMBARI_VERSION')
        propChanges['HIVE_DB'] = Config.get('ambari', 'DATABASE_FLAVOR')
        propChanges['DRUID_DB'] = Config.get('ambari', 'DATABASE_FLAVOR')
        propChanges['BEACON_DB'] = Config.get('ambari', 'DATABASE_FLAVOR')
        propChanges['OOZIE_DB'] = Config.get('ambari', 'DATABASE_FLAVOR')
        propChanges['XA_DATABASE_FLAVOR'] = Config.get('ambari', 'XA_DATABASE_FLAVOR')
        propChanges['IS_HA_TEST'] = Config.get('ambari', 'IS_HA_TEST')
        propChanges['ENABLE_HA_COMPONENTS'] = Config.get('ambari', 'ENABLE_HA_COMPONENTS')
        propChanges['USE_BLUEPRINT'] = Config.get('ambari', 'USE_BLUEPRINT')
        propChanges['USER_KERBEROS_SERVER_TYPE'] = Config.get('ambari', 'USER_KERBEROS_SERVER_TYPE')
        propChanges['KERBEROS_SERVER_TYPE'] = Config.get('ambari', 'KERBEROS_SERVER_TYPE')
        propChanges['REALM'] = Config.get('ambari', 'REALM')
        propChanges['USER_REALM'] = Config.get('ambari', 'USER_REALM')
        propChanges['AD_SERVER_HOST'] = Config.get('ambari', 'AD_SERVER_HOST')
        propChanges['WIRE_ENCRYPTION'] = Config.get('ambari', 'WIRE_ENCRYPTION')
        propChanges['SPLIT_NUM'] = Config.get('ambari', 'SPLIT_NUM')
        propChanges['AMBARI_TESTSNAMES'] = Config.get('ambari', 'AMBARI_TESTSNAMES')
        propChanges['RUN_MARKER_LIST'] = Config.get('ambari', 'RUN_MARKER_LIST')
        propChanges['RUN_MARKER_VERSION'] = Config.get('ambari', 'RUN_MARKER_VERSION')
        propChanges['STACK_TYPE'] = Config.get('ambari', 'STACK_TYPE')
        propChanges['HDF_REPO_BASEURL'] = Config.get('ambari', 'HDF_REPO_BASEURL')
        propChanges['MANAGEMENT_PACK_LINK'] = Config.get('ambari', 'MANAGEMENT_PACK_LINK')
        propChanges['STREAMLINE_DB'] = Config.get('ambari', 'STREAMLINE_DB')
        propChanges['REGISTRY_DB'] = Config.get('ambari', 'REGISTRY_DB')
        propChanges['USE_BAKED_IMAGE'] = Config.get('ambari', 'USE_BAKED_IMAGE')  # QE-18454

        propChanges['INSTALL_MR'] = cls.convert_environment_variable_to_bool("INSTALL_MR")
        propChanges['INSTALL_ZOOKEEPER'] = cls.convert_environment_variable_to_bool("INSTALL_ZOOKEEPER")
        propChanges['INSTALL_AMS'] = cls.convert_environment_variable_to_bool("INSTALL_AMS")
        propChanges['INSTALL_KMS'] = cls.convert_environment_variable_to_bool("INSTALL_KMS")
        propChanges['INSTALL_NFSGATEWAYS'] = cls.convert_environment_variable_to_bool("INSTALL_NFSGATEWAYS")
        propChanges['INSTALL_HS_INTERACTIVE'] = cls.convert_environment_variable_to_bool("INSTALL_HS_INTERACTIVE")
        propChanges['INSTALL_LOGSEARCH'] = cls.convert_environment_variable_to_bool("INSTALL_LOGSEARCH")
        propChanges['INSTALL_FALCON'] = cls.convert_environment_variable_to_bool("INSTALL_FALCON")
        propChanges['INSTALL_STREAMLINE'] = cls.convert_environment_variable_to_bool("INSTALL_STREAMLINE")
        propChanges['INSTALL_SPARK'] = cls.convert_environment_variable_to_bool("INSTALL_SPARK")
        propChanges['INSTALL_PDSH'] = cls.convert_environment_variable_to_bool("INSTALL_PDSH")
        propChanges['INSTALL_PIG'] = cls.convert_environment_variable_to_bool("INSTALL_PIG")
        propChanges['INSTALL_HBASE'] = cls.convert_environment_variable_to_bool("INSTALL_HBASE")
        propChanges['INSTALL_TEMPLETON'] = cls.convert_environment_variable_to_bool("INSTALL_TEMPLETON")
        propChanges['INSTALL_OOZIE'] = cls.convert_environment_variable_to_bool("INSTALL_OOZIE")
        propChanges['INSTALL_HIVE'] = cls.convert_environment_variable_to_bool("INSTALL_HIVE")
        propChanges['INSTALL_HCAT'] = cls.convert_environment_variable_to_bool("INSTALL_HCAT")
        propChanges['INSTALL_SQOOP'] = cls.convert_environment_variable_to_bool("INSTALL_SQOOP")
        propChanges['INSTALL_SQOOP2'] = cls.convert_environment_variable_to_bool("INSTALL_SQOOP2")
        propChanges['INSTALL_FLUME'] = cls.convert_environment_variable_to_bool("INSTALL_FLUME")
        propChanges['INSTALL_STORM'] = cls.convert_environment_variable_to_bool("INSTALL_STORM")
        propChanges['INSTALL_KNOX'] = cls.convert_environment_variable_to_bool("INSTALL_KNOX")
        propChanges['INSTALL_KAFKA'] = cls.convert_environment_variable_to_bool("INSTALL_KAFKA")
        propChanges['INSTALL_HBASE_APP'] = cls.convert_environment_variable_to_bool("INSTALL_HBASE_APP")
        propChanges['INSTALL_STORM_APP'] = cls.convert_environment_variable_to_bool("INSTALL_STORM_APP")
        propChanges['INSTALL_ACCUMULO_APP'] = cls.convert_environment_variable_to_bool("INSTALL_ACCUMULO_APP")
        propChanges['INSTALL_PHOENIX_APP'] = cls.convert_environment_variable_to_bool("INSTALL_PHOENIX_APP")
        propChanges['INSTALL_MAHOUT'] = cls.convert_environment_variable_to_bool("INSTALL_MAHOUT")
        propChanges['INSTALL_SMARTSENSE'] = cls.convert_environment_variable_to_bool("INSTALL_SMARTSENSE")
        propChanges['INSTALL_PHOENIX'] = cls.convert_environment_variable_to_bool("INSTALL_PHOENIX")
        propChanges['INSTALL_RANGER'] = cls.convert_environment_variable_to_bool("INSTALL_XASECURE")
        propChanges['INSTALL_SLIDER'] = cls.convert_environment_variable_to_bool("INSTALL_SLIDER")
        propChanges['INSTALL_ACCUMULO'] = cls.convert_environment_variable_to_bool("INSTALL_ACCUMULO")
        propChanges['INSTALL_HUE'] = cls.convert_environment_variable_to_bool("INSTALL_HUE")
        propChanges['INSTALL_ATLAS'] = cls.convert_environment_variable_to_bool("INSTALL_ATLAS")
        propChanges['INSTALL_HDF'] = cls.convert_environment_variable_to_bool("INSTALL_HDF")
        propChanges['INSTALL_NIFI'] = cls.convert_environment_variable_to_bool("INSTALL_NIFI")
        propChanges['INSTALL_AMBARI_INFRA'] = cls.convert_environment_variable_to_bool("INSTALL_AMBARI_INFRA")
        propChanges['INSTALL_JNBG'] = cls.convert_environment_variable_to_bool("INSTALL_JNBG")
        propChanges['INSTALL_R4ML'] = cls.convert_environment_variable_to_bool("INSTALL_R4ML")
        propChanges['INSTALL_TITAN'] = cls.convert_environment_variable_to_bool("INSTALL_TITAN")
        propChanges['INSTALL_SOLR'] = cls.convert_environment_variable_to_bool("INSTALL_SOLR")
        propChanges['INSTALL_REGISTRY'] = cls.convert_environment_variable_to_bool("INSTALL_REGISTRY")
        propChanges['INSTALL_ZEPPELIN'] = cls.convert_environment_variable_to_bool("INSTALL_ZEPPELIN")
        propChanges['INSTALL_SPARK2'] = cls.convert_environment_variable_to_bool("INSTALL_SPARK2")
        propChanges['INSTALL_DRUID'] = cls.convert_environment_variable_to_bool("INSTALL_DRUID")
        propChanges['INSTALL_BEACON'] = cls.convert_environment_variable_to_bool("INSTALL_BEACON")
        propChanges['MR_FRAMEWORK'] = cls.convert_environment_variable_to_bool("MR_FRAMEWORK")
        propChanges['INSTALL_LZO'] = cls.convert_environment_variable_to_bool("INSTALL_LZO")
        propChanges['ENABLE_KNOX_SSO'] = cls.convert_environment_variable_to_bool("ENABLE_KNOX_SSO")  # QE-19395

        logger.info("The prop changes: " + str(propChanges))

        ADDITIONAL_AMBARI_PROPS = Config.get('ambari', 'ADDITIONAL_AMBARI_PROPS')
        if ADDITIONAL_AMBARI_PROPS:
            parameter_map = ADDITIONAL_AMBARI_PROPS.split(",")
            for parameter in parameter_map:
                key_value = parameter.split("=", 1)  # split on the first '=' only
                key = key_value[0]
                value = key_value[1]
                print "Reading key :%s = value :%s" % (key, value)
                propChanges[key] = value

        if ('sanity' in COMPONENT and 'sanity-preupgrade' not in COMPONENT) or 'postupg-sec-enable' in COMPONENT:
            stack_upgrade_to = Config.get('ambari', 'STACK_UPGRADE_TO')
            if stack_upgrade_to is not None and len(stack_upgrade_to) > 0:
                propChanges['STACK'] = Config.get('ambari', 'STACK_UPGRADE_TO')

        if propChanges['UPGRADE_TO'] is not None:
            logger.info("Check value " + propChanges['UPGRADE_TO'])

        # If the gateway is not deployed, change /root/hosts to have only those hosts that are to be part of gateway cluster
        if CommonLib.update_hosts_for_deployed_clusters(propChanges):
            propChanges['INSTALL_HDP'] = "true"

        util.writePropertiesToFile(
            os.path.join(local_work_dir, 'ambari.properties'), os.path.join(local_work_dir, 'ambari.properties'),
            propChanges
        )

        log_prop_changes = {}
        log_prop_changes['log4j.appender.UIFRM.File'] = os.path.join(local_work_dir, "uifrm.log")
        util.writePropertiesToFile(
            os.path.join(local_work_dir, 'log4j.properties'), os.path.join(local_work_dir, 'log4j.properties'),
            log_prop_changes
        )

        return propChanges
Ejemplo n.º 57
0
 def getTasktrackers(cls):
     slaveFile = os.path.join(Config.get('hadoop', 'HADOOP_CONF'), "slaves")
     # use a context manager so the slaves file is closed after reading
     with open(slaveFile, "r") as f:
         tasktrackers = f.readlines()
     return tasktrackers
Ejemplo n.º 58
0
 def get_component(cls):
     return Config.get('ambari', 'COMPONENT')
Ejemplo n.º 59
0
 def common_startup_operations(cls, local_work_dir):
     from tools.artifact_server_helper import artifact_server_start
     artifact_server_start(Config.getEnv('ARTIFACTS_DIR'))
     Machine.runas('root', 'chmod -R 755 ' + os.path.join(local_work_dir, 'target'))
# Get users from the config file
HADOOPQA_USER = CommonHadoopEnv.getHadoopQAUser()
HDFS_USER = CommonHadoopEnv.getHDFSUser()
MAPRED_USER = CommonHadoopEnv.getMapredUser()

SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(SCRIPT_PATH, "data")
CREATE_FILE = "CreateFile.py"
CREATE_FILE_PATH_IN_HADOOP = '/user/' + HADOOPQA_USER + '/' + CREATE_FILE
CREATE_FILE_PATH_IN_LOCAL = os.path.join(SCRIPT_PATH, "data", CREATE_FILE)
CREATE_FILE_2 = "CreateFile2.py"
CREATE_FILE_2_PATH_IN_HADOOP = '/user/' + HADOOPQA_USER + '/' + CREATE_FILE_2
CREATE_FILE_2_PATH_IN_LOCAL = os.path.join(SCRIPT_PATH, "data", CREATE_FILE_2)
OUT_PATH_IN_HADOOP = '/user/' + HADOOPQA_USER + '/out1'
HADOOP_STREAMING_JAR = Config.get('hadoop', 'HADOOP_STREAMING_JAR')

logger = logging.getLogger(__name__)
  
def validateJobId(jobId):
    jobId = str(jobId)  # coerce to a string before the prefix check
    return jobId.startswith('job_')
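
For reference, classic MapReduce job IDs follow the job_<clusterTimestamp>_<sequence> pattern, so:

    # Example inputs (illustrative IDs):
    assert validateJobId("job_201901011234_0001")
    assert not validateJobId("attempt_201901011234_0001_m_000000_0")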

def getLocalDirInfo(host):            
    return util.getPropertyValueFromConfigXMLFile(os.path.join(Config.get('hadoop', 'HADOOP_CONF'), "mapred-site.xml"), "mapred.local.dir")

def checkJobCreatedTempFileInTT(logFileDir, currentUser, currentJobId, currentAttemptId, logfile, taskTrackerHost):      
    pathFile = os.path.join(logFileDir, 'taskTracker', currentUser, 'jobcache', currentJobId, currentAttemptId, 'work', logfile)
    logger.info("path file: " + pathFile)
    result = False
    if platform.system() == 'Windows':