Example #1
    def setup_storm_hdfs_topology(cls, useStandaloneCmd):
        storm_version = Storm.getVersion(useStandaloneCmd=True)
        # On secure clusters, write the keytab and principal into the topology's
        # HDFS config file; 'with' guarantees the file is closed even if a write fails.
        with open(HDFS_CONFIG_FILE, 'w') as file_obj:
            if Hadoop.isSecure():
                file_obj.write('hdfs.keytab.file: ' +
                               Machine.getHeadlessUserKeytab(
                                   user=HADOOPQA_USER) + '\n')
                file_obj.write('hdfs.kerberos.principal: ' +
                               Machine.get_user_principal(user=HADOOPQA_USER) +
                               '\n')

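        # Create world-writable scratch directories in HDFS used by the storm-hdfs and Trident topologies.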
        HDFS.createDirectory("/tmp/mySeqTopology", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/dest", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/dest2", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/foo", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/trident", HDFS_USER, "777", False)
        HDFS.createDirectory("/tmp/trident-seq", HDFS_USER, "777", False)

        Machine.copy(JAVA_HDFS_SRC_DIR,
                     LOCAL_HDFS_WORK_DIR,
                     user=None,
                     passwd=None)
        if not Machine.isWindows():
            (exit_code, _) = Maven.run('package',
                                       cwd=LOCAL_HDFS_WORK_DIR,
                                       env={
                                           HADOOP_VERSION_MAVEN_PARAMETER:
                                           HADOOP_VERSION,
                                           STORM_VERSION_MAVEN_PARAMETER:
                                           storm_version,
                                           HADOOP_CONF_MAVEN_PARAMETER:
                                           HADOOP_CONF,
                                           HDFS_FILE_MAVEN_PARAMETER:
                                           HDFS_FILE,
                                           HADOOP_CORE_MAVEN_PARAMETER:
                                           HADOOP_CONF,
                                           CORE_FILE_MAVEN_PARAMETER:
                                           CORE_FILE,
                                           PUBLIC_REPO_MAVEN_PARAMETER:
                                           Maven.getPublicRepoUrl()
                                       })
        else:
            filepath = os.path.join(MOD_CONF_PATH, "core-site.xml")
            (exit_code, _) = Maven.run(
                'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s'
                % (HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                   STORM_VERSION_MAVEN_PARAMETER, storm_version,
                   HADOOP_CONF_MAVEN_PARAMETER, HADOOP_CONF,
                   HDFS_FILE_MAVEN_PARAMETER, HDFS_FILE,
                   HADOOP_CORE_MAVEN_PARAMETER, HADOOP_CONF,
                   CORE_FILE_MAVEN_PARAMETER, CORE_FILE,
                   PUBLIC_REPO_MAVEN_PARAMETER, Maven.getPublicRepoUrl()),
                cwd=LOCAL_HDFS_WORK_DIR)
        ruAssert("Storm", exit_code == 0,
                 "[StormHDFSSetup] maven package command failed")
Example #2
    def setup_storm_hbase_topology(cls, useStandaloneCmd):
        from beaver.component.hbase import HBase

        storm_version = Storm.getVersion(useStandaloneCmd=True)

        Machine.copy(JAVA_HBASE_SRC_DIR,
                     LOCAL_HBASE_WORK_DIR,
                     user=None,
                     passwd=None)

        if Machine.isWindows():
            (_, _) = Maven.run(
                'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s -D%s=%s'
                % (HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                   STORM_VERSION_MAVEN_PARAMETER, storm_version,
                   HBASE_CONF_MAVEN_PARAMETER, HBASE_CONF,
                   HBASE_FILE_MAVEN_PARAMETER, HBASE_FILE,
                   HADOOP_CONF_MAVEN_PARAMETER, HADOOP_CONF,
                   HDFS_FILE_MAVEN_PARAMETER, HDFS_FILE,
                   CORE_FILE_MAVEN_PARAMETER, CORE_FILE,
                   PUBLIC_REPO_MAVEN_PARAMETER, Maven.getPublicRepoUrl()),
                cwd=LOCAL_HBASE_WORK_DIR)
        else:
            (_, _) = Maven.run('package',
                               cwd=LOCAL_HBASE_WORK_DIR,
                               env={
                                   HADOOP_VERSION_MAVEN_PARAMETER:
                                   HADOOP_VERSION,
                                   STORM_VERSION_MAVEN_PARAMETER:
                                   storm_version,
                                   HBASE_CONF_MAVEN_PARAMETER:
                                   HBASE_CONF,
                                   HBASE_FILE_MAVEN_PARAMETER:
                                   HBASE_FILE,
                                   HADOOP_CONF_MAVEN_PARAMETER:
                                   HADOOP_CONF,
                                   HDFS_FILE_MAVEN_PARAMETER:
                                   HDFS_FILE,
                                   CORE_FILE_MAVEN_PARAMETER:
                                   CORE_FILE,
                                   PUBLIC_REPO_MAVEN_PARAMETER:
                                   Maven.getPublicRepoUrl()
                               })

        exit_code, stdout = HBase.createTable(HBASE_TABLE_NAME, "cf", True,
                                              None)
        ruAssert("Storm", exit_code == 0)
        grant_cmd = "grant '%s', 'RWCXA', '%s', 'cf'" % (Storm.getStormUser(),
                                                         HBASE_TABLE_NAME)
        exit_code, stdout = HBase.runShellCmds([grant_cmd])
Example #3
    def setup_storm_jdbc_topology(cls, useStandaloneCmd):
        from beaver.dbutil import MySQL

        # Create the MySQL user and credentials the JDBC topology connects with.
        MySQL.createUserWithAllPriveleges(STORM_JDBC_TEST_USER,
                                          STORM_JDBC_TEST_PASSWD)
        storm_version = Storm.getVersion(useStandaloneCmd=True)
        # On secure clusters, write the keytab and principal into the topology's
        # HDFS config file; 'with' guarantees the file is closed even if a write fails.
        with open(HDFS_CONFIG_FILE, 'w') as file_obj:
            if Hadoop.isSecure():
                file_obj.write('hdfs.keytab.file: ' +
                               Machine.getHeadlessUserKeytab(
                                   user=HADOOPQA_USER) + '\n')
                file_obj.write('hdfs.kerberos.principal: ' +
                               Machine.get_user_principal(user=HADOOPQA_USER) +
                               '\n')
        Machine.copy(JAVA_JDBC_SRC_DIR,
                     LOCAL_JDBC_WORK_DIR,
                     user=None,
                     passwd=None)
        # Storm releases after the Fenton line need the extra postFenton Maven flag.
        post_fenton_opt = " -DpostFenton=true" if Storm.isAfterFenton() else ""
        package_cmd = 'package' + post_fenton_opt
        (exit_code, _) = Maven.run(package_cmd,
                                   cwd=LOCAL_JDBC_WORK_DIR,
                                   env={
                                       HADOOP_VERSION_MAVEN_PARAMETER:
                                       HADOOP_VERSION,
                                       STORM_VERSION_MAVEN_PARAMETER:
                                       storm_version,
                                       HADOOP_CONF_MAVEN_PARAMETER:
                                       HADOOP_CONF,
                                       HDFS_FILE_MAVEN_PARAMETER:
                                       HDFS_FILE,
                                       CORE_FILE_MAVEN_PARAMETER:
                                       CORE_FILE,
                                       PUBLIC_REPO_MAVEN_PARAMETER:
                                       Maven.getPublicRepoUrl()
                                   })
        ruAssert("Storm", exit_code == 0,
                 "[StormJDBCSetup] maven package command failed")
Example #4
    def run_smoke_test(cls, smoketestnumber, config=None):
        '''
        Run the Slider smoke test (HBase table scan/create plus the Slider funtest suite)
        :param smoketestnumber: Used for unique output log location
        '''
        global new_conf_path
        global buildNo
        from beaver.component.hbase import HBase
        # Run the Slider agent labels funtests
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress("### Slider smoke test started ####")
        exit_code, stdout = HBase.runShellCmds(cls.SCAN_TABLE_CMD, user=cls.HBASE_USER, configPath=new_conf_path)
        UpgradePerNode.reportProgress("### Slider smoke test scanned " + cls.TABLE_NAME)

        hbase_ver = HBase.getVersion(configPath=new_conf_path)
        if buildNo not in hbase_ver:
            UpgradePerNode.reportProgress("### smoke test failed: " + buildNo + " not found in " + hbase_ver)
        else:
            UpgradePerNode.reportProgress("### smoke test passed: " + buildNo + " found in " + hbase_ver)
        UpgradePerNode.reportProgress("scanned " + cls.TABLE_NAME)
        TABLE_NM = cls.id_generator(10)
        CREATE_TABLE_CMD = ["create '" + TABLE_NM + "', 'family1', 'family2', 'family3'"]
        exit_code, stdout = HBase.runShellCmds(CREATE_TABLE_CMD, user=cls.HBASE_USER, configPath=new_conf_path)
        if exit_code == 0:
            UpgradePerNode.reportProgress("created " + TABLE_NM)
        else:
            UpgradePerNode.reportProgress("creation of " + TABLE_NM + "failed")
        if not cls._LOCAL_WORK_DIR:
            UpgradePerNode.reportProgress(
                "[FAILED][Slider][Smoke] Slider smoke test failed due to Slider source code unavailability"
            )
            return

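        # Run the Slider functional upgrade suite (AppsUpgradeIT) from the slider-funtest module.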
        exit_code, stdout = Maven.run(
            "verify -Dslider.conf.dir=%s "
            "-Dhadoop.version=%s "
            "-Dprivate.repo.url=%s "
            "-Dit.test=AppsUpgradeIT" % (cls._LOCAL_CONF_DIR, Hadoop.getVersion(), Maven.getPublicRepoUrl()),
            cwd=os.path.join(cls._LOCAL_WORK_DIR, 'slider-funtest')
        )
        testresults = {}
        testResultFiles = []
        TEST_RESULT = {}

        # get a list of all the test result files
        for name, dirs, files in os.walk(cls._LOCAL_WORK_DIR):
            if os.path.basename(name) == 'target':
                # Add in each failsafe-report we find -- this test only runs failsafe reports
                testResultFiles.extend(util.findMatchingFiles(os.path.join(name, 'failsafe-reports'), 'TEST-*.xml'))

        for resultFile in testResultFiles:
            testresults.update(util.parseJUnitXMLResult(resultFile))
        for key, value in testresults.items():
            TEST_RESULT[key] = value

        logger.info("=======================")
        logger.info(TEST_RESULT)
        logger.info("=======================")
        TestCases = TEST_RESULT.keys()
        for testcase in TestCases:
            result = TEST_RESULT[testcase]['result']
            if result == "pass":
                UpgradePerNode.reportProgress("[PASSED][Slider][Smoke] Slider smoke test passed")
            else:
                UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider smoke test failed")
Example #5
    def smoke_test_setup(cls):
        '''
        Setup required to run Smoke test
        '''
        from beaver.component.slider import Slider
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        HADOOPQA_USER = Config.get('hadoop', 'HADOOPQA_USER')
        UpgradePerNode.reportProgress("###  Starting set up for Slider smoke test  ####")
        if Hadoop.isSecure():
            keytabFile = Machine.getHeadlessUserKeytab(HADOOPQA_USER)
            kinitloc = Machine.which("kinit", "root")
            cmd = "%s  -k -t %s %s" % (kinitloc, keytabFile, Machine.get_user_principal(HADOOPQA_USER))
            exit_code, stdout = Machine.run(cmd)
            if exit_code != 0:
                UpgradePerNode.reportProgress("###  smoke test setup for Slider failed due to kinit failed  ####")

        # Local directory in artifacts that we'll run tests from
        # it is possible the download_source() will fail
        try:
            cls._LOCAL_WORK_DIR = Slider.download_source(useHDPBaseRepoFile=False, isRUcluster=True)
            logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
        except TypeError as err:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Source download fail.")
            return
        if not cls._LOCAL_WORK_DIR:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Source Code missing.")
            return
        # Local conf directory with modified conf for tests
        if not os.path.exists(cls._LOCAL_CONF_DIR):
            os.makedirs(cls._LOCAL_CONF_DIR)
        slider_conf = os.path.join(Slider.getSliderHome(), 'conf')
        logger.info("slider_conf = %s" % slider_conf)
        logger.info("os path exist slider_conf = %s" % os.path.exists(slider_conf))
        if os.path.exists(slider_conf):
            Machine.copy(
                os.path.join(slider_conf, 'log4j.properties'), os.path.join(cls._LOCAL_CONF_DIR, 'log4j.properties')
            )
            Machine.copy(
                os.path.join(slider_conf, 'slider-env.sh'), os.path.join(cls._LOCAL_CONF_DIR, 'slider-env.sh')
            )
        else:
            UpgradePerNode.reportProgress("[FAILED][Slider][Smoke] Slider Conf %s missing" % slider_conf)
            return

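        # Write slider-client.xml for the funtests; secure clusters also need the keytab and principal settings.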
        if Hadoop.isSecure():
            util.writePropertiesToConfigXMLFile(
                os.path.join(Slider.getSliderHome(), 'conf', 'slider-client.xml'),
                os.path.join(cls._LOCAL_CONF_DIR, 'slider-client.xml'), {
                    "slider.funtest.enabled": "true",
                    "slider.test.agent.enabled": "true",
                    "HADOOP_CONF_DIR": Config.get('hadoop', 'HADOOP_CONF'),
                    "slider.am.keytab.local.path": Machine.getHeadlessUserKeytab(HADOOPQA_USER),
                    "slider.keytab.principal.name": Machine.get_user_principal(HADOOPQA_USER)
                }
            )
        else:
            util.writePropertiesToConfigXMLFile(
                os.path.join(Slider.getSliderHome(), 'conf', 'slider-client.xml'),
                os.path.join(cls._LOCAL_CONF_DIR, 'slider-client.xml'), {
                    "slider.funtest.enabled": "true",
                    "HADOOP_CONF_DIR": Config.get('hadoop', 'HADOOP_CONF'),
                    "slider.test.agent.enabled": "true"
                }
            )

        logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
        # Check that the precondition is met and the source is available
        if not os.path.exists(cls._LOCAL_WORK_DIR) or not os.path.exists(os.path.join(cls._LOCAL_WORK_DIR, 'pom.xml')):
            logger.info('Slider source does not appear to exist at %s' % (cls._LOCAL_WORK_DIR))
            UpgradePerNode.reportProgress(
                "###  Slider source does not appear to exist at %s ####" % (cls._LOCAL_WORK_DIR)
            )
        logger.info("Local work dir = %s" % cls._LOCAL_WORK_DIR)
        if cls._LOCAL_WORK_DIR is None:
            logger.info("ERROR: cls._LOCAL_WORK_DIR is None")
        # Install first so isolated modules can be tested
        exit_code, stdout = Maven.run(
            "clean install -DskipTests "
            "-Dhadoop.version=%s "
            "-Dprivate.repo.url=%s " % (Hadoop.getVersion(), Maven.getPublicRepoUrl()),
            cwd=cls._LOCAL_WORK_DIR
        )
        if exit_code != 0:
            UpgradePerNode.reportProgress("### Error installing Slider source : %d: %s ####" % (exit_code, stdout))
        else:
            UpgradePerNode.reportProgress("### Slider source install passed ####")
Example #6
    def setup_storm_hive_topology(cls, useStandaloneCmd):
        from beaver.component.hive import Hive

        storm_version = Storm.getVersion(useStandaloneCmd=True)
        hive_version = Hive.getVersion()
        # Declare the globals before assigning to them; assigning first raises
        # "SyntaxError: name assigned to before global declaration".
        global HIVE_METASTORE_URI
        global HIVE_HOST
        global HIVE_PORT
        global HIVE_WAREHOUSE_DIR
        HIVE_METASTORE_URI = Hive.getConfigValue(
            "hive.metastore.uris", defaultValue="thrift://localhost:9083")
        HIVE_WAREHOUSE_DIR = Hive.getConfigValue(
            "hive.metastore.warehouse.dir",
            defaultValue="/apps/hive/warehouse")
        HIVE_HOST = Hive.getHiveHost()
        HIVE_PORT = Hive.getMetastoreThriftPort()
        if Storm.isDalorBeyond():
            JAVA_HIVE_SRC_DIR = os.path.join(Config.getEnv('WORKSPACE'),
                                             'tests', 'rolling_upgrade',
                                             'Storm', '2_3', 'storm-hive',
                                             'java')
        else:
            JAVA_HIVE_SRC_DIR = os.path.join(Config.getEnv('WORKSPACE'),
                                             'tests', 'rolling_upgrade',
                                             'Storm', '2_2', 'storm-hive',
                                             'java')
        # hive.txn.manager and hive.support.concurrency are set through ambari as per bug-40500
        #logger.info("Restart Hive")
        #changes = {'hive-site.xml': {'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DbTxnManager',
        #                             'hive.support.concurrency': 'true'}}
        #Hive.modifyConfig(changes, services=['metastore'], restartService=True)
        logger.info("Create test database in Hive")

        exit_code, stdout = Hive.runQuery(
            cls.get_set_queue_cmd(useStandaloneCmd) +
            " drop database if exists stormdb cascade; "
            "create database stormdb;")
        ruAssert("Storm", exit_code == 0,
                 "[StormHiveSetup] Failed to create test database" + stdout)
        HDFS.chmod(runasUser=HDFS.getHDFSUser(),
                   perm=777,
                   directory=HIVE_WAREHOUSE_DIR + "/" + DATABASE_NAME + ".db")
        #copy tests/storm/storm-hive/java to artifacts/storm-hive-tests
        logger.info("JAVA_SRC_DIR " + JAVA_HIVE_SRC_DIR)
        logger.info("LOCAL_WORK_DIR " + LOCAL_HIVE_WORK_DIR)
        Machine.copy(JAVA_HIVE_SRC_DIR,
                     LOCAL_HIVE_WORK_DIR,
                     user=None,
                     passwd=None)
        #mvn package
        if Machine.isWindows():
            (_, _) = Maven.run(
                'package -D%s=%s -D%s=%s -D%s=%s -D%s=%s' %
                (HADOOP_VERSION_MAVEN_PARAMETER, HADOOP_VERSION,
                 STORM_VERSION_MAVEN_PARAMETER, storm_version,
                 HIVE_VERSION_MAVEN_PARAMETER,
                 hive_version, PUBLIC_REPO_MAVEN_PARAMETER,
                 Maven.getPublicRepoUrl(), CORE_FILE_MAVEN_PARAMETER,
                 CORE_FILE, HADOOP_CORE_MAVEN_PARAMETER, HADOOP_CONF,
                 HIVE_CORE_MAVEN_PARAMETER, HIVE_CORE_DIR,
                 HIVE_FILE_MAVEN_PARAMETER, HIVE_FILE),
                cwd=LOCAL_HIVE_WORK_DIR)
        else:
            (_, _) = Maven.run('package',
                               cwd=LOCAL_HIVE_WORK_DIR,
                               env={
                                   HADOOP_VERSION_MAVEN_PARAMETER:
                                   HADOOP_VERSION,
                                   STORM_VERSION_MAVEN_PARAMETER:
                                   storm_version,
                                   HIVE_VERSION_MAVEN_PARAMETER:
                                   hive_version,
                                   PUBLIC_REPO_MAVEN_PARAMETER:
                                   Maven.getPublicRepoUrl(),
                                   CORE_FILE_MAVEN_PARAMETER:
                                   CORE_FILE,
                                   HADOOP_CONF_MAVEN_PARAMETER:
                                   HADOOP_CONF,
                                   HDFS_FILE_MAVEN_PARAMETER:
                                   HDFS_FILE,
                                   HADOOP_CORE_MAVEN_PARAMETER:
                                   HADOOP_CONF,
                                   HIVE_CORE_MAVEN_PARAMETER:
                                   HIVE_CORE_DIR,
                                   HIVE_FILE_MAVEN_PARAMETER:
                                   HIVE_FILE
                               })
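        # The storm-hive topology streams into a bucketed, transactional ORC table, so create it up front.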
        create_table_q = "use %s; \
          drop table if exists %s; \
          create table %s (id int, name string, phone string, street string) \
          partitioned by (city string, state string) \
          clustered by (id) into %s buckets \
          stored as orc \
          tblproperties ('transactional'='true');" % (
            DATABASE_NAME, HIVE_TABLE_NAME, HIVE_TABLE_NAME, "5")

        exit_code, stdout = Hive.runQuery(
            cls.get_set_queue_cmd(useStandaloneCmd) + create_table_q)
        ruAssert(
            "Storm", exit_code == 0,
            "[StormHiveSetup] Failed to create test table userdata_partitioned"
        )
        HDFS.chmod(runasUser=HDFS.getHDFSUser(),
                   perm=777,
                   directory=HIVE_WAREHOUSE_DIR + "/" + DATABASE_NAME +
                   ".db/" + HIVE_TABLE_NAME)
Example #7
def runJdbcMultiSessionDriver(
        testDir,
        addlClasspath=[],
        connectionUrl=None,
        skippedTests=[],
        addlArgs=[],
        reuseConnections=False,
        testFilter=None,
        logsDir=None,
        queryTimeout=3600
):
    '''
    Run the Hive JDBC MultiSession Test Driver
    '''
    harnessDir = os.path.join(Config.getEnv('WORKSPACE'), 'datateamtest', 'hive_jdbc_multisession')
    logger.info("Build the TestDriver to run tests")
    exit_code, stdout = Maven.run("clean package", cwd=harnessDir)
    assert exit_code == 0, "Failed to build the test driver"
    classpath = [
        os.path.join(harnessDir, "target", "hive-multisession-test-0.1.jar"),
        Config.get('hadoop', 'HADOOP_CONF')
    ]
    if len(addlClasspath) == 0:
        hiveJdbcDriver = getStandaloneHiveJdbcJar()
        classpath.insert(0, hiveJdbcDriver)
    else:
        classpath = addlClasspath + classpath

    cobert_tool_version = "cobertura-2.1.1"
    COBERTURA_CLASSPTH = os.path.join(
        tempfile.gettempdir(), "coverage-tmp", cobert_tool_version, cobert_tool_version + ".jar"
    )
    if Machine.pathExists(Machine.getAdminUser(), None, COBERTURA_CLASSPTH, Machine.getAdminPasswd()):
        classpath.append(COBERTURA_CLASSPTH)

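    # Assemble the driver arguments: test directory, JDBC URL, Kerberos settings, skipped tests, filters, log dir, and query timeout.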
    args = ["-t " + testDir]
    if connectionUrl is None:
        connectionUrl = Hive.getHiveServer2Url()
    args.append("-c \"%s\"" % connectionUrl)
    if Hadoop.isSecure():
        args.append("-k " + Config.get('machine', 'KEYTAB_FILES_DIR'))
        if Config.hasOption('machine', 'USER_REALM'):
            USER_REALM = Config.get('machine', 'USER_REALM', '')
            args.append("-e USER_REALM=%s" % (USER_REALM))
    args.extend(["--skip %s" % t for t in skippedTests])
    if reuseConnections:
        args.append("--reuseConnections")
    if testFilter:
        args.append("-f " + testFilter)
    from beaver.marker import getMarkerCondition
    markerCondition = getMarkerCondition()
    if markerCondition:
        args.append("-e 'marker=%s'" % markerCondition)
    if not logsDir:
        logsDir = os.path.join(Config.getEnv('ARTIFACTS_DIR'), "logs_%d" % int(999999 * random.random()))
    args.append("-l " + logsDir)
    if queryTimeout > 0:
        args.append("--queryTimeout %d" % queryTimeout)
    args.extend(addlArgs)
    return Java.runJava(
        Config.getEnv('ARTIFACTS_DIR'),
        "org.apache.hive.jdbc.TestDriver",
        classPath=(os.pathsep).join(classpath),
        cmdArgs=args
    )