def submit_storm_hive_topology(cls, tcId, className, args, useStandaloneCmd):
    """Submit a storm-hive topology jar and assert that submission succeeded.

    On secure clusters the headless user's keytab and principal are appended
    to *args* so the topology workers can authenticate via Kerberos.

    :param tcId: test case id, used only in the failure message.
    :param className: topology main class inside TARGET_HIVE_STORM_JAR.
    :param args: command-line arguments forwarded to the topology main class.
    :param useStandaloneCmd: forwarded to Storm.runStormHdfsTopology.
    """
    if Hadoop.isSecure():
        # BUG FIX: initialize so the check below cannot raise
        # UnboundLocalError when no realm can be determined.
        user_realm = None
        if Config.hasOption('machine', 'USER_REALM'):
            user_realm = Config.get('machine', 'USER_REALM', '')
        else:
            # Fall back to deriving the realm from the NameNode Kerberos
            # principal, which has the form service/host@REALM.
            nnKerbPrincipal = HDFS.getNameNodePrincipal(defaultValue='')
            atloc = nnKerbPrincipal.find("@")
            if atloc != -1:
                # BUG FIX: slice past the '@' itself — the concatenation
                # below adds its own '@', so including it here produced an
                # invalid "user@@REALM" principal (and was inconsistent
                # with the config-file branch, which yields a bare realm).
                user_realm = nnKerbPrincipal[atloc + 1:]
        if user_realm is not None:
            args += " " + Machine.getHeadlessUserKeytab(
                Config.getEnv('USER')) + " " + Config.getEnv(
                    'USER') + '@' + user_realm

    exit_code, stdout = Storm.runStormHdfsTopology(
        TARGET_HIVE_STORM_JAR,
        className,
        args,
        None,
        logoutput=True,
        inBackground=False,
        useStandaloneCmd=useStandaloneCmd)
    logger.info(exit_code)
    ruAssert("Storm", exit_code == 0, "[StormHiveSubmit] %s Failed" % (tcId))
def submit_storm_hdfs_topology(cls, tcId, className, args, topologyName,
                               useStandaloneCmd):
    """
    Name: Storm-HDFS Topologies
    Description: Testing all 4 storm-hdfs topologies in distributed mode
        1. SequenceFileTopology
        2. HdfsFileTopology
        3. TridentFileTopology
        4. TridentSequenceTopology
    Steps to verify:
        1. create necessary input/output dir location if needed
        2. Run storm topology in distributed mode using
           "storm <jar> <classname> <args>"
        3. Verify expected output from hdfs output dir
        4. kill the topology

    :param tcId: test case id, used only in the failure message.
    :param className: topology main class inside TARGET_HDFS_STORM_JAR.
    :param args: command-line arguments forwarded to the topology main class.
    :param topologyName: accepted for interface compatibility; not used here.
    :param useStandaloneCmd: forwarded to Storm.runStormHdfsTopology.
    """
    exit_code, stdout = Storm.runStormHdfsTopology(
        TARGET_HDFS_STORM_JAR,
        className,
        args,
        None,
        logoutput=True,
        inBackground=False,
        useStandaloneCmd=useStandaloneCmd)
    # CONSISTENCY FIX: the hive/hbase sibling submitters log the exit code;
    # do the same here so failures are traceable in the test log.
    logger.info(exit_code)
    ruAssert("Storm", exit_code == 0, "[StormHDFSSubmit] %s Failed" % (tcId))
def submit_storm_hbase_topology(cls, tcId, className, args, useStandaloneCmd):
    """Submit a storm-hbase topology jar and assert the submission succeeded.

    On a secure cluster the HADOOPQA user's keytab and principal are appended
    to *args* so the topology can authenticate with Kerberos.

    :param tcId: test case id, used only in the failure message.
    :param className: topology main class inside TARGET_HBASE_STORM_JAR.
    :param args: command-line arguments forwarded to the topology main class.
    :param useStandaloneCmd: forwarded to Storm.runStormHdfsTopology.
    """
    if Hadoop.isSecure():
        # Kerberos credentials for the headless test user.
        keytab = Machine.getHeadlessUserKeytab(user=HADOOPQA_USER)
        princ = Machine.get_user_principal(user=HADOOPQA_USER)
        args = args + " %s %s" % (keytab, princ)

    exit_code, _stdout = Storm.runStormHdfsTopology(
        TARGET_HBASE_STORM_JAR,
        className,
        args,
        None,
        logoutput=True,
        inBackground=False,
        useStandaloneCmd=useStandaloneCmd)
    logger.info(exit_code)
    ruAssert("Storm", exit_code == 0, "[StormHBaseSubmit] %s Failed" % (tcId))