Example #1
def get_kafka_topics():
  '''List the Kafka topics on the current installation.

  Requires that Kafka is installed on the same machine and that Ambari is up and running. Starts the Kafka service through Ambari, then uses the Kafka topics script to list all of the topics.

  Args:
    N/A

  Returns:
    list: On success, a list of topic names (one per entry). On failure, a two-element list where [0] is an empty string and [1] is an error message.
  '''
  conf = config.read_config('global.conf')
  am_conf = conf['AMBARI']
  amc = Ambari(am_conf['username'], am_conf['password'], am_conf['proto'], am_conf['server'], am_conf['port'])
  
  logger.info('Starting Kafka Broker')
  
  if amc.service_action('Sandbox', 'KAFKA', 'START'):
    sh = Shell()
    topics_script = conf['DEMO']['kafka_topics_script']
    zk = conf['DEMO']['zk_connection']
    logger.info('Attempting to list Kafka topics')
    out = sh.run(topics_script + ' --list --zookeeper ' + zk)
    
    if len(out[1]) == 0:
      topics = out[0].strip().split('\n')
      logger.info('Kafka topics output: ' + str(topics))
      return topics
    return ['', out[1]]  # The topics script itself reported an error

  return ['', 'Unable to get topics. Could not start Kafka Broker']
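A minimal usage sketch for the function above (hypothetical; assumes the same module-level logger as the snippet):

result = get_kafka_topics()
if len(result) == 2 and result[0] == '':
  # Failure shape: ['', <error message>]
  logger.error('Listing topics failed: ' + result[1])
else:
  for topic in result:
    logger.info('Found topic: ' + topic)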
Example #2
def get_kafka_topics():
    '''List the Kafka topics on the current installation.

    Requires that Kafka is installed on the same machine and that Ambari is up and running. Starts the Kafka service through Ambari, then uses the Kafka topics script to list all of the topics.

    Args:
        N/A

    Returns:
        list: On success, a list of topic names (one per entry). On failure, a two-element list where [0] is an empty string and [1] is an error message.
    '''
    conf = config.read_config('global.conf')
    am_conf = conf['AMBARI']
    amc = Ambari(am_conf['username'], am_conf['password'], am_conf['proto'],
                 am_conf['server'], am_conf['port'])

    logger.info('Starting Kafka Broker')

    if amc.service_action('Sandbox', 'KAFKA', 'START'):
        sh = Shell()
        topics_script = conf['DEMO']['kafka_topics_script']
        zk = conf['DEMO']['zk_connection']
        logger.info('Attempting to list Kafka topics')
        out = sh.run(topics_script + ' --list --zookeeper ' + zk)

        if len(out[1]) == 0:
            topics = out[0].strip().split('\n')
            logger.info('Kafka topics output: ' + str(topics))
            return topics
        return ['', out[1]]  # The topics script itself reported an error

    return ['', 'Unable to get topics. Could not start Kafka Broker']
Example #3
	def test_default_cwd(self):
		try:
			cmd = Shell('')
			out = cmd.run('bash --version')
			out = cmd.run('bash', '--version')
		except Exception as e:
			self.fail('Exception should not have been raised. ' + str(e))
Example #4
	def test_simple_run(self):
		try:
			cmd = Shell('/tmp')
			out = cmd.run('bash --version')
			out = cmd.run('bash', '--version')
		except Exception as e:
			self.fail('Exception should not have been raised. ' + str(e))
Example #5
 def test_default_cwd(self):
     try:
         cmd = Shell('')
         out = cmd.run('bash --version')
         out = cmd.run('bash', '--version')
     except Exception as e:
         self.fail('Exception should not have been raised. ' + str(e))
Example #6
 def test_simple_run(self):
     try:
         cmd = Shell('/tmp')
         out = cmd.run('bash --version')
         out = cmd.run('bash', '--version')
     except Exception as e:
         self.fail('Exception should not have been raised. ' + str(e))
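For reference, a minimal sketch of the Shell interface these tests assume (hypothetical; the real class lives elsewhere in this project): the constructor takes an optional working directory and raises IOError for a nonexistent path (exercised in the cwd tests further below), and run() returns a (stdout, stderr) pair, matching the out[0]/out[1] indexing used throughout these examples.

import os
import subprocess

class Shell(object):
    '''Hypothetical sketch of the Shell wrapper exercised by these tests.'''

    def __init__(self, cwd=''):
        # An empty or omitted argument means "use the caller's working directory".
        self.set_cwd(cwd if cwd else os.getcwd())

    def set_cwd(self, cwd):
        if not os.path.isdir(cwd):
            raise IOError('No such directory: ' + cwd)
        self.cwd = cwd

    def run(self, cmd, *args):
        # Accepts a single command string or a command plus extra arguments.
        full_cmd = ' '.join([cmd] + list(args))
        proc = subprocess.Popen(full_cmd, shell=True, cwd=self.cwd,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return [out.decode('utf-8'), err.decode('utf-8')]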
Example #7
 def export_hdfs(self, data):
   '''Write out data from the generator to a file **in CSV format in HDFS**

   The file to write to is found in ``global.conf``. Header lines are not written to the file. All data is appended to a single file.

   When a new data generator starts, the file is essentially wiped out, so make sure to copy the data elsewhere before stopping/restarting the generator.

   Args:
     data (dict): A single record from the generator. It is written out as a CSV row for easier ingestion into other places like Hive or Spark.

   Returns:
     N/A
   '''
   
   self.hdfs_data_pool.append(data)
   if len(self.hdfs_data_pool) > self.data_pool_size:
     # Header lines are intentionally not written (see docstring above).
     lines = '\n'.join(map(lambda v: ', '.join(map(lambda k: str(v[k]), sorted(v.keys()))), self.hdfs_data_pool))
     lines = lines.replace('\\"', '"') # Unescape first so no quote ends up double-escaped
     lines = lines.replace('"', '\\"') # Escape quotes so the bash command below doesn't break
     self.hdfs_data_pool = []
     hdfs_file = self.export_hdfs_file
     bash = Shell()
     hdfs_cmd = 'hdfs dfs -appendToFile - ' + hdfs_file
     echo_cmd = 'echo "%s"' % (lines)
     cmd = ' | '.join([echo_cmd, hdfs_cmd])
     output = bash.run(cmd)
     logger.debug('HDFS Append Output: ' + str(output))
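A standalone sketch of the CSV assembly used above, with hypothetical records; keys are sorted so columns line up across rows:

pool = [{'id': 1, 'temp': 98.6}, {'id': 2, 'temp': 99.1}]
lines = '\n'.join(', '.join(str(rec[k]) for k in sorted(rec.keys())) for rec in pool)
print(lines)
# 'id' sorts before 'temp', so this prints:
# 1, 98.6
# 2, 99.1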
Example #8
def create_demo_kafka_topic():
  '''Creates a Kafka topic for the demo if it doesn't already exist.

  The caveat in using this is that Kafka must be installed on the same machine as the demo, and thus the same machine as Ambari as well. The function will try to start the Kafka service through Ambari; once the service is started it will use the Kafka topics script to create the topic.

  The name for the topic is specified in ``global.conf``.

  Args:
    N/A

  Returns:
    bool: True if the creation is successful. False otherwise.
  '''
  conf = config.read_config('global.conf')
  am_conf = conf['AMBARI']
  amc = Ambari(am_conf['username'], am_conf['password'], am_conf['proto'], am_conf['server'], am_conf['port'])
  
  logger.info('Starting Kafka Broker')
  
  if amc.service_action('Sandbox', 'KAFKA', 'START'):
    sh = Shell()
    topics_script = conf['DEMO']['kafka_topics_script']
    zk = conf['DEMO']['zk_connection']
    topic_name = conf['DEMO']['kafka_topic_name']
    logger.info('Attempting to create new Kafka Topic')
    out = sh.run(topics_script + ' --create --zookeeper ' + zk + ' --replication-factor 1 --partitions 1 --topic ' + topic_name)
    logger.debug(str(out))
    return len(out[1]) == 0

  return False  # Could not start the Kafka Broker through Ambari
Example #9
def create_demo_kafka_topic():
    '''Creates a Kafka topic for the demo if it doesn't already exist.

    The caveat in using this is that Kafka must be installed on the same machine as the demo, and thus the same machine as Ambari as well. The function will try to start the Kafka service through Ambari; once the service is started it will use the Kafka topics script to create the topic.

    The name for the topic is specified in ``global.conf``.

    Args:
        N/A

    Returns:
        bool: True if the creation is successful. False otherwise.
    '''
    conf = config.read_config('global.conf')
    am_conf = conf['AMBARI']
    amc = Ambari(am_conf['username'], am_conf['password'], am_conf['proto'],
                 am_conf['server'], am_conf['port'])

    logger.info('Starting Kafka Broker')

    if amc.service_action('Sandbox', 'KAFKA', 'START'):
        sh = Shell()
        topics_script = conf['DEMO']['kafka_topics_script']
        zk = conf['DEMO']['zk_connection']
        topic_name = conf['DEMO']['kafka_topic_name']
        logger.info('Attempting to create new Kafka Topic')
        out = sh.run(topics_script + ' --create --zookeeper ' + zk +
                     ' --replication-factor 1 --partitions 1 --topic ' +
                     topic_name)
        logger.debug(str(out))
        return len(out[1]) == 0

    return False  # Could not start the Kafka Broker through Ambari
Example #10
def install_service():
	print("Install here...")
	sh = Shell('demo-files')
	version = '2.4'
	sh.set_cwd('.') # Sets cwd to directory where python was called
	cmd = 'cp -r . /var/lib/ambari-server/resources/stacks/HDP/%s/services/DEMOSERVICE' % version
	print(sh.run(cmd))
	cmd = 'ambari-server restart'
	print(sh.run(cmd))
Example #11
	def start(self, env):
		print('Start the Sample Srv Master')
		self.configure(env)
		sh = Shell('/root/devicemanagerdemo/demo-files')
		print(sh.run('bash startDemoServices.sh'))
		print("WORKING DIR")
		print(sh.run('pwd'))
Example #12
def kerberize():
    '''Kerberize the cluster using a script. Untested. Can take 10-15 minutes.

    This utilizes a script found at https://github.com/crazyadmins/useful-scripts/tree/master/ambari

    If you're running this script on a cluster, you should look in ``configuration/kerberos/ambari.props`` to make sure the proper values are present in the file, or else the script will fail.

    Args:
        N/A

    Returns:
        N/A
    '''
    script = config.get_path('kerberos/setup_kerberos.sh')
    sh = Shell()
    sh.run('bash ' + script)
Example #13
def kerberize():
  '''Kerberize the cluster using a script. Untested. Can take 10-15 minutes.
  
  This utilizes a script found at https://github.com/crazyadmins/useful-scripts/tree/master/ambari
  
  If you're running this script on a cluster, you should look in ``configuration/kerberos/ambari.props`` to make sure the proper values are present in the file, or else the script will fail.
  
  Args:
    N/A
    
  Returns:
    N/A
  '''
  script = config.get_path('kerberos/setup_kerberos.sh')
  sh = Shell()
  sh.run('bash ' + script)
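A minimal calling sketch (hypothetical; kerberize() returns nothing, so success has to be judged from the script's own output):

logger.info('Kerberizing the cluster; this can take 10-15 minutes')
kerberize()
logger.info('Kerberos setup script finished')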
Example #14
    def export_hdfs(self, data):
        '''Write out data from the generator to a file **in CSV format in HDFS**

        The file to write to is found in ``global.conf``. Header lines are not written to the file. All data is appended to a single file.

        When a new data generator starts, the file is essentially wiped out, so make sure to copy the data elsewhere before stopping/restarting the generator.

        Args:
            data (dict): A single record from the generator. It is written out as a CSV row for easier ingestion into other places like Hive or Spark.

        Returns:
            N/A
        '''

        self.hdfs_data_pool.append(data)
        if len(self.hdfs_data_pool) > self.data_pool_size:
            # Header lines are intentionally not written (see docstring above).
            lines = '\n'.join(
                map(
                    lambda v: ', '.join(
                        map(lambda k: str(v[k]), sorted(v.keys()))),
                    self.hdfs_data_pool))
            lines = lines.replace(
                '\\"', '"')  # Unescape first so no quote ends up double-escaped
            lines = lines.replace(
                '"', '\\"'
            )  # Escape quotes so the bash command below doesn't break
            self.hdfs_data_pool = []
            hdfs_file = self.export_hdfs_file
            bash = Shell()
            hdfs_cmd = 'hdfs dfs -appendToFile - ' + hdfs_file
            echo_cmd = 'echo "%s"' % (lines)
            cmd = ' | '.join([echo_cmd, hdfs_cmd])
            output = bash.run(cmd)
            logger.debug('HDFS Append Output: ' + str(output))
Example #15
	def install(self, env):
		print('Install the Sample Srv Master')
		self.configure(env)
		conf_dir = '/root/devicemanagerdemo/'
		sh = Shell()
		print("WORKING DIR")
		print(sh.run('pwd'))
		sh.set_cwd(conf_dir)
		sh.run('bash demo-files/install.sh')
Example #16
	def stop(self, env):
		print('Stop the Sample Srv Master')
		sh = Shell('/root/devicemanagerdemo/demo-files')
		# Find the running biologicsmanufacturingui YARN application id and kill it.
		sh.run(r"yarn application -kill $(yarn application -list | grep -Po '(application_[0-9]+_[0-9]+)\s(biologicsmanufacturingui)' | grep -Po '(application_[0-9]+_[0-9]+)')")
		sh.run('service docker stop')
Example #17
 def test_nonexistent_cwd(self):
     try:
         cmd = Shell('/missing/directory')
         self.fail('Should raise IOError on setting nonexistent directory')
     except IOError:
         return
Example #18
 def test_existing_directory(self):
     try:
         # Actual Directory
         cmd = Shell('/tmp')
     except IOError as e:
         self.fail('Valid path should pass here. ' + str(e))
Example #19
 def test_default_cwd(self):
     try:
         cmd = Shell()
     except IOError as e:
         self.fail('No argument shell constructor should not raise IOError')
Example #20
def pre_install():
	sh = Shell('demo-files')
	print(sh.run('bash pre-install.sh'))