Example #1
 def transfer():
     common_snitch.authorize()
     operation = bottle.request.forms.get('operation')
     src = bottle.request.forms.get('src')
     dst = bottle.request.forms.get('dst')
     multiprocessing.Process(target=do_transfer,
                             args=(operation, src, dst)).start()
     # We'll send info later if there are problems
     return cfg.ok_reply
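The handler above reads the POSTed form fields with bottle.request.forms.get and replies right away, because the actual work (do_transfer) runs in a separate multiprocessing.Process. These excerpts omit the route decorators, so the sketch below shows how such a handler is typically registered in Bottle; the '/transfer' path, host, and port are assumptions, not the project's real configuration.

import bottle

@bottle.post('/transfer')
def transfer():
    # Read the POSTed form fields, just as the excerpt above does.
    operation = bottle.request.forms.get('operation')
    src = bottle.request.forms.get('src')
    dst = bottle.request.forms.get('dst')
    return 'ok: {0} {1} -> {2}'.format(operation, src, dst)

if __name__ == '__main__':
    bottle.run(host='0.0.0.0', port=8080)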
Example #2
 def transfer():
   common_snitch.authorize()
   operation = bottle.request.forms.get('operation')
   src = bottle.request.forms.get('src')
   dst = bottle.request.forms.get('dst')
   multiprocessing.Process(target=do_transfer, args=(operation, src,
                                                     dst)).start()
   # We'll send info later if there are problems
   return cfg.ok_reply
Example #3
 def start_slave():
   common_snitch.authorize()
   # Just start the two components that run on us. start-mapred.sh and
   # start-dfs.sh just ssh in and do this anyway
   logging.info('Starting datanode...')
   subprocess.call([cfg.hadoop_bin + 'hadoop-daemon.sh', 'start', 'datanode'])
   logging.info('Starting tasktracker...')
   subprocess.call([cfg.hadoop_bin + 'hadoop-daemon.sh', 'start',
                    'tasktracker'])
   logging.info('Both started!')
   return cfg.ok_reply
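Note that subprocess.call blocks until each hadoop-daemon.sh invocation returns, so the datanode and tasktracker are started one after the other and the reply is only sent once both calls have completed. This differs from the transfer handlers above, which hand the work to a background process before responding.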
Example #4
  def start_job():
    """Downloads a JAR locally and submits a MapReduce job."""
    common_snitch.authorize()
    jar = bottle.request.forms.get('jar')
    job_args = map(str, json.loads(bottle.request.forms.get('args')))
    local_jobdir = tempfile.mkdtemp()
    local_jar = os.path.join(local_jobdir, os.path.basename(jar))

    get_file(jar, local_jar)

    job_name = '{0}_{1}'.format(local_jar, uuid.uuid1())
    util.bg_exec([cfg.hadoop_bin + 'hadoop', 'jar', local_jar] + job_args,
                 '/home/hadoop/log_job_{0}'.format(os.path.basename(job_name)))
    return cfg.ok_reply
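start_job expects two form fields: jar, a path that the server fetches locally with get_file, and args, a JSON-encoded list of arguments passed to hadoop jar. Below is a hedged client-side sketch of a call to such an endpoint; the host, port, path, and file names are illustrative assumptions, and the requests library is used here only for brevity.

import json
import requests

resp = requests.post(
    'http://slave-host:8080/start_job',
    data={
        'jar': '/shared/jobs/wordcount.jar',              # fetched server-side by get_file
        'args': json.dumps(['input_dir', 'output_dir']),  # decoded server-side with json.loads
    })
print(resp.text)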
Example #5
    def start_job():
        """Downloads a JAR locally and submits a MapReduce job."""
        common_snitch.authorize()
        jar = bottle.request.forms.get('jar')
        job_args = map(str, json.loads(bottle.request.forms.get('args')))
        local_jobdir = tempfile.mkdtemp()
        local_jar = os.path.join(local_jobdir, os.path.basename(jar))

        get_file(jar, local_jar)

        job_name = '{0}_{1}'.format(local_jar, uuid.uuid1())
        util.bg_exec([cfg.hadoop_bin + 'hadoop', 'jar', local_jar] + job_args,
                     '/home/hadoop/log_job_{0}'.format(
                         os.path.basename(job_name)))
        return cfg.ok_reply
Example #6
 def start_jobtracker():
   common_snitch.authorize()
   subprocess.call([cfg.hadoop_bin + 'hadoop-daemon.sh', 'start',
                    'jobtracker'])
   logging.info('Start done!')
   return cfg.ok_reply
Example #7
 def clean():
     common_snitch.authorize()
     path = bottle.request.forms.get('path')
     subprocess.call([cfg.hadoop_bin + 'hadoop', 'fs', '-rmr', path])
     return cfg.ok_reply
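hadoop fs -rmr is the legacy recursive delete from Hadoop 1.x; later releases deprecate it in favor of hadoop fs -rm -r. If this handler were targeting Hadoop 2+, the equivalent call would presumably look like the sketch below (the path is a placeholder and hadoop is assumed to be on PATH).

import subprocess

# Hadoop 2+ spelling of the same recursive delete.
subprocess.call(['hadoop', 'fs', '-rm', '-r', '/tmp/example'])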
Example #8
 def start_jobtracker():
     common_snitch.authorize()
     subprocess.call(
         [cfg.hadoop_bin + 'hadoop-daemon.sh', 'start', 'jobtracker'])
     logging.info('Start done!')
     return cfg.ok_reply
Example #9
 def clean():
   common_snitch.authorize()
   path = bottle.request.forms.get('path')
   subprocess.call([cfg.hadoop_bin + 'hadoop', 'fs', '-rmr', path])
   return cfg.ok_reply