Example #1
  def execute(self, notebook, snippet):
    # Submit the snippet to Altus Data Engineering as either a Spark or a Hive job.
    if snippet['type'] == 'spark2':
      handle = AltusDataEngApi(self.user).submit_spark_job(
          cluster_name=self.cluster_name,
          jars=snippet['properties']['jars'],
          main_class=snippet['properties']['class'],
          arguments=snippet['properties']['spark_arguments'],
          spark_arguments=snippet['properties']['spark_opts'],
          # properties_file
      )
    else:
      statement = snippet['statement']
      handle = AltusDataEngApi(self.user).submit_hive_job(self.cluster_name, statement, params=None, job_xml=None)

    # A response without a 'jobs' list means the submission itself failed.
    if 'jobs' not in handle:
      raise QueryError('Submission failure: %s' % handle)

    job = handle['jobs'][0]

    # A freshly submitted job should already be in one of the running states.
    if job['status'] not in RUNNING_STATES:
      raise QueryError('Submission failure', handle=job['status'])

    # This handle is what cancel() and check_status() later read back
    # from snippet['result']['handle'].
    return {
      'id': job['jobId'],
      'crn': job['crn'],
      'has_result_set': False,
    }
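
For reference, a minimal sketch of the snippet payloads this execute() can handle. Only the key names and the 'spark2' type check come from the code above; every value, path, and class name is a made-up placeholder.

# Hypothetical Spark snippet; the key names match the lookups in execute(),
# the values are purely illustrative.
spark_snippet = {
  'type': 'spark2',
  'properties': {
    'jars': ['s3a://my-bucket/app.jar'],                # placeholder path
    'class': 'com.example.Main',                        # placeholder class
    'spark_arguments': ['--input', 's3a://my-bucket/in'],
    'spark_opts': ['--executor-memory', '2g'],
  },
}

# Hypothetical Hive snippet; any type other than 'spark2' is routed
# to submit_hive_job() with the raw statement.
hive_snippet = {
  'type': 'hive',
  'statement': 'SELECT COUNT(*) FROM web_logs',         # placeholder query
}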
Example #2
  def cancel(self, notebook, snippet):
    # Only a successful submission stores a job id in the handle.
    if snippet['result']['handle'].get('id'):
      job_id = snippet['result']['handle']['id']
      AltusDataEngApi(self.user).terminate_job(job_id=job_id)
      response = {'status': 0}
    else:
      response = {'status': -1, 'message': _('Could not cancel because of unsuccessful submission.')}

    return response
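
cancel() reports its outcome as a status dict: 0 on success, -1 when there was never a job to terminate. A sketch of the two input shapes it distinguishes; the nesting is taken from the lookups above, the job id value is a placeholder:

# Handle from a successful submission: terminate_job() is called
# and {'status': 0} comes back.
snippet = {'result': {'handle': {'id': 'job-1234abcd'}}}  # placeholder id

# No id means the submission never succeeded: the API is not called,
# and {'status': -1, 'message': ...} comes back instead.
snippet = {'result': {'handle': {}}}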
Example #3
  def check_status(self, notebook, snippet):
    # Assume the job is still running until the listing proves otherwise.
    response = {'status': 'running'}

    job_id = snippet['result']['handle']['id']

    handle = AltusDataEngApi(self.user).list_jobs(job_ids=[job_id])
    job = handle['jobs'][0]

    if job['status'] in RUNNING_STATES:
      return response
    elif job['status'] in ('failed', 'terminated'):
      raise QueryError(_('Job was %s') % job['status'])
    else:
      # Any other state is reported as 'available': the job is no longer running.
      response['status'] = 'available'

    return response
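
Taken together, the three examples form a submit/poll/cancel lifecycle: execute() returns the handle, check_status() polls it, and cancel() terminates it. A minimal sketch of a driver loop, assuming the methods live on one connector instance (here called api); the loop, timeout, and error handling are illustrative and not part of the source:

import time

def run_to_completion(api, notebook, snippet, timeout_s=600, poll_s=5):
  # Example #1: submit, then stash the handle where the other methods expect it.
  handle = api.execute(notebook, snippet)
  snippet['result'] = {'handle': handle}

  deadline = time.time() + timeout_s
  while time.time() < deadline:
    # Example #3: poll; this raises QueryError if the job failed or was terminated.
    status = api.check_status(notebook, snippet)
    if status['status'] == 'available':
      return handle
    time.sleep(poll_s)

  # Example #2: give up and terminate the job on timeout.
  api.cancel(notebook, snippet)
  raise RuntimeError('Job %s did not finish within %s seconds' % (handle['id'], timeout_s))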