Example 1
# Module-level imports from the surrounding Hue source, shown here for context:
from django.utils.translation import ugettext as _

from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Cluster

from notebook.conf import get_ordered_interpreters


def get_api(request, snippet):
  """Return the notebook connector API matching the snippet's configured interface."""
  from notebook.connectors.dataeng import DataEngApi
  from notebook.connectors.hiveserver2 import HS2Api
  from notebook.connectors.jdbc import JdbcApi
  from notebook.connectors.rdbms import RdbmsApi
  from notebook.connectors.oozie_batch import OozieApi
  from notebook.connectors.solr import SolrApi
  from notebook.connectors.spark_shell import SparkApi
  from notebook.connectors.spark_batch import SparkBatchApi
  from notebook.connectors.text import TextApi

  # Snippets that were executed as a batch job are routed through Oozie.
  if snippet.get('wasBatchExecuted'):
    return OozieApi(user=request.user, request=request)

  # Look up the interpreter configured in hue.ini for this snippet type.
  interpreter = [interpreter for interpreter in get_ordered_interpreters(request.user) if interpreter['type'] == snippet['type']]
  if not interpreter:
    raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
  interpreter = interpreter[0]
  interface = interpreter['interface']

  # Multi-cluster support: a selected Data Engineering cluster overrides the interface.
  cluster = Cluster(request.user)
  if cluster and cluster.get_type() == 'dataeng':
    interface = 'dataeng'

  # Dispatch to the connector implementing the resolved interface.
  if interface == 'hiveserver2':
    return HS2Api(user=request.user, request=request)
  elif interface == 'oozie':
    return OozieApi(user=request.user, request=request)
  elif interface == 'livy':
    return SparkApi(request.user)
  elif interface == 'livy-batch':
    return SparkBatchApi(request.user)
  elif interface in ('text', 'markdown'):
    return TextApi(request.user)
  elif interface == 'rdbms':
    return RdbmsApi(request.user, interpreter=snippet['type'])
  elif interface == 'dataeng':
    return DataEngApi(user=request.user, request=request, cluster_name=cluster.get_interface())
  elif interface == 'jdbc':
    return JdbcApi(request.user, interpreter=interpreter)
  elif interface == 'solr':
    return SolrApi(request.user, interpreter=interpreter)
  elif interface == 'pig':
    return OozieApi(user=request.user, request=request) # Backward compatibility until Hue 4
  else:
    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
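
The if/elif chain above is effectively a dispatch table keyed on the interpreter's interface string. Below is a minimal, self-contained sketch of that same pattern; TextApi, SqlApi and get_api_sketch are illustrative placeholders, not Hue's real connectors:

# A minimal sketch of interface-based dispatch. TextApi/SqlApi are
# placeholder classes; Hue's real connectors take user/request arguments.
class TextApi(object):
  def execute(self, statement):
    return statement

class SqlApi(object):
  def execute(self, statement):
    return 'rows for: %s' % statement

# Map interface names to connector classes, mirroring the if/elif chain.
API_BY_INTERFACE = {
  'hiveserver2': SqlApi,
  'text': TextApi,
  'markdown': TextApi,  # markdown shares the text connector
}

def get_api_sketch(interface):
  try:
    return API_BY_INTERFACE[interface]()
  except KeyError:
    raise ValueError('Notebook connector interface not recognized: %s' % interface)

print(get_api_sketch('markdown').execute('# title'))  # -> '# title'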
Example 2
    # From Hue's job browser. datetime/timedelta come from the standard library;
    # DataEng, DATE_FORMAT, DATAENG and Cluster come from the surrounding Hue
    # modules this method was excerpted from.
    def apps(self, filters):
        """List Data Engineering jobs, optionally restricted to a recent time window."""
        kwargs = {}

        # Convert a relative time filter ('last N minutes/hours/days') into an
        # absolute creation-date cutoff.
        if 'time' in filters:
            if filters['time']['time_unit'] == 'minutes':
                delta = timedelta(minutes=int(filters['time']['time_value']))
            elif filters['time']['time_unit'] == 'hours':
                delta = timedelta(hours=int(filters['time']['time_value']))
            else:
                delta = timedelta(days=int(filters['time']['time_value']))
            kwargs['creation_date_after'] = (datetime.today() -
                                             delta).strftime(DATE_FORMAT)

        # Could also come from filters
        cluster = Cluster(self.user)
        if cluster.get_type() == DATAENG:
            kwargs['cluster_crn'] = cluster.get_id()

        api = DataEng(self.user)

        jobs = api.list_jobs(**kwargs)

        return {
            'apps': [{
                'id': app['jobId'],
                'name': app['creationDate'],  # the API exposes no job name
                'status': app['status'],
                'apiStatus': self._api_status(app['status']),
                'type': app['jobType'],
                'user': '',             # not reported by the API
                'progress': 100,        # placeholder: no live progress is available
                'duration': 10 * 3600,  # placeholder duration
                'submitted': app['creationDate']
            } for app in jobs['jobs']],
            'total': len(jobs['jobs'])  # count the jobs themselves, not the response dict's keys
        }
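
The time-filter handling above turns a relative window into an absolute creation-date cutoff for the jobs listing. Here is a standalone sketch of just that computation; the DATE_FORMAT value is an assumed ISO-style pattern standing in for the constant defined in the Hue module this method comes from:

from datetime import datetime, timedelta

DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'  # assumed ISO-style pattern, not Hue's constant

def creation_date_after(time_filter):
    # Map the filter's unit onto the matching timedelta keyword.
    value = int(time_filter['time_value'])
    if time_filter['time_unit'] == 'minutes':
        delta = timedelta(minutes=value)
    elif time_filter['time_unit'] == 'hours':
        delta = timedelta(hours=value)
    else:  # anything else is treated as days, as in the method above
        delta = timedelta(days=value)
    return (datetime.today() - delta).strftime(DATE_FORMAT)

print(creation_date_after({'time_unit': 'hours', 'time_value': '4'}))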
Example 3
# Module-level imports from the surrounding Hue source, shown here for context:
from collections import OrderedDict

from desktop import appmanager
from notebook.conf import INTERPRETERS


def _default_interpreters(user):
  interpreters = []
  apps = appmanager.get_apps_dict(user)

  if 'hive' in apps:
    interpreters.append(('hive', {
      'name': 'Hive', 'interface': 'hiveserver2', 'options': {}
    }))

  if 'impala' in apps:
    interpreters.append(('impala', {
      'name': 'Impala', 'interface': 'hiveserver2', 'options': {}
    }))

  if 'pig' in apps:
    interpreters.append(('pig', {
      'name': 'Pig', 'interface': 'oozie', 'options': {}
    }))

  if 'oozie' in apps and 'jobsub' in apps:
    interpreters.extend((
      ('java', {
          'name': 'Java', 'interface': 'oozie', 'options': {}
      }),
      ('spark2', {
          'name': 'Spark', 'interface': 'oozie', 'options': {}
      }),
      ('mapreduce', {
          'name': 'MapReduce', 'interface': 'oozie', 'options': {}
      }),
      ('shell', {
          'name': 'Shell', 'interface': 'oozie', 'options': {}
      }),
      ('sqoop1', {
          'name': 'Sqoop 1', 'interface': 'oozie', 'options': {}
      }),
      ('distcp', {
          'name': 'Distcp', 'interface': 'oozie', 'options': {}
      }),
    ))

  from dashboard.conf import get_properties  # Cyclic dependency
  dashboards = get_properties()
  if dashboards.get('solr') and dashboards['solr']['analytics']:
    interpreters.append(('solr', {
        'name': 'Solr SQL', 'interface': 'solr', 'options': {}
    }))

  from desktop.models import Cluster  # Cyclic dependency
  cluster = Cluster(user)
  if cluster and cluster.get_type() == 'dataeng':
    interpreters.append(('dataeng', {
        'name': 'DataEng', 'interface': 'dataeng', 'options': {}
    }))

  if 'spark' in apps:
    interpreters.extend((
      ('spark', {
          'name': 'Scala', 'interface': 'livy', 'options': {}
      }),
      ('pyspark', {
          'name': 'PySpark', 'interface': 'livy', 'options': {}
      }),
      ('r', {
          'name': 'R', 'interface': 'livy', 'options': {}
      }),
      ('jar', {
          'name': 'Spark Submit Jar', 'interface': 'livy-batch', 'options': {}
      }),
      ('py', {
          'name': 'Spark Submit Python', 'interface': 'livy-batch', 'options': {}
      }),
      ('text', {
          'name': 'Text', 'interface': 'text', 'options': {}
      }),
      ('markdown', {
          'name': 'Markdown', 'interface': 'text', 'options': {}
      })
    ))

  # Register the computed defaults; OrderedDict preserves the append order.
  INTERPRETERS.set_for_testing(OrderedDict(interpreters))
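
The function builds a list of (type, config) pairs and only converts it to an OrderedDict at the end, so interpreters are registered in exactly the order they were appended. A tiny standalone illustration of that idiom:

from collections import OrderedDict

# Build (key, config) pairs in display order, then freeze that order in a
# dict keyed by interpreter type.
interpreters = [
  ('hive', {'name': 'Hive', 'interface': 'hiveserver2', 'options': {}}),
  ('pig', {'name': 'Pig', 'interface': 'oozie', 'options': {}}),
]
ordered = OrderedDict(interpreters)
print(list(ordered))  # -> ['hive', 'pig'], insertion order preserved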