Example #1
def get_api(request, snippet):
    from notebook.connectors.oozie_batch import OozieApi

    if snippet.get('wasBatchExecuted') and not TASK_SERVER.ENABLED.get():
        return OozieApi(user=request.user, request=request)

    if snippet.get('type') == 'report':
        snippet['type'] = 'impala'

    if snippet.get('connector'):
        connector_name = snippet['connector']['type']  # Ideally unify with name and nice_name
        snippet['type'] = connector_name
    else:
        connector_name = snippet['type']

    if has_connectors() and snippet.get('type') == 'hello' and is_admin(request.user):
        interpreter = snippet.get('interpreter')
    else:
        interpreter = get_interpreter(connector_type=connector_name, user=request.user)

    interface = interpreter['interface']

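    # Attach the client-selected compute: either POSTed as JSON in 'cluster'
    # or carried on the snippet itself.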
    if get_cluster_config(request.user)['has_computes']:
        compute = json.loads(request.POST.get('cluster', '""'))  # Via Catalog autocomplete API or Notebook create sessions.
        if compute == '""' or compute == 'undefined':
            compute = None
        if not compute and snippet.get('compute'):  # Via notebook.ko.js
            interpreter['compute'] = snippet['compute']

    LOG.debug('Selected interpreter %s interface=%s compute=%s' % (
        interpreter['type'], interface, interpreter.get('compute') and interpreter['compute']['name']))

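    # Dispatch on the interpreter interface; connector modules are imported
    # lazily inside each branch.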
    if interface == 'hiveserver2' or interface == 'hms':
        from notebook.connectors.hiveserver2 import HS2Api
        return HS2Api(user=request.user, request=request, interpreter=interpreter)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        from notebook.connectors.spark_shell import SparkApi
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        from notebook.connectors.spark_batch import SparkBatchApi
        return SparkBatchApi(request.user)
    elif interface == 'text' or interface == 'markdown':
        from notebook.connectors.text import TextApi
        return TextApi(request.user)
    elif interface == 'rdbms':
        from notebook.connectors.rdbms import RdbmsApi
        return RdbmsApi(request.user, interpreter=snippet['type'], query_server=snippet.get('query_server'))
    elif interface == 'jdbc':
        if interpreter['options'] and interpreter['options'].get('url', '').find('teradata') >= 0:
            from notebook.connectors.jdbc_teradata import JdbcApiTeradata
            return JdbcApiTeradata(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get('url', '').find('awsathena') >= 0:
            from notebook.connectors.jdbc_athena import JdbcApiAthena
            return JdbcApiAthena(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get('url', '').find('presto') >= 0:
            from notebook.connectors.jdbc_presto import JdbcApiPresto
            return JdbcApiPresto(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get('url', '').find('clickhouse') >= 0:
            from notebook.connectors.jdbc_clickhouse import JdbcApiClickhouse
            return JdbcApiClickhouse(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get('url', '').find('vertica') >= 0:
            from notebook.connectors.jdbc_vertica import JdbcApiVertica
            return JdbcApiVertica(request.user, interpreter=interpreter)
        else:
            from notebook.connectors.jdbc import JdbcApi
            return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'teradata':
        from notebook.connectors.jdbc_teradata import JdbcApiTeradata
        return JdbcApiTeradata(request.user, interpreter=interpreter)
    elif interface == 'athena':
        from notebook.connectors.jdbc_athena import JdbcApiAthena
        return JdbcApiAthena(request.user, interpreter=interpreter)
    elif interface == 'presto':
        from notebook.connectors.jdbc_presto import JdbcApiPresto
        return JdbcApiPresto(request.user, interpreter=interpreter)
    elif interface == 'sqlalchemy':
        from notebook.connectors.sql_alchemy import SqlAlchemyApi
        return SqlAlchemyApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        from notebook.connectors.solr import SolrApi
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'hbase':
        from notebook.connectors.hbase import HBaseApi
        return HBaseApi(request.user)
    elif interface == 'ksql':
        from notebook.connectors.ksql import KSqlApi
        return KSqlApi(request.user, interpreter=interpreter)
    elif interface == 'flink':
        from notebook.connectors.flink import FlinkSqlApi
        return FlinkSqlApi(request.user, interpreter=interpreter)
    elif interface == 'kafka':
        from notebook.connectors.kafka import KafkaApi
        return KafkaApi(request.user)
    elif interface == 'pig':
        return OozieApi(user=request.user, request=request)  # Backward compatibility until Hue 4
    else:
        raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
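
A minimal usage sketch, assuming a Hue Django view: get_api() is typically called with the request and one snippet of a notebook document, and the returned connector exposes execute(notebook, snippet) from Hue's base Api class. The caller name and the payload fields below are hypothetical.

def execute_first_snippet(request, notebook):
    # Hypothetical caller, not part of the listing above: dispatch one
    # snippet to its connector API and execute it.
    snippet = notebook['snippets'][0]  # e.g. {'type': 'hive', 'statement': 'SELECT 1'}
    api = get_api(request, snippet)    # resolves to HS2Api, SparkApi, OozieApi, ...
    return api.execute(notebook, snippet)
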
Example #2
def get_api(request, snippet):
  from notebook.connectors.oozie_batch import OozieApi

  if snippet.get('wasBatchExecuted'):
    return OozieApi(user=request.user, request=request)

  if snippet['type'] == 'report':
    snippet['type'] = 'impala'

  interpreter = [interpreter for interpreter in get_ordered_interpreters(request.user) if interpreter['type'] == snippet['type']]
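  # Fall back to a built-in interpreter definition for snippet types that are
  # not configured in hue.ini.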
  if not interpreter:
    if snippet['type'] == 'hbase':
      interpreter = [{
        'name': 'hbase',
        'type': 'hbase',
        'interface': 'hbase',
        'options': {},
        'is_sql': False
      }]
    elif snippet['type'] == 'kafka':
      interpreter = [{
        'name': 'kafka',
        'type': 'kafka',
        'interface': 'kafka',
        'options': {},
        'is_sql': False
      }]
    elif snippet['type'] == 'solr':
      interpreter = [{
        'name': 'solr',
        'type': 'solr',
        'interface': 'solr',
        'options': {},
        'is_sql': False
      }]
    elif snippet['type'] == 'custom':
      interpreter = [{
        'name': snippet['name'],
        'type': snippet['type'],
        'interface': snippet['interface'],
        'options': snippet.get('options', {}),
        'is_sql': False
      }]
    else:
      raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)

  interpreter = interpreter[0]
  interface = interpreter['interface']

  # Multi cluster
  if has_multi_cluster():
    cluster = json.loads(request.POST.get('cluster', '""')) # Via Catalog autocomplete API or Notebook create sessions
    if cluster == '""' or cluster == 'undefined':
      cluster = None
    if not cluster and snippet.get('compute'): # Via notebook.ko.js
      cluster = snippet['compute']
  else:
    cluster = None

  cluster_name = cluster.get('id') if cluster else None

  if cluster and 'altus:dataware:k8s' in cluster_name:
    interface = 'hiveserver2'
  elif cluster and 'crn:altus:dataware:' in cluster_name:
    interface = 'altus-adb'
  elif cluster and 'crn:altus:dataeng:' in cluster_name:
    interface = 'dataeng'

  LOG.info('Selected cluster %s %s interface %s' % (cluster_name, cluster, interface))
  snippet['interface'] = interface

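  # Dispatch on the interface; connector modules are imported lazily inside each branch.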
  if interface == 'hiveserver2':
    from notebook.connectors.hiveserver2 import HS2Api
    return HS2Api(user=request.user, request=request, cluster=cluster)
  elif interface == 'oozie':
    return OozieApi(user=request.user, request=request)
  elif interface == 'livy':
    from notebook.connectors.spark_shell import SparkApi
    return SparkApi(request.user)
  elif interface == 'livy-batch':
    from notebook.connectors.spark_batch import SparkBatchApi
    return SparkBatchApi(request.user)
  elif interface == 'text' or interface == 'markdown':
    from notebook.connectors.text import TextApi
    return TextApi(request.user)
  elif interface == 'rdbms':
    from notebook.connectors.rdbms import RdbmsApi
    return RdbmsApi(request.user, interpreter=snippet['type'], query_server=snippet.get('query_server'))
  elif interface == 'altus-adb':
    from notebook.connectors.altus_adb import AltusAdbApi
    return AltusAdbApi(user=request.user, cluster_name=cluster_name, request=request)
  elif interface == 'dataeng':
    from notebook.connectors.dataeng import DataEngApi
    return DataEngApi(user=request.user, request=request, cluster_name=cluster_name)
  elif interface == 'jdbc':
    if interpreter['options'] and interpreter['options'].get('url', '').find('teradata') >= 0:
      from notebook.connectors.jdbc_teradata import JdbcApiTeradata
      return JdbcApiTeradata(request.user, interpreter=interpreter)
    elif interpreter['options'] and interpreter['options'].get('url', '').find('presto') >= 0:
      from notebook.connectors.jdbc_presto import JdbcApiPresto
      return JdbcApiPresto(request.user, interpreter=interpreter)
    elif interpreter['options'] and interpreter['options'].get('url', '').find('clickhouse') >= 0:
      from notebook.connectors.jdbc_clickhouse import JdbcApiClickhouse
      return JdbcApiClickhouse(request.user, interpreter=interpreter)
    else:
      from notebook.connectors.jdbc import JdbcApi
      return JdbcApi(request.user, interpreter=interpreter)
  elif interface == 'teradata':
    from notebook.connectors.jdbc_teradata import JdbcApiTeradata
    return JdbcApiTeradata(request.user, interpreter=interpreter)
  elif interface == 'presto':
    from notebook.connectors.jdbc_presto import JdbcApiPresto
    return JdbcApiPresto(request.user, interpreter=interpreter)
  elif interface == 'sqlalchemy':
    from notebook.connectors.sqlalchemyapi import SqlAlchemyApi
    return SqlAlchemyApi(request.user, interpreter=interpreter)
  elif interface == 'solr':
    from notebook.connectors.solr import SolrApi
    return SolrApi(request.user, interpreter=interpreter)
  elif interface == 'hbase':
    from notebook.connectors.hbase import HBaseApi
    return HBaseApi(request.user)
  elif interface == 'kafka':
    from notebook.connectors.kafka import KafkaApi
    return KafkaApi(request.user)
  elif interface == 'pig':
    return OozieApi(user=request.user, request=request) # Backward compatibility until Hue 4
  else:
    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
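
For illustration, the Altus cluster-name routing above can be exercised in isolation. A minimal sketch, using only the CRN substrings from the listing; the helper name and the sample CRN are hypothetical.

def pick_interface(default_interface, cluster_name):
    # Mirrors the routing rules above: certain Altus cluster CRNs override
    # the interface picked from the interpreter configuration.
    if cluster_name and 'altus:dataware:k8s' in cluster_name:
        return 'hiveserver2'
    elif cluster_name and 'crn:altus:dataware:' in cluster_name:
        return 'altus-adb'
    elif cluster_name and 'crn:altus:dataeng:' in cluster_name:
        return 'dataeng'
    return default_interface

# A Data Engineering CRN forces the 'dataeng' interface; no cluster keeps the default.
assert pick_interface('jdbc', 'crn:altus:dataeng:us-west-1:1234:cluster:demo') == 'dataeng'
assert pick_interface('jdbc', None) == 'jdbc'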