Code Example #1
File: models.py Project: CaeserNieh/hue
def make_notebook2(name='Browse', description='', is_saved=False, snippets=None):

  from notebook.connectors.hiveserver2 import HS2Api

  editor = Notebook()

  _snippets = []

  for snippet in snippets:
    default_properties = {
        'files': [],
        'functions': [],
        'settings': []
    }
    # Merge the snippet's own properties over the defaults, as in Code Example #17.
    default_properties.update(snippet.get('properties', {}))
    snippet['properties'] = default_properties

    if snippet['type'] == 'hive':
      pass
    elif snippet['type'] == 'impala':
      pass
    elif snippet['type'] == 'java':
      pass

    _snippets.append(snippet)


  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'description': description,
    'sessions': [
      {
         'type': _snippet['type'],
         'properties': HS2Api.get_properties(_snippet['type']),
         'id': None
      } for _snippet in _snippets # Non unique types currently
    ],
    'selectedSnippet': _snippets[0]['type'],
    'type': 'notebook',
    'showHistory': False,
    'isSaved': is_saved,
    'snippets': [
      {
         'status': _snippet.get('status', 'ready'),
         'id': str(uuid.uuid4()),
         'statement_raw': _snippet.get('statement', ''),
         'statement': _snippet.get('statement', ''),
         'type': _snippet.get('type'),
         'properties': _snippet['properties'],
         'name': name,
         'database': _snippet.get('database'),
         'result': {}
      } for _snippet in _snippets
    ]
  }

  editor.data = json.dumps(data)

  return editor
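
For reference, a minimal usage sketch of make_notebook2 (the snippet dicts and their values below are illustrative, not from the source; it assumes the Notebook model and HS2Api are importable as in the example above):

# Hypothetical input: each snippet supplies its own type, statement and properties.
snippets = [
    {'type': 'hive', 'statement': 'SELECT 1', 'database': 'default', 'properties': {}},
    {'type': 'impala', 'statement': 'SELECT 2', 'database': 'default', 'properties': {}},
]
notebook = make_notebook2(name='Demo', description='Two snippets', snippets=snippets)
# notebook.data now holds the serialized notebook JSON built above.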
Code Example #2
File: models.py Project: 277800076/hue
def make_notebook(name='Browse', description='', editor_type='hive', statement='', status='ready',
                  files=None, functions=None, settings=None):

  from notebook.connectors.hiveserver2 import HS2Api

  editor = Notebook()

  properties = HS2Api.get_properties(editor_type)

  if editor_type == 'hive':
    if files is not None:
      _update_property_value(properties, 'files', files)

    if functions is not None:
      _update_property_value(properties, 'functions', functions)

    if settings is not None:
      _update_property_value(properties, 'settings', settings)
  elif editor_type == 'impala':
    if settings is not None:
      _update_property_value(properties, 'settings', settings)

  editor.data = json.dumps({
    'name': name,
    'description': description,
    'sessions': [
      {
         'type': editor_type,
         'properties': properties,
         'id': None
      }
    ],
    'selectedSnippet': editor_type,
    'type': 'query-%s' % editor_type,
    'showHistory': True,

    'snippets': [
      {
         'status': status,
         'id': str(uuid.uuid4()),
         'statement_raw': statement,
         'statement': statement,
         'type': editor_type,
         'properties': {
            'files': [] if files is None else files,
            'functions': [] if functions is None else functions,
            'settings': [] if settings is None else settings
         },
         'name': name,
         'database': 'default',
         'result': {}
      }
    ]
  })
  
  return editor
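
A hedged usage sketch of this make_notebook variant (the statement and the shape of the settings entries are assumptions; _update_property_value is expected to accept key/value entries of this form):

# Sketch: build a Hive query document; names and values are illustrative.
editor = make_notebook(
    name='Top salaries',
    editor_type='hive',
    statement='SELECT description, salary FROM sample_07 LIMIT 10',
    status='ready',
    settings=[{'key': 'hive.execution.engine', 'value': 'mr'}]  # assumed entry shape
)
# editor.data is the JSON string consumed by the notebook editor.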
Code Example #3
  def _add_session(self, data, snippet_type):
    from notebook.connectors.hiveserver2 import HS2Api # Cyclic dependency

    if snippet_type not in [_s['type'] for _s in data['sessions']]:
      data['sessions'].append({
         'type': snippet_type,
         'properties': HS2Api.get_properties(snippet_type),
         'id': None
      })
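
_add_session only appends when no session of that type exists yet, so calling it repeatedly is safe. A small sketch of that behavior (standalone dict; the calls are shown as comments since they need an instance):

# Sketch: the membership guard makes _add_session idempotent per snippet type.
data = {'sessions': [{'type': 'hive', 'properties': [], 'id': None}]}
# self._add_session(data, 'hive')    # no-op: a 'hive' session already exists
# self._add_session(data, 'impala')  # appends a second session for 'impala'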
Code Example #4
File: tests_hiveserver2.py Project: vonkonyoung/hue
    def setUp(self):
        self.client.post('/beeswax/install_examples')

        self.user = User.objects.get(username='******')
        add_to_group('test')
        grant_access("test", "test", "notebook")

        self.db = dbms.get(self.user, get_query_server_config())
        self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir,
                                   '/user/test')
        self.api = HS2Api(self.user)

        self.statement = 'SELECT description, salary FROM sample_07 WHERE (sample_07.salary > 100000) ORDER BY salary DESC LIMIT 1000'
Code Example #5
File: tests_hiveserver2.py Project: zlcken/hue
    def setUp(self):
        self.client.post('/beeswax/install_examples')

        self.user = User.objects.get(username='******')
        grant_access("test", "test", "notebook")

        self.db = dbms.get(self.user, get_query_server_config())
        self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir,
                                   '/user/test')
        self.api = HS2Api(self.user)

        self.notebook_json = """
      {
        "uuid": "f5d6394d-364f-56e8-6dd3-b1c5a4738c52",
        "id": 1234,
        "sessions": [{"type": "hive", "properties": [], "id": "1234"}],
        "type": "query-hive",
        "name": "Test Hiveserver2 Editor"
      }
    """
        self.statement = 'SELECT description, salary FROM sample_07 WHERE (sample_07.salary > 100000) ORDER BY salary DESC LIMIT 1000'
        self.snippet_json = """
      {
          "status": "running",
          "database": "%(database)s",
          "id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
          "result": {
              "type": "table",
              "handle": {},
              "id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
          },
          "statement": "%(statement)s",
          "type": "hive",
          "properties": {
              "files": [],
              "functions": [],
              "settings": []
          }
      }
    """ % {
            'database': self.db_name,
            'statement': self.statement
        }

        doc, created = Document2.objects.get_or_create(id=1234,
                                                       name='Test Hive Query',
                                                       type='query-hive',
                                                       owner=self.user,
                                                       is_history=True,
                                                       data=self.notebook_json)
Code Example #6
def get_api(request, snippet):
  from notebook.connectors.dataeng import DataEngApi
  from notebook.connectors.hiveserver2 import HS2Api
  from notebook.connectors.jdbc import JdbcApi
  from notebook.connectors.rdbms import RdbmsApi
  from notebook.connectors.oozie_batch import OozieApi
  from notebook.connectors.solr import SolrApi
  from notebook.connectors.spark_shell import SparkApi
  from notebook.connectors.spark_batch import SparkBatchApi
  from notebook.connectors.text import TextApi

  if snippet.get('wasBatchExecuted'):
    return OozieApi(user=request.user, request=request)

  interpreter = [interpreter for interpreter in get_ordered_interpreters(request.user) if interpreter['type'] == snippet['type']]
  if not interpreter:
    raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
  interpreter = interpreter[0]
  interface = interpreter['interface']

  # Multi cluster
  cluster = Cluster(request.user)
  if cluster and cluster.get_type() == 'dataeng':
    interface = 'dataeng'

  if interface == 'hiveserver2':
    return HS2Api(user=request.user, request=request)
  elif interface == 'oozie':
    return OozieApi(user=request.user, request=request)
  elif interface == 'livy':
    return SparkApi(request.user)
  elif interface == 'livy-batch':
    return SparkBatchApi(request.user)
  elif interface == 'text' or interface == 'markdown':
    return TextApi(request.user)
  elif interface == 'rdbms':
    return RdbmsApi(request.user, interpreter=snippet['type'])
  elif interface == 'dataeng':
    return DataEngApi(user=request.user, request=request, cluster_name=cluster.get_interface())
  elif interface == 'jdbc':
    return JdbcApi(request.user, interpreter=interpreter)
  elif interface == 'solr':
    return SolrApi(request.user, interpreter=interpreter)
  elif interface == 'pig':
    return OozieApi(user=request.user, request=request) # Backward compatibility until Hue 4
  else:
    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
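
A hedged sketch of how this dispatcher is driven (assumes a Django request object in scope; the snippet dict shape matches the examples above):

# Sketch: dispatch is keyed on snippet['type'] -> interpreter['interface'] from hue.ini.
snippet = {'type': 'hive', 'wasBatchExecuted': False}
# api = get_api(request, snippet)  # returns HS2Api when the interface is 'hiveserver2'
# api = get_api(request, {'type': 'hive', 'wasBatchExecuted': True})  # always OozieApi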
Code Example #7
def get_api(request, snippet):
    from notebook.connectors.hiveserver2 import HS2Api
    from notebook.connectors.jdbc import JdbcApi
    from notebook.connectors.rdbms import RdbmsApi
    from notebook.connectors.oozie_batch import OozieApi
    from notebook.connectors.pig_batch import PigApi
    from notebook.connectors.solr import SolrApi
    from notebook.connectors.spark_shell import SparkApi
    from notebook.connectors.spark_batch import SparkBatchApi
    from notebook.connectors.text import TextApi

    if snippet.get('wasBatchExecuted'):
        return OozieApi(user=request.user, request=request)

    interpreter = [
        interpreter for interpreter in get_interpreters(request.user)
        if interpreter['type'] == snippet['type']
    ]
    if not interpreter:
        raise PopupException(
            _('Snippet type %(type)s is not configured in hue.ini') % snippet)
    interpreter = interpreter[0]
    interface = interpreter['interface']

    if interface == 'hiveserver2':
        return HS2Api(user=request.user, request=request)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        return SparkBatchApi(request.user)
    elif interface == 'text' or interface == 'markdown':
        return TextApi(request.user)
    elif interface == 'rdbms':
        return RdbmsApi(request.user, interpreter=snippet['type'])
    elif interface == 'jdbc':
        return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'pig':
        return PigApi(user=request.user, request=request)
    else:
        raise PopupException(
            _('Notebook connector interface not recognized: %s') % interface)
Code Example #8
File: base.py Project: tempbottle/hue
def get_api(user, snippet, fs, jt):
    from notebook.connectors.hiveserver2 import HS2Api
    from notebook.connectors.jdbc import JdbcApi
    from notebook.connectors.mysql import MySqlApi
    from notebook.connectors.pig_batch import PigApi
    from notebook.connectors.spark_shell import SparkApi
    from notebook.connectors.spark_batch import SparkBatchApi
    from notebook.connectors.text import TextApi

    interpreter = [
        interpreter for interpreter in get_interpreters()
        if interpreter['type'] == snippet['type']
    ]
    if not interpreter:
        raise PopupException(
            _('Snippet type %(type)s is not configured in hue.ini') % snippet)
    interface = interpreter[0]['interface']
    options = interpreter[0]['options']

    if interface == 'hiveserver2':
        return HS2Api(user)
    elif interface == 'livy':
        return SparkApi(user)
    elif interface == 'livy-batch':
        return SparkBatchApi(user)
    elif interface == 'text':
        return TextApi(user)
    elif interface == 'mysql':
        return MySqlApi(user)
    elif interface == 'jdbc':
        return JdbcApi(user, options=options)
    elif interface == 'pig':
        return PigApi(user, fs=fs, jt=jt)
    else:
        raise PopupException(
            _('Notebook connector interface not recognized: %s') % interface)
Code Example #9
def get_api(request, snippet):
    from notebook.connectors.oozie_batch import OozieApi

    if snippet.get('wasBatchExecuted') and not TASK_SERVER.ENABLED.get():
        return OozieApi(user=request.user, request=request)

    if snippet.get('type') == 'report':
        snippet['type'] = 'impala'

    patch_snippet_for_connector(snippet)

    connector_name = snippet['type']

    if has_connectors() and snippet.get('type') == 'hello' and is_admin(
            request.user):
        interpreter = snippet.get('interpreter')
    else:
        interpreter = get_interpreter(connector_type=connector_name,
                                      user=request.user)

    interface = interpreter['interface']

    if get_cluster_config(request.user)['has_computes']:
        compute = json.loads(request.POST.get(
            'cluster',
            '""'))  # Via Catalog autocomplete API or Notebook create sessions.
        if compute == '""' or compute == 'undefined':
            compute = None
        if not compute and snippet.get('compute'):  # Via notebook.ko.js
            interpreter['compute'] = snippet['compute']

    LOG.debug('Selected interpreter %s interface=%s compute=%s' %
              (interpreter['type'], interface, interpreter.get('compute')
               and interpreter['compute']['name']))

    if interface == 'hiveserver2' or interface == 'hms':
        from notebook.connectors.hiveserver2 import HS2Api
        return HS2Api(user=request.user,
                      request=request,
                      interpreter=interpreter)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        from notebook.connectors.spark_shell import SparkApi
        return SparkApi(request.user, interpreter=interpreter)
    elif interface == 'livy-batch':
        from notebook.connectors.spark_batch import SparkBatchApi
        return SparkBatchApi(request.user, interpreter=interpreter)
    elif interface == 'text' or interface == 'markdown':
        from notebook.connectors.text import TextApi
        return TextApi(request.user)
    elif interface == 'rdbms':
        from notebook.connectors.rdbms import RdbmsApi
        return RdbmsApi(request.user,
                        interpreter=snippet['type'],
                        query_server=snippet.get('query_server'))
    elif interface == 'jdbc':
        if interpreter['options'] and interpreter['options'].get(
                'url', '').find('teradata') >= 0:
            from notebook.connectors.jdbc_teradata import JdbcApiTeradata
            return JdbcApiTeradata(request.user, interpreter=interpreter)
        if interpreter['options'] and interpreter['options'].get(
                'url', '').find('awsathena') >= 0:
            from notebook.connectors.jdbc_athena import JdbcApiAthena
            return JdbcApiAthena(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get(
                'url', '').find('presto') >= 0:
            from notebook.connectors.jdbc_presto import JdbcApiPresto
            return JdbcApiPresto(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get(
                'url', '').find('clickhouse') >= 0:
            from notebook.connectors.jdbc_clickhouse import JdbcApiClickhouse
            return JdbcApiClickhouse(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get(
                'url', '').find('vertica') >= 0:
            from notebook.connectors.jdbc_vertica import JdbcApiVertica
            return JdbcApiVertica(request.user, interpreter=interpreter)
        else:
            from notebook.connectors.jdbc import JdbcApi
            return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'teradata':
        from notebook.connectors.jdbc_teradata import JdbcApiTeradata
        return JdbcApiTeradata(request.user, interpreter=interpreter)
    elif interface == 'athena':
        from notebook.connectors.jdbc_athena import JdbcApiAthena
        return JdbcApiAthena(request.user, interpreter=interpreter)
    elif interface == 'presto':
        from notebook.connectors.jdbc_presto import JdbcApiPresto
        return JdbcApiPresto(request.user, interpreter=interpreter)
    elif interface == 'sqlalchemy':
        from notebook.connectors.sql_alchemy import SqlAlchemyApi
        return SqlAlchemyApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        from notebook.connectors.solr import SolrApi
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'hbase':
        from notebook.connectors.hbase import HBaseApi
        return HBaseApi(request.user)
    elif interface == 'ksql':
        from notebook.connectors.ksql import KSqlApi
        return KSqlApi(request.user, interpreter=interpreter)
    elif interface == 'flink':
        from notebook.connectors.flink_sql import FlinkSqlApi
        return FlinkSqlApi(request.user, interpreter=interpreter)
    elif interface == 'kafka':
        from notebook.connectors.kafka import KafkaApi
        return KafkaApi(request.user)
    elif interface == 'pig':
        return OozieApi(user=request.user,
                        request=request)  # Backward compatibility until Hue 4
    else:
        raise PopupException(
            _('Notebook connector interface not recognized: %s') % interface)
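
The jdbc branch above selects a connector by substring-matching the configured JDBC URL. A condensed, standalone restatement of that selection order (sketch only; it returns class names rather than importing the real connector modules):

# Simplified restatement of the URL-sniffing dispatch in the jdbc branch.
def jdbc_api_name(options):
    url = (options or {}).get('url', '')
    for marker, name in [('teradata', 'JdbcApiTeradata'),
                         ('awsathena', 'JdbcApiAthena'),
                         ('presto', 'JdbcApiPresto'),
                         ('clickhouse', 'JdbcApiClickhouse'),
                         ('vertica', 'JdbcApiVertica')]:
        if marker in url:  # equivalent to url.find(marker) >= 0
            return name
    return 'JdbcApi'

assert jdbc_api_name({'url': 'jdbc:presto://host:8080/hive'}) == 'JdbcApiPresto'
assert jdbc_api_name({}) == 'JdbcApi'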
Code Example #10
File: models.py Project: evanzh7/goumang
def make_notebook(name='Browse', description='', editor_type='hive', statement='', status='ready',
                  files=None, functions=None, settings=None, is_saved=False, database='default', snippet_properties=None, batch_submit=False,
                  on_success_url=None, skip_historify=False, is_task=False, last_executed=-1, is_notebook=False, pub_sub_url=None, result_properties={},
                  namespace=None, compute=None):
  '''
  skip_historify: do not add the task to the query history, e.g. SQL Dashboard
  is_task / isManaged: true when the query is a Hue-managed operation (include_managed=True in the document), e.g. exporting a query result or dropping tables
  '''
  from notebook.connectors.hiveserver2 import HS2Api

  # impala can have compute name appended to the editor_type (impala/dbms.py - get_query_server_config)
  if editor_type.startswith('impala'):
    editor_type = 'impala'

  editor = Notebook()
  if snippet_properties is None:
    snippet_properties = {}

  if editor_type == 'hive':
    sessions_properties = HS2Api.get_properties(editor_type)
    if files is not None:
      _update_property_value(sessions_properties, 'files', files)

    if functions is not None:
      _update_property_value(sessions_properties, 'functions', functions)

    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'impala':
    sessions_properties = HS2Api.get_properties(editor_type)
    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'java':
    sessions_properties = [] # Java options
  else:
    sessions_properties = []

  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'description': description,
    'sessions': [
      {
         'type': editor_type,
         'properties': sessions_properties,
         'id': None
      }
    ],
    'selectedSnippet': editor_type,
    'type': 'notebook' if is_notebook else 'query-%s' % editor_type,
    'showHistory': True,
    'isSaved': is_saved,
    'onSuccessUrl': urllib.quote(on_success_url.encode('utf-8'), safe=SAFE_CHARACTERS_URI) if on_success_url else None,
    'pubSubUrl': pub_sub_url,
    'skipHistorify': skip_historify,
    'isManaged': is_task,
    'snippets': [
      {
         'status': status,
         'id': str(uuid.uuid4()),
         'statement_raw': statement,
         'statement': statement,
         'type': editor_type,
         'wasBatchExecuted': batch_submit,
         'lastExecuted': last_executed,
         'properties': {
            'files': [] if files is None else files,
            'functions': [] if functions is None else functions,
            'settings': [] if settings is None else settings
         },
         'name': name,
         'database': database,
         'namespace': namespace if namespace else {},
         'compute': compute if compute else {},
         'result': {'handle':{}},
         'variables': []
      }
    ] if not is_notebook else []
  }

  if snippet_properties:
    data['snippets'][0]['properties'].update(snippet_properties)
  if result_properties:
    data['snippets'][0]['result'].update(result_properties)

  editor.data = json.dumps(data)

  return editor
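
A hedged usage sketch of this fuller variant (values are illustrative; note that on_success_url is percent-encoded with urllib.quote, the Python 2 API, before being stored):

# Sketch: a managed, non-historified task, e.g. an export started by Hue itself.
editor = make_notebook(
    name='Export query result',
    editor_type='impala',
    statement='SELECT 1',
    is_task=True,           # stored as isManaged
    skip_historify=True,    # keep it out of the query history
    on_success_url='/filebrowser/view=/user/test'
)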
Code Example #11
def make_notebook(name='Browse',
                  description='',
                  editor_type='hive',
                  statement='',
                  status='ready',
                  files=None,
                  functions=None,
                  settings=None,
                  is_saved=False,
                  database='default',
                  snippet_properties=None,
                  batch_submit=False,
                  on_success_url=None,
                  skip_historify=False,
                  is_task=False,
                  last_executed=-1):
    '''
    skip_historify: do not add the task to the query history, e.g. SQL Dashboard
    isManaged: true when the query is a Hue-managed operation (include_managed=True in the document), e.g. exporting a query result or dropping tables
    '''
    from notebook.connectors.hiveserver2 import HS2Api

    editor = Notebook()
    if snippet_properties is None:
        snippet_properties = {}

    if editor_type == 'hive':
        sessions_properties = HS2Api.get_properties(editor_type)
        if files is not None:
            _update_property_value(sessions_properties, 'files', files)

        if functions is not None:
            _update_property_value(sessions_properties, 'functions', functions)

        if settings is not None:
            _update_property_value(sessions_properties, 'settings', settings)
    elif editor_type == 'impala':
        sessions_properties = HS2Api.get_properties(editor_type)
        if settings is not None:
            _update_property_value(sessions_properties, 'settings', settings)
    elif editor_type == 'java':
        sessions_properties = []  # Java options
    else:
        sessions_properties = []

    data = {
        'name': name,
        'uuid': str(uuid.uuid4()),
        'description': description,
        'sessions': [{
            'type': editor_type,
            'properties': sessions_properties,
            'id': None
        }],
        'selectedSnippet': editor_type,
        'type': 'query-%s' % editor_type,
        'showHistory': True,
        'isSaved': is_saved,
        'onSuccessUrl': on_success_url,
        'skipHistorify': skip_historify,
        'isManaged': is_task,
        'snippets': [{
            'status': status,
            'id': str(uuid.uuid4()),
            'statement_raw': statement,
            'statement': statement,
            'type': editor_type,
            'wasBatchExecuted': batch_submit,
            'lastExecuted': last_executed,
            'properties': {
                'files': [] if files is None else files,
                'functions': [] if functions is None else functions,
                'settings': [] if settings is None else settings
            },
            'name': name,
            'database': database,
            'result': {
                'handle': {}
            },
            'variables': []
        }]
    }

    if snippet_properties:
        data['snippets'][0]['properties'].update(snippet_properties)

    editor.data = json.dumps(data)

    return editor
Code Example #12
def make_notebook(name='Browse', description='', editor_type='hive', statement='', status='ready',
                  files=None, functions=None, settings=None, is_saved=False, database='default', snippet_properties=None, batch_submit=False,
                  on_success_url=None):
  from notebook.connectors.hiveserver2 import HS2Api

  editor = Notebook()
  if snippet_properties is None:
    snippet_properties = {}

  if editor_type == 'hive':
    sessions_properties = HS2Api.get_properties(editor_type)
    if files is not None:
      _update_property_value(sessions_properties, 'files', files)

    if functions is not None:
      _update_property_value(sessions_properties, 'functions', functions)

    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'impala':
    sessions_properties = HS2Api.get_properties(editor_type)
    if settings is not None:
      _update_property_value(sessions_properties, 'settings', settings)
  elif editor_type == 'java':
    sessions_properties = [] # Java options
  else:
    sessions_properties = []

  data = {
    'name': name,
    'uuid': str(uuid.uuid4()),
    'description': description,
    'sessions': [
      {
         'type': editor_type,
         'properties': sessions_properties,
         'id': None
      }
    ],
    'selectedSnippet': editor_type,
    'type': 'query-%s' % editor_type,
    'showHistory': True,
    'isSaved': is_saved,
    'onSuccessUrl': on_success_url,
    'snippets': [
      {
         'status': status,
         'id': str(uuid.uuid4()),
         'statement_raw': statement,
         'statement': statement,
         'type': editor_type,
         'wasBatchExecuted': batch_submit,
         'properties': {
            'files': [] if files is None else files,
            'functions': [] if functions is None else functions,
            'settings': [] if settings is None else settings
         },
         'name': name,
         'database': database,
         'result': {'handle':{}},
         'variables': []
      }
    ]
  }

  if snippet_properties:
    data['snippets'][0]['properties'].update(snippet_properties)

  editor.data = json.dumps(data)

  return editor
Code Example #13
File: base.py Project: pioneerzcy/hue
def get_api(request, snippet):
    from notebook.connectors.oozie_batch import OozieApi

    if snippet.get('wasBatchExecuted'):
        return OozieApi(user=request.user, request=request)

    if snippet['type'] == 'report':
        snippet['type'] = 'impala'

    interpreter = [
        interpreter for interpreter in get_ordered_interpreters(request.user)
        if interpreter['type'] == snippet['type']
    ]
    if not interpreter:
        if snippet['type'] == 'hbase':
            interpreter = [{
                'name': 'hbase',
                'type': 'hbase',
                'interface': 'hbase',
                'options': {},
                'is_sql': False
            }]
        elif snippet['type'] == 'kafka':
            interpreter = [{
                'name': 'kafka',
                'type': 'kafka',
                'interface': 'kafka',
                'options': {},
                'is_sql': False
            }]
        elif snippet['type'] == 'solr':
            interpreter = [{
                'name': 'solr',
                'type': 'solr',
                'interface': 'solr',
                'options': {},
                'is_sql': False
            }]
        else:
            raise PopupException(
                _('Snippet type %(type)s is not configured in hue.ini') %
                snippet)

    interpreter = interpreter[0]
    interface = interpreter['interface']

    # Multi cluster
    cluster = json.loads(request.POST.get('cluster', '""'))  # Via Catalog API
    if cluster == 'undefined':
        cluster = None
    if not cluster and snippet.get('compute'):  # Via notebook.ko.js
        cluster = snippet.get('compute').get('id')
    if cluster and 'crn:altus:dataware:' in cluster:
        interface = 'altus-adb'
    if cluster:
        LOG.info('Selected cluster %s' % cluster)

    if interface == 'hiveserver2':
        from notebook.connectors.hiveserver2 import HS2Api
        return HS2Api(user=request.user, request=request, cluster=cluster)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        from notebook.connectors.spark_shell import SparkApi
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        from notebook.connectors.spark_batch import SparkBatchApi
        return SparkBatchApi(request.user)
    elif interface == 'text' or interface == 'markdown':
        from notebook.connectors.text import TextApi
        return TextApi(request.user)
    elif interface == 'rdbms':
        from notebook.connectors.rdbms import RdbmsApi
        return RdbmsApi(request.user, interpreter=snippet['type'])
    elif interface == 'altus-adb':
        from notebook.connectors.altus_adb import AltusAdbApi
        return AltusAdbApi(user=request.user,
                           cluster_name=cluster,
                           request=request)
    elif interface == 'dataeng':
        from notebook.connectors.dataeng import DataEngApi
        return DataEngApi(user=request.user,
                          request=request,
                          cluster_name=cluster.get('name'))
    elif interface == 'jdbc' or interface == 'teradata':
        from notebook.connectors.jdbc import JdbcApi
        return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'sqlalchemy':
        from notebook.connectors.sqlalchemyapi import SqlAlchemyApi
        return SqlAlchemyApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        from notebook.connectors.solr import SolrApi
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'hbase':
        from notebook.connectors.hbase import HBaseApi
        return HBaseApi(request.user)
    elif interface == 'kafka':
        from notebook.connectors.kafka import KafkaApi
        return KafkaApi(request.user)
    elif interface == 'pig':
        return OozieApi(user=request.user,
                        request=request)  # Backward compatibility until Hue 4
    else:
        raise PopupException(
            _('Notebook connector interface not recognized: %s') % interface)
Code Example #14
File: base.py Project: tonyzhimin/hue
def get_api(request, snippet):
    from notebook.connectors.oozie_batch import OozieApi

    if snippet.get('wasBatchExecuted'):
        return OozieApi(user=request.user, request=request)

    if snippet['type'] == 'report':
        snippet['type'] = 'impala'

    interpreter = [
        interpreter for interpreter in get_ordered_interpreters(request.user)
        if snippet['type'] in (interpreter['type'], interpreter['interface'])
    ]
    if not interpreter:
        if snippet['type'] == 'hbase':
            interpreter = [{
                'name': 'hbase',
                'type': 'hbase',
                'interface': 'hbase',
                'options': {},
                'is_sql': False
            }]
        elif snippet['type'] == 'kafka':
            interpreter = [{
                'name': 'kafka',
                'type': 'kafka',
                'interface': 'kafka',
                'options': {},
                'is_sql': False
            }]
        elif snippet['type'] == 'solr':
            interpreter = [{
                'name': 'solr',
                'type': 'solr',
                'interface': 'solr',
                'options': {},
                'is_sql': False
            }]
        elif snippet['type'] == 'custom':
            interpreter = [{
                'name': snippet['name'],
                'type': snippet['type'],
                'interface': snippet['interface'],
                'options': snippet.get('options', {}),
                'is_sql': False
            }]
        else:
            raise PopupException(
                _('Snippet type %(type)s is not configured.') % snippet)

    interpreter = interpreter[0]
    interface = interpreter['interface']

    if CONNECTORS.IS_ENABLED.get():
        cluster = {
            'connector': snippet['type'],
            'id': interpreter['type'],
        }
        snippet['type'] = snippet['type'].split('-', 2)[0]
        cluster.update(interpreter['options'])
    # Multi cluster
    elif has_multi_cluster():
        cluster = json.loads(request.POST.get(
            'cluster',
            '""'))  # Via Catalog autocomplete API or Notebook create sessions
        if cluster == '""' or cluster == 'undefined':
            cluster = None
        if not cluster and snippet.get('compute'):  # Via notebook.ko.js
            cluster = snippet['compute']
    else:
        cluster = None

    cluster_name = cluster.get('id') if cluster else None

    if cluster and 'altus:dataware:k8s' in cluster_name:
        interface = 'hiveserver2'
    elif cluster and 'crn:altus:dataware:' in cluster_name:
        interface = 'altus-adb'
    elif cluster and 'crn:altus:dataeng:' in cluster_name:
        interface = 'dataeng'

    LOG.info('Selected cluster %s %s interface %s' %
             (cluster_name, cluster, interface))
    snippet['interface'] = interface

    if interface.startswith('hiveserver2') or interface == 'hms':
        from notebook.connectors.hiveserver2 import HS2Api
        return HS2Api(user=request.user,
                      request=request,
                      cluster=cluster,
                      interface=interface)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        from notebook.connectors.spark_shell import SparkApi
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        from notebook.connectors.spark_batch import SparkBatchApi
        return SparkBatchApi(request.user)
    elif interface == 'text' or interface == 'markdown':
        from notebook.connectors.text import TextApi
        return TextApi(request.user)
    elif interface == 'rdbms':
        from notebook.connectors.rdbms import RdbmsApi
        return RdbmsApi(request.user,
                        interpreter=snippet['type'],
                        query_server=snippet.get('query_server'))
    elif interface == 'altus-adb':
        from notebook.connectors.altus_adb import AltusAdbApi
        return AltusAdbApi(user=request.user,
                           cluster_name=cluster_name,
                           request=request)
    elif interface == 'dataeng':
        from notebook.connectors.dataeng import DataEngApi
        return DataEngApi(user=request.user,
                          request=request,
                          cluster_name=cluster_name)
    elif interface == 'jdbc':
        if interpreter['options'] and interpreter['options'].get(
                'url', '').find('teradata') >= 0:
            from notebook.connectors.jdbc_teradata import JdbcApiTeradata
            return JdbcApiTeradata(request.user, interpreter=interpreter)
        if interpreter['options'] and interpreter['options'].get(
                'url', '').find('awsathena') >= 0:
            from notebook.connectors.jdbc_athena import JdbcApiAthena
            return JdbcApiAthena(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get(
                'url', '').find('presto') >= 0:
            from notebook.connectors.jdbc_presto import JdbcApiPresto
            return JdbcApiPresto(request.user, interpreter=interpreter)
        elif interpreter['options'] and interpreter['options'].get(
                'url', '').find('clickhouse') >= 0:
            from notebook.connectors.jdbc_clickhouse import JdbcApiClickhouse
            return JdbcApiClickhouse(request.user, interpreter=interpreter)
        else:
            from notebook.connectors.jdbc import JdbcApi
            return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'teradata':
        from notebook.connectors.jdbc_teradata import JdbcApiTeradata
        return JdbcApiTeradata(request.user, interpreter=interpreter)
    elif interface == 'athena':
        from notebook.connectors.jdbc_athena import JdbcApiAthena
        return JdbcApiAthena(request.user, interpreter=interpreter)
    elif interface == 'presto':
        from notebook.connectors.jdbc_presto import JdbcApiPresto
        return JdbcApiPresto(request.user, interpreter=interpreter)
    elif interface == 'sqlalchemy':
        from notebook.connectors.sqlalchemyapi import SqlAlchemyApi
        return SqlAlchemyApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        from notebook.connectors.solr import SolrApi
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'hbase':
        from notebook.connectors.hbase import HBaseApi
        return HBaseApi(request.user)
    elif interface == 'kafka':
        from notebook.connectors.kafka import KafkaApi
        return KafkaApi(request.user)
    elif interface == 'pig':
        return OozieApi(user=request.user,
                        request=request)  # Backward compatibility until Hue 4
    else:
        raise PopupException(
            _('Notebook connector interface not recognized: %s') % interface)
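
The cluster handling above overrides the configured interface based on the selected cluster id. A condensed, standalone restatement of that override order (sketch only):

# Simplified restatement of the cluster-name overrides; order matters because
# ids containing 'altus:dataware:k8s' must win over the broader dataware match.
def override_interface(interface, cluster_name):
    if cluster_name and 'altus:dataware:k8s' in cluster_name:
        return 'hiveserver2'
    if cluster_name and 'crn:altus:dataware:' in cluster_name:
        return 'altus-adb'
    if cluster_name and 'crn:altus:dataeng:' in cluster_name:
        return 'dataeng'
    return interface

assert override_interface('jdbc', None) == 'jdbc'
assert override_interface('jdbc', 'crn:altus:dataeng:123') == 'dataeng'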
Code Example #15
def make_notebook(name='Browse',
                  description='',
                  editor_type='hive',
                  statement='',
                  status='ready',
                  files=None,
                  functions=None,
                  settings=None,
                  is_saved=False,
                  database='default',
                  snippet_properties=None,
                  batch_submit=False):
    from notebook.connectors.hiveserver2 import HS2Api

    editor = Notebook()
    if snippet_properties is None:
        snippet_properties = {}

    if editor_type == 'hive':
        sessions_properties = HS2Api.get_properties(editor_type)
        if files is not None:
            _update_property_value(sessions_properties, 'files', files)

        if functions is not None:
            _update_property_value(sessions_properties, 'functions', functions)

        if settings is not None:
            _update_property_value(sessions_properties, 'settings', settings)
    elif editor_type == 'impala':
        sessions_properties = HS2Api.get_properties(editor_type)
        if settings is not None:
            _update_property_value(sessions_properties, 'settings', settings)
    elif editor_type == 'java':
        sessions_properties = []  # Java options
    else:
        sessions_properties = []

    data = {
        'name': name,
        'uuid': str(uuid.uuid4()),
        'description': description,
        'sessions': [{
            'type': editor_type,
            'properties': sessions_properties,
            'id': None
        }],
        'selectedSnippet': editor_type,
        'type': 'query-%s' % editor_type,
        'showHistory': True,
        'isSaved': is_saved,
        'snippets': [{
            'status': status,
            'id': str(uuid.uuid4()),
            'statement_raw': statement,
            'statement': statement,
            'type': editor_type,
            'wasBatchExecuted': batch_submit,
            'properties': {
                'files': [] if files is None else files,
                'functions': [] if functions is None else functions,
                'settings': [] if settings is None else settings
            },
            'name': name,
            'database': database,
            'result': {},
            'variables': []
        }]
    }

    if snippet_properties:
        data['snippets'][0]['properties'].update(snippet_properties)

    editor.data = json.dumps(data)

    return editor
Code Example #16
File: models.py Project: mazensibai/hue
def make_notebook(name='Browse',
                  description='',
                  editor_type='hive',
                  statement='',
                  status='ready',
                  files=None,
                  functions=None,
                  settings=None,
                  is_saved=False,
                  database='default'):

    from notebook.connectors.hiveserver2 import HS2Api

    editor = Notebook()

    properties = HS2Api.get_properties(editor_type)

    if editor_type == 'hive':
        if files is not None:
            _update_property_value(properties, 'files', files)

        if functions is not None:
            _update_property_value(properties, 'functions', functions)

        if settings is not None:
            _update_property_value(properties, 'settings', settings)
    elif editor_type == 'impala':
        if settings is not None:
            _update_property_value(properties, 'settings', settings)

    editor.data = json.dumps({
        'name': name,
        'description': description,
        'sessions': [{
            'type': editor_type,
            'properties': properties,
            'id': None
        }],
        'selectedSnippet': editor_type,
        'type': 'query-%s' % editor_type,
        'showHistory': True,
        'isSaved': is_saved,
        'snippets': [{
            'status': status,
            'id': str(uuid.uuid4()),
            'statement_raw': statement,
            'statement': statement,
            'type': editor_type,
            'properties': {
                'files': [] if files is None else files,
                'functions': [] if functions is None else functions,
                'settings': [] if settings is None else settings
            },
            'name': name,
            'database': database,
            'result': {}
        }]
    })

    return editor
Code Example #17
def make_notebook2(name='Browse',
                   description='',
                   is_saved=False,
                   snippets=None):

    from notebook.connectors.hiveserver2 import HS2Api

    editor = Notebook()

    _snippets = []

    for snippet in snippets:
        default_properties = {'files': [], 'functions': [], 'settings': []}

        default_properties.update(snippet['properties'])
        snippet['properties'] = default_properties

        if snippet['type'] == 'hive':
            pass
        elif snippet['type'] == 'impala':
            pass
        elif snippet['type'] == 'java':
            pass

        _snippets.append(snippet)

    data = {
        'name': name,
        'uuid': str(uuid.uuid4()),
        'type': 'notebook',
        'description': description,
        'sessions': [
            {
                'type': _snippet['type'],
                'properties': HS2Api.get_properties(_snippet['type']),
                'id': None
            } for _snippet in _snippets  # Non unique types currently
        ],
        'selectedSnippet': _snippets[0]['type'],
        'showHistory': False,
        'isSaved': is_saved,
        'snippets': [{
            'status': _snippet.get('status', 'ready'),
            'id': str(uuid.uuid4()),
            'statement_raw': _snippet.get('statement', ''),
            'statement': _snippet.get('statement', ''),
            'type': _snippet.get('type'),
            'properties': _snippet['properties'],
            'name': name,
            'database': _snippet.get('database'),
            'result': {},
            'variables': []
        } for _snippet in _snippets]
    }

    editor.data = json.dumps(data)

    return editor