Example #1
File: base.py Project: qccash/hue
def get_api(user, snippet, fs, jt):
  from notebook.connectors.hiveserver2 import HS2Api
  from notebook.connectors.jdbc import JdbcApi
  from notebook.connectors.mysql import MySqlApi
  from notebook.connectors.pig_batch import PigApi
  from notebook.connectors.spark_shell import SparkApi
  from notebook.connectors.spark_batch import SparkBatchApi
  from notebook.connectors.text import TextApi

  interpreter = [interpreter for interpreter in get_interpreters() if interpreter['type'] == snippet['type']]
  if not interpreter:
    raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
  interface = interpreter[0]['interface']
  options = interpreter[0]['options']

  if interface == 'hiveserver2':
    return HS2Api(user)
  elif interface == 'livy':
    return SparkApi(user)
  elif interface == 'livy-batch':
    return SparkBatchApi(user)
  elif interface == 'text':
    return TextApi(user)
  elif interface == 'mysql':
    return MySqlApi(user)
  elif interface == 'jdbc':
    return JdbcApi(user, options=options)
  elif interface == 'pig':
    return PigApi(user, fs=fs, jt=jt)
  else:
    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
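
A minimal, self-contained sketch of the lookup get_api() performs against get_interpreters(): the dict keys ('type', 'interface', 'options') are taken from the code above, while the entries themselves (and the mysql option) are hypothetical, not a real hue.ini configuration.

# Sketch of the interpreter lookup inside get_api(); illustrative data only.
def find_interpreter(interpreters, snippet_type):
  matches = [i for i in interpreters if i['type'] == snippet_type]
  if not matches:
    raise ValueError('Snippet type %s is not configured' % snippet_type)
  return matches[0]

interpreters = [
  {'type': 'hive', 'interface': 'hiveserver2', 'options': {}},
  {'type': 'mysql', 'interface': 'jdbc',
   'options': {'url': 'jdbc:mysql://localhost/hue'}},  # hypothetical option
]

print(find_interpreter(interpreters, 'hive')['interface'])  # -> hiveserver2
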
Example #2
def browse(request, database, table):
    editor_type = request.GET.get('type', 'hive')

    snippet = {'type': editor_type}
    sql_select = get_api(request, snippet).get_select_star_query(
        snippet, database, table)

    editor = make_notebook(name='Browse',
                           editor_type=editor_type,
                           statement=sql_select,
                           status='ready-execute')

    return render(
        'editor.mako', request, {
            'notebooks_json': json.dumps([editor.get_data()]),
            'options_json': json.dumps({
                'languages': get_interpreters(request.user),
                'mode': 'editor',
                'editor_type': editor_type
            }),
            'editor_type': editor_type,
        })
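
The browse view above receives database and table as URL parameters; the routing itself is not part of the snippet. A hypothetical Django 1.x-style wiring could look like the following (the pattern and route name are illustrative, not Hue's actual urls.py):

from django.conf.urls import url

urlpatterns = [
    # Illustrative route; assumes browse is importable from this module's views
    url(r'^browse/(?P<database>\w+)/(?P<table>\w+)$', browse, name='browse'),
]
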
Example #3
def notebook(request):
    notebook_id = request.GET.get('notebook')

    is_yarn_mode = False
    try:
        from spark.conf import LIVY_SERVER_SESSION_KIND
        is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
    except:
        LOG.exception('Spark is not enabled')

    return render(
        'notebook.mako', request, {
            'editor_id': notebook_id or None,
            'notebooks_json': '{}',
            'options_json': json.dumps({
                'languages': get_interpreters(request.user),
                'session_properties': SparkApi.get_properties(),
                'is_optimizer_enabled': has_optimizer(),
                'is_navigator_enabled': has_navigator(),
                'editor_type': 'notebook'
            }),
            'is_yarn_mode': is_yarn_mode,
        })
Example #4
File: views.py Project: shobull/hue
def notebook(request):
  notebook_id = request.GET.get('notebook')

  if notebook_id:
    notebook = Notebook(document=Document2.objects.get(id=notebook_id))
  else:
    notebook = Notebook()

  autocomplete_base_url = ''
  try:
    autocomplete_base_url = reverse('beeswax:api_autocomplete_databases', kwargs={})
  except:
    LOG.exception('failed to get autocomplete base url')

  is_yarn_mode = False
  try:
    from spark.conf import LIVY_SERVER_SESSION_KIND
    is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
  except:
    LOG.exception('Spark is not enabled')

  return render('notebook.mako', request, {
      'notebooks_json': json.dumps([notebook.get_data()]),
      'options_json': json.dumps({
          'languages': get_interpreters(request.user),
          'session_properties': SparkApi.PROPERTIES,
      }),
      'autocomplete_base_url': autocomplete_base_url,
      'is_yarn_mode': is_yarn_mode
  })
Example #5
File: base.py Project: wdai-aa/hue
def get_api(request, snippet):
  from notebook.connectors.hiveserver2 import HS2Api
  from notebook.connectors.jdbc import JdbcApi
  from notebook.connectors.rdbms import RdbmsApi
  from notebook.connectors.pig_batch import PigApi
  from notebook.connectors.spark_shell import SparkApi
  from notebook.connectors.spark_batch import SparkBatchApi
  from notebook.connectors.text import TextApi

  interpreter = [interpreter for interpreter in get_interpreters(request.user) if interpreter['type'] == snippet['type']]
  if not interpreter:
    raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
  interpreter = interpreter[0]
  interface = interpreter['interface']

  if interface == 'hiveserver2':
    return HS2Api(user=request.user)
  elif interface == 'livy':
    return SparkApi(request.user)
  elif interface == 'livy-batch':
    return SparkBatchApi(request.user)
  elif interface == 'text' or interface == 'markdown':
    return TextApi(request.user)
  elif interface == 'rdbms':
    return RdbmsApi(request.user, interpreter=snippet['type'])
  elif interface == 'jdbc':
    return JdbcApi(request.user, interpreter=interpreter)
  elif interface == 'pig':
    return PigApi(user=request.user, request=request)
  else:
    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
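
Compared to Example 1, this variant takes the request instead of (user, fs, jt), also accepts a 'markdown' interface, and hands the whole interpreter dict to JdbcApi. A call site might look like this sketch (hypothetical snippet dicts; each assumes a matching interpreter is configured in hue.ini):

# Hypothetical calls; the returned class follows the dispatch above.
hs2_api = get_api(request, {'type': 'hive'})       # -> HS2Api
text_api = get_api(request, {'type': 'markdown'})  # -> TextApi, if 'markdown' maps to the 'text' or 'markdown' interface
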
Example #6
def editor(request, is_mobile=False):
    editor_id = request.GET.get('editor')
    editor_type = request.GET.get('type', 'hive')

    if editor_id:  # Open existing saved editor document
        document = Document2.objects.get(id=editor_id)
        editor_type = document.type.rsplit('-', 1)[-1]

    template = 'editor_m.mako' if is_mobile else 'editor.mako'

    return render(
        template, request, {
            'editor_id': editor_id or None,
            'notebooks_json': '{}',
            'editor_type': editor_type,
            'options_json': json.dumps({
                'languages': get_interpreters(request.user),
                'mode': 'editor',
                'is_optimizer_enabled': has_optimizer(),
                'is_navigator_enabled': has_navigator(request.user),
                'editor_type': editor_type,
                'mobile': is_mobile
            })
        })
Example #7
def notebook(request):
  notebook_id = request.GET.get('notebook')

  if notebook_id:
    notebook = Notebook(document=Document2.objects.get(id=notebook_id))
  else:
    notebook = Notebook()

  autocomplete_base_url = ''
  try:
    autocomplete_base_url = reverse('beeswax:api_autocomplete_databases', kwargs={})
  except:
    LOG.exception('failed to get autocomplete base url')

  return render('notebook.mako', request, {
      'notebooks_json': json.dumps([notebook.get_data()]),
      'options_json': json.dumps({
          'languages': get_interpreters(),
          'snippet_placeholders' : {
              'sql': _('Example: 1 + 1, or press CTRL + space'),
              'spark': _('Example: 1 + 1, or press CTRL + space'),
              'pyspark': _('Example: 1 + 1, or press CTRL + space'),
              'impala': _('Example: SELECT * FROM tablename, or press CTRL + space'),
              'hive': _('Example: SELECT * FROM tablename, or press CTRL + space'),
              'r': _('Example: 1 + 1, or press CTRL + space')
          },
          'session_properties': SparkApi.PROPERTIES
      }),
      'autocomplete_base_url': autocomplete_base_url,
      'is_yarn_mode': LIVY_SERVER_SESSION_KIND.get()
  })
Example #8
def notebook(request):
    notebook_id = request.GET.get('notebook')

    if notebook_id:
        notebook = Notebook(document=Document2.objects.get(id=notebook_id))
    else:
        notebook = Notebook()

    is_yarn_mode = False
    try:
        from spark.conf import LIVY_SERVER_SESSION_KIND
        is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
    except:
        LOG.exception('Spark is not enabled')

    return render(
        'notebook.mako', request, {
            'notebooks_json': json.dumps([notebook.get_data()]),
            'options_json': json.dumps({
                'languages': get_interpreters(request.user),
                'session_properties': SparkApi.PROPERTIES,
            }),
            'is_yarn_mode': is_yarn_mode
        })
Example #9
def get_api(user, snippet, fs, jt):
  from notebook.connectors.hiveserver2 import HS2Api
  from notebook.connectors.jdbc import JdbcApi
  from notebook.connectors.mysql import MySqlApi
  from notebook.connectors.pig_batch import PigApi
  from notebook.connectors.spark_shell import SparkApi
  from notebook.connectors.spark_batch import SparkBatchApi
  from notebook.connectors.text import TextApi

  interpreter = [interpreter for interpreter in get_interpreters() if interpreter['type'] == snippet['type']]
  if not interpreter:
    raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
  interpreter = interpreter[0]
  interface = interpreter['interface']

  if interface == 'hiveserver2':
    return HS2Api(user)
  elif interface == 'livy':
    return SparkApi(user)
  elif interface == 'livy-batch':
    return SparkBatchApi(user)
  elif interface == 'text':
    return TextApi(user)
  elif interface == 'mysql':
    return MySqlApi(user)
  elif interface == 'jdbc':
    return JdbcApi(user, interpreter=interpreter)
  elif interface == 'pig':
    return PigApi(user, fs=fs, jt=jt)
  else:
    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
Example #10
def notebook(request):
    notebook_id = request.GET.get("notebook")

    if notebook_id:
        notebook = Notebook(document=Document2.objects.get(id=notebook_id))
    else:
        notebook = Notebook()

    autocomplete_base_url = ""
    try:
        autocomplete_base_url = reverse("beeswax:api_autocomplete_databases", kwargs={})
    except:
        LOG.exception("failed to get autocomplete base url")

    is_yarn_mode = False
    try:
        from spark.conf import LIVY_SERVER_SESSION_KIND

        is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
    except:
        LOG.exception("Spark is not enabled")

    return render(
        "notebook.mako",
        request,
        {
            "notebooks_json": json.dumps([notebook.get_data()]),
            "options_json": json.dumps(
                {"languages": get_interpreters(request.user), "session_properties": SparkApi.PROPERTIES}
            ),
            "autocomplete_base_url": autocomplete_base_url,
            "is_yarn_mode": is_yarn_mode,
        },
    )
Example #11
File: views.py Project: mastanr/hue
def responsive(request):
    apps = appmanager.get_apps_dict(request.user)

    return render(
        'responsive.mako', request, {
            'apps': apps,
            'tours_and_tutorials': Settings.get_settings().tours_and_tutorials,
            'interpreters': get_interpreters(request.user),
            'is_s3_enabled': is_s3_enabled() and has_s3_access(request.user)
        })
Example #12
def get_api(request, snippet):
    from notebook.connectors.hiveserver2 import HS2Api
    from notebook.connectors.jdbc import JdbcApi
    from notebook.connectors.rdbms import RdbmsApi
    from notebook.connectors.oozie_batch import OozieApi
    from notebook.connectors.solr import SolrApi
    from notebook.connectors.spark_shell import SparkApi
    from notebook.connectors.spark_batch import SparkBatchApi
    from notebook.connectors.text import TextApi

    if snippet.get('wasBatchExecuted'):
        return OozieApi(user=request.user, request=request)

    interpreter = [
        interpreter for interpreter in get_interpreters(request.user)
        if interpreter['type'] == snippet['type']
    ]
    if not interpreter:
        raise PopupException(
            _('Snippet type %(type)s is not configured in hue.ini') % snippet)
    interpreter = interpreter[0]
    interface = interpreter['interface']

    if interface == 'hiveserver2':
        return HS2Api(user=request.user, request=request)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        return SparkBatchApi(request.user)
    elif interface == 'text' or interface == 'markdown':
        return TextApi(request.user)
    elif interface == 'rdbms':
        return RdbmsApi(request.user, interpreter=snippet['type'])
    elif interface == 'jdbc':
        return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'pig':
        return OozieApi(user=request.user,
                        request=request)  # Backward compatibility until Hue 4
    else:
        raise PopupException(
            _('Notebook connector interface not recognized: %s') % interface)
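
This version adds two routes to OozieApi: an early return for any snippet flagged wasBatchExecuted, and a 'pig' fallback kept for backward compatibility until Hue 4. A sketch of both cases (hypothetical snippet dicts, assuming the interpreters are configured):

# Both calls dispatch to OozieApi under the logic above.
batch_api = get_api(request, {'type': 'hive', 'wasBatchExecuted': True})
pig_api = get_api(request, {'type': 'pig'})
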
Example #13
def notebook(request):
  notebook_id = request.GET.get('notebook')

  is_yarn_mode = False
  try:
    from spark.conf import LIVY_SERVER_SESSION_KIND
    is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
  except:
    LOG.exception('Spark is not enabled')

  return render('notebook.mako', request, {
      'editor_id': notebook_id or None,
      'notebooks_json': '{}',
      'options_json': json.dumps({
          'languages': get_interpreters(request.user),
          'session_properties': SparkApi.get_properties(),
          'is_optimizer_enabled': has_optimizer(),
      }),
      'is_yarn_mode': is_yarn_mode,
  })
Example #14
def notebook(request):
  notebook_id = request.GET.get('notebook')

  if notebook_id:
    notebook = Notebook(document=Document2.objects.get(id=notebook_id))
  else:
    notebook = Notebook()

  is_yarn_mode = False
  try:
    from spark.conf import LIVY_SERVER_SESSION_KIND
    is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
  except:
    LOG.exception('Spark is not enabled')

  return render('notebook.mako', request, {
      'notebooks_json': json.dumps([notebook.get_data()]),
      'options_json': json.dumps({
          'languages': get_interpreters(request.user),
          'session_properties': SparkApi.PROPERTIES,
      }),
      'is_yarn_mode': is_yarn_mode
  })
Example #15
def notebook(request):
    notebook_id = request.GET.get('notebook')

    if notebook_id:
        notebook = Notebook(document=Document2.objects.get(id=notebook_id))
    else:
        notebook = Notebook()

    autocomplete_base_url = ''
    try:
        autocomplete_base_url = reverse('beeswax:api_autocomplete_databases',
                                        kwargs={})
    except:
        LOG.exception('failed to get autocomplete base url')

    is_yarn_mode = False
    try:
        from spark.conf import LIVY_SERVER_SESSION_KIND
        is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
    except:
        LOG.exception('Spark is not enabled')

    return render(
        'notebook.mako', request, {
            'notebooks_json': json.dumps([notebook.get_data()]),
            'options_json': json.dumps({
                'languages': get_interpreters(request.user),
                'session_properties': SparkApi.PROPERTIES,
            }),
            'autocomplete_base_url': autocomplete_base_url,
            'is_yarn_mode': is_yarn_mode
        })
Example #16

# Start DBProxy server if we have some JDBC snippets

from notebook.conf import get_interpreters, ENABLE_DBPROXY_SERVER


def _start_dbproxy_server():
  import atexit
  import subprocess
  import sys
  import time

  # Re-run this program as a child process with the 'dbproxy_server' command
  p = subprocess.Popen([sys.executable, sys.argv[0], 'dbproxy_server'])

  def cleanup():
    p.terminate()  # ask the child to exit (SIGTERM on POSIX)
    for _ in xrange(5):
      if p.poll() is None:  # still running: wait up to ~5 seconds
        time.sleep(1)
      else:
        break  # child exited on its own
    else:  # loop finished without break: child is still alive
      p.kill()

  atexit.register(cleanup)


if ENABLE_DBPROXY_SERVER.get() and any(interpreter['interface'] == 'jdbc' for interpreter in get_interpreters()):
  _start_dbproxy_server()
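
The cleanup() above relies on Python's for/else: the else branch runs only when the loop finishes without hitting break, i.e. the child never exited in time. A self-contained sketch of the same terminate-wait-kill pattern, with a sleeping child standing in for the DBProxy process:

import atexit
import subprocess
import sys
import time

# Stand-in child; in the code above this is the dbproxy_server subprocess.
p = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])

def cleanup():
  p.terminate()            # polite shutdown request (SIGTERM on POSIX)
  for _ in range(5):
    if p.poll() is None:   # still running: wait up to ~5 seconds
      time.sleep(1)
    else:
      break                # child exited on its own
  else:
    p.kill()               # never exited: force-kill

atexit.register(cleanup)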