Example #1
def _importer(request, prefill):
    source_type = request.GET.get('sourceType') or get_cluster_config(request.user)['default_sql_interpreter']

    return render('importer.mako', request, {
        'is_embeddable': request.GET.get('is_embeddable', False),
        'fields_json': json.dumps({
            'solr': [field.name for field in FIELD_TYPES],
            'hive': HIVE_TYPES,
            'hivePrimitive': HIVE_PRIMITIVE_TYPES
        }),
        'operators_json': json.dumps([operator.to_dict() for operator in OPERATORS]),
        'file_types_json': json.dumps([format_.format_info() for format_ in get_file_indexable_format_types()]),
        'default_field_type': json.dumps(Field().to_dict()),
        'prefill': json.dumps(prefill),
        'source_type': source_type
    })
Example #2
File: views.py Project: bopopescu/Hue-4
def _get_db(user, source_type=None):
    if source_type is None:
        source_type = get_cluster_config(user)['default_sql_interpreter']

    query_server = get_query_server_config(
        name=source_type if source_type != 'hive' else 'beeswax')
    return dbms.get(user, query_server)
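For context, a minimal sketch of how a view might call this helper; the get_tables() call and the 'default' database name are assumptions based on Hue's dbms API, not part of the example:

db = _get_db(request.user)                  # resolve the user's default SQL interpreter
tables = db.get_tables(database='default')  # query the warehouse through the resolved backend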
Example #3
File: api2.py Project: dulems/hue-1
def get_config(request):
    if request.POST.get(USER_PREFERENCE_CLUSTER):
        set_user_preferences(request.user, USER_PREFERENCE_CLUSTER,
                             request.POST.get(USER_PREFERENCE_CLUSTER))

    config = get_cluster_config(request.user)
    config['status'] = 0

    return JsonResponse(config)
Example #4
File: api2.py Project: e11it/hue-1
def get_config(request):
  config = get_cluster_config(request.user)
  config['clusters'] = list(get_clusters(request.user).values())
  config['documents'] = {
    'types': list(Document2.objects.documents(user=request.user).order_by().values_list('type', flat=True).distinct())
  }
  config['status'] = 0

  return JsonResponse(config)
Example #5
File: api2.py Project: xuejunshuang/hue
def get_context_namespaces(request, interface):
  '''
  Namespaces are node cluster contexts (e.g. Hive + Ranger) that can be queried by computes.
  '''
  response = {}
  namespaces = []

  clusters = list(get_clusters(request.user).values())

  # Currently broken if not sent
  namespaces.extend([{
      'id': cluster['id'],
      'name': cluster['name'],
      'status': 'CREATED',
      'computes': [cluster]
    } for cluster in clusters if cluster.get('type') == 'direct'
  ])

  if interface == 'hive' or interface == 'impala' or interface == 'report':
    if get_cluster_config(request.user)['has_computes']:
      # Note: attaching computes to namespaces might be done via the frontend in the future
      if interface == 'impala':
        if IS_K8S_ONLY.get():
          adb_clusters = DataWarehouse2Api(request.user).list_clusters()['clusters']
        else:
          adb_clusters = AnalyticDbApi(request.user).list_clusters()['clusters']
        for _cluster in adb_clusters: # Add "fake" namespace if needed
          if not _cluster.get('namespaceCrn'):
            _cluster['namespaceCrn'] = _cluster['crn']
            _cluster['id'] = _cluster['crn']
            _cluster['namespaceName'] = _cluster['clusterName']
            _cluster['name'] = _cluster['clusterName']
            _cluster['compute_end_point'] = '%(publicHost)s' % _cluster['coordinatorEndpoint'] if IS_K8S_ONLY.get() else ''
      else:
        adb_clusters = []

      if IS_K8S_ONLY.get():
        sdx_namespaces = []
      else:
        sdx_namespaces = SdxApi(request.user).list_namespaces()

      # Adding "fake" namespace for cluster without one
      sdx_namespaces.extend([_cluster for _cluster in adb_clusters if not _cluster.get('namespaceCrn') or (IS_K8S_ONLY.get() and 'TERMINAT' not in _cluster['status'])])

      namespaces.extend([{
          'id': namespace.get('crn', 'None'),
          'name': namespace.get('namespaceName'),
          'status': namespace.get('status'),
          'computes': [_cluster for _cluster in adb_clusters if _cluster.get('namespaceCrn') == namespace.get('crn')]
        } for namespace in sdx_namespaces if namespace.get('status') == 'CREATED' or IS_K8S_ONLY.get()
      ])

  response[interface] = namespaces
  response['status'] = 0

  return JsonResponse(response)
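For reference, the JSON this view returns for interface='hive' has roughly the shape below; the keys come from the code above, while the values are illustrative placeholders:

{
  'hive': [
    {'id': 'crn:altus:sdx:us-west-1:...', 'name': 'analytics', 'status': 'CREATED', 'computes': []}
  ],
  'status': 0
}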
Example #6
def _get_db(user, source_type=None):
  if source_type is None:
    cluster_config = get_cluster_config(user)
    editor_config = cluster_config['app_config'].get('editor')
    if FORCE_HS2_METADATA.get() and editor_config and 'hive' in editor_config['interpreter_names']:
      source_type = 'hive'
    else:
      source_type = cluster_config['default_sql_interpreter']

  query_server = get_query_server_config(name=source_type if source_type != 'hive' else 'beeswax')
  return dbms.get(user, query_server)
Example #7
File: views.py Project: ymping/hue
def _make_select_statement_gist(host_domain, is_http_secure, user, channel_id, statement):
    default_dialect = get_cluster_config(rewrite_user(user))['main_button_action']['dialect']
    gist_response = _gist_create(host_domain, is_http_secure, user, statement, default_dialect)

    msg = 'Here is the gist link\n {gist_link}'.format(gist_link=gist_response['link'])
    _send_message(channel_id, message=msg)
Example #8
File: views.py Project: mapr/hue
def _get_db(user, source_type=None, cluster=None):
  if source_type is None:
    cluster_config = get_cluster_config(user)
    editor_config = cluster_config['app_config'].get('editor')
    if FORCE_HS2_METADATA.get() and editor_config and 'hive' in editor_config['interpreter_names']:
      source_type = 'hive'
    else:
      source_type = cluster_config['default_sql_interpreter']

  name = source_type if source_type != 'hive' else 'beeswax'

  query_server = get_query_server_config(name=name, cluster=cluster)
  return dbms.get(user, query_server)
Example #9
def detect_select_statement(host_domain, is_http_secure, channel_id, user_id, statement):
    slack_user = check_slack_user_permission(host_domain, user_id)
    user = get_user(channel_id, slack_user)

    default_dialect = get_cluster_config(rewrite_user(user))['default_sql_interpreter']

    gist_response = _gist_create(host_domain, is_http_secure, user, statement, default_dialect)

    bot_message = (
        'Hi <@{user}>\n'
        'Looks like you are copy/pasting SQL; instead you can now send Editor links, which unfurl into a rich preview!\n'
        'Here is the gist link\n {gist_link}').format(user=user_id, gist_link=gist_response['link'])
    _send_message(channel_id, bot_message)
Example #10
File: api2.py Project: xuejunshuang/hue
def get_context_computes(request, interface):
  '''
  Some clusters like Snowball can have multiple computes for certain languages (Hive, Impala...).
  '''
  response = {}
  computes = []

  clusters = list(get_clusters(request.user).values())

  if get_cluster_config(request.user)['has_computes']: # TODO: only based on interface selected?
    interpreter = get_interpreter(connector_type=interface, user=request.user)
    if interpreter['dialect'] == 'impala':
      # dw_clusters = DataWarehouse2Api(request.user).list_clusters()['clusters']
      dw_clusters = [
        {'crn': 'c1', 'clusterName': 'c1', 'status': 'created', 'options': {'server_host': 'c1.gethue.com', 'server_port': 10000}},
        {'crn': 'c2', 'clusterName': 'c2', 'status': 'created', 'options': {'server_host': 'c2.gethue.com', 'server_port': 10000}},
      ]
      computes.extend([{
          'id': cluster.get('crn'),
          'name': cluster.get('clusterName'),
          'status': cluster.get('status'),
          'namespace': cluster.get('namespaceCrn', cluster.get('crn')),
          'type': interpreter['dialect'],
          'options': cluster['options'],
        } for cluster in dw_clusters]
      )
  else:
    # Currently broken if not sent
    computes.extend([{
        'id': cluster['id'],
        'name': cluster['name'],
        'namespace': cluster['id'],
        'interface': interface,
        'type': cluster['type'],
        'options': {}
      } for cluster in clusters if cluster.get('type') == 'direct'
    ])

  response[interface] = computes
  response['status'] = 0

  return JsonResponse(response)
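Given the hard-coded dw_clusters above, the response for interface='impala' would look roughly like this (second entry elided):

{
  'impala': [
    {'id': 'c1', 'name': 'c1', 'status': 'created', 'namespace': 'c1', 'type': 'impala',
     'options': {'server_host': 'c1.gethue.com', 'server_port': 10000}}
  ],
  'status': 0
}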
Example #11
def get_api(request, snippet):
    from notebook.connectors.oozie_batch import OozieApi

    if snippet.get('wasBatchExecuted') and not TASK_SERVER.ENABLED.get():
        return OozieApi(user=request.user, request=request)

    if snippet.get('type') == 'report':
        snippet['type'] = 'impala'

    patch_snippet_for_connector(snippet)

    connector_name = snippet['type']

    if has_connectors() and snippet.get('type') == 'hello' and is_admin(request.user):
        interpreter = snippet.get('interpreter')
    else:
        interpreter = get_interpreter(connector_type=connector_name, user=request.user)

    interface = interpreter['interface']

    if get_cluster_config(request.user)['has_computes']:
        # Via Catalog autocomplete API or Notebook create sessions.
        compute = json.loads(request.POST.get('cluster', '""'))
        if compute == '""' or compute == 'undefined':
            compute = None
        if not compute and snippet.get('compute'):  # Via notebook.ko.js
            interpreter['compute'] = snippet['compute']

    LOG.debug('Selected interpreter %s interface=%s compute=%s' % (
        interpreter['type'], interface,
        interpreter.get('compute') and interpreter['compute']['name']))

    if interface == 'hiveserver2' or interface == 'hms':
        from notebook.connectors.hiveserver2 import HS2Api
        return HS2Api(user=request.user,
                      request=request,
                      interpreter=interpreter)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        from notebook.connectors.spark_shell import SparkApi
        return SparkApi(request.user, interpreter=interpreter)
    elif interface == 'livy-batch':
        from notebook.connectors.spark_batch import SparkBatchApi
        return SparkBatchApi(request.user, interpreter=interpreter)
    elif interface == 'text' or interface == 'markdown':
        from notebook.connectors.text import TextApi
        return TextApi(request.user)
    elif interface == 'rdbms':
        from notebook.connectors.rdbms import RdbmsApi
        return RdbmsApi(request.user,
                        interpreter=snippet['type'],
                        query_server=snippet.get('query_server'))
    elif interface == 'jdbc':
        url = interpreter['options'].get('url', '') if interpreter['options'] else ''
        if url.find('teradata') >= 0:
            from notebook.connectors.jdbc_teradata import JdbcApiTeradata
            return JdbcApiTeradata(request.user, interpreter=interpreter)
        elif url.find('awsathena') >= 0:
            from notebook.connectors.jdbc_athena import JdbcApiAthena
            return JdbcApiAthena(request.user, interpreter=interpreter)
        elif url.find('presto') >= 0:
            from notebook.connectors.jdbc_presto import JdbcApiPresto
            return JdbcApiPresto(request.user, interpreter=interpreter)
        elif url.find('clickhouse') >= 0:
            from notebook.connectors.jdbc_clickhouse import JdbcApiClickhouse
            return JdbcApiClickhouse(request.user, interpreter=interpreter)
        elif url.find('vertica') >= 0:
            from notebook.connectors.jdbc_vertica import JdbcApiVertica
            return JdbcApiVertica(request.user, interpreter=interpreter)
        else:
            from notebook.connectors.jdbc import JdbcApi
            return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'teradata':
        from notebook.connectors.jdbc_teradata import JdbcApiTeradata
        return JdbcApiTeradata(request.user, interpreter=interpreter)
    elif interface == 'athena':
        from notebook.connectors.jdbc_athena import JdbcApiAthena
        return JdbcApiAthena(request.user, interpreter=interpreter)
    elif interface == 'presto':
        from notebook.connectors.jdbc_presto import JdbcApiPresto
        return JdbcApiPresto(request.user, interpreter=interpreter)
    elif interface == 'sqlalchemy':
        from notebook.connectors.sql_alchemy import SqlAlchemyApi
        return SqlAlchemyApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        from notebook.connectors.solr import SolrApi
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'hbase':
        from notebook.connectors.hbase import HBaseApi
        return HBaseApi(request.user)
    elif interface == 'ksql':
        from notebook.connectors.ksql import KSqlApi
        return KSqlApi(request.user, interpreter=interpreter)
    elif interface == 'flink':
        from notebook.connectors.flink_sql import FlinkSqlApi
        return FlinkSqlApi(request.user, interpreter=interpreter)
    elif interface == 'kafka':
        from notebook.connectors.kafka import KafkaApi
        return KafkaApi(request.user)
    elif interface == 'pig':
        return OozieApi(user=request.user,
                        request=request)  # Backward compatibility until Hue 4
    else:
        raise PopupException(
            _('Notebook connector interface not recognized: %s') % interface)
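A caller then drives the returned connector through the common Api surface; a minimal sketch, assuming the execute()/check_status() methods of Hue's notebook Api base class:

api = get_api(request, snippet)
handle = api.execute(notebook, snippet)       # submit the statement to the selected backend
status = api.check_status(notebook, snippet)  # poll until results are available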
Example #12
File: api2.py Project: yuzhenguo27833/hue
def get_config(request):
    config = get_cluster_config(request.user)
    config['status'] = 0

    return JsonResponse(config)
Example #13
File: api2.py Project: sandredd/hue-1
def get_config(request):
    config = get_cluster_config(request.user)
    config['clusters'] = list(get_clusters(request.user).values())
    config['status'] = 0

    return JsonResponse(config)
Example #14
def _submit_coordinator(request, coordinator, mapping):
    try:
        wf = coordinator.workflow
        if IS_MULTICLUSTER_ONLY.get() and get_cluster_config(request.user)['has_computes']:
            mapping['auto-cluster'] = {
                u'additionalClusterResourceTags': [],
                u'automaticTerminationCondition': u'EMPTY_JOB_QUEUE',  # u'NONE'
                u'cdhVersion': u'CDH514',
                u'clouderaManagerPassword': u'guest',
                u'clouderaManagerUsername': u'guest',
                u'clusterName': u'analytics4',  # Add time variable
                u'computeWorkersConfiguration': {
                    u'bidUSDPerHr': 0,
                    u'groupSize': 0,
                    u'useSpot': False
                },
                u'environmentName': u'crn:altus:environments:us-west-1:12a0079b-1591-4ca0-b721-a446bda74e67:environment:analytics/236ebdda-18bd-428a-9d2b-cd6973d42946',
                u'instanceBootstrapScript': u'',
                u'instanceType': u'm4.xlarge',
                u'jobSubmissionGroupName': u'',
                u'jobs': [
                    {
                        u'failureAction': u'INTERRUPT_JOB_QUEUE',
                        u'name': u'a87e20d7-5c0d-49ee-ab37-625fa2803d51',
                        u'sparkJob': {
                            u'applicationArguments': ['5'],
                            u'jars': [u's3a://datawarehouse-customer360/ETL/spark-examples.jar'],
                            u'mainClass': u'org.apache.spark.examples.SparkPi'
                        }
                    },
                    # {
                    #     u'failureAction': u'INTERRUPT_JOB_QUEUE',
                    #     u'name': u'a87e20d7-5c0d-49ee-ab37-625fa2803d51',
                    #     u'sparkJob': {
                    #         u'applicationArguments': ['10'],
                    #         u'jars': [u's3a://datawarehouse-customer360/ETL/spark-examples.jar'],
                    #         u'mainClass': u'org.apache.spark.examples.SparkPi'
                    #     }
                    # },
                    # {
                    #     u'failureAction': u'INTERRUPT_JOB_QUEUE',
                    #     u'name': u'a87e20d7-5c0d-49ee-ab37-625fa2803d51',
                    #     u'sparkJob': {
                    #         u'applicationArguments': [u'filesystems3.conf'],
                    #         u'jars': [u's3a://datawarehouse-customer360/ETL/envelope-0.6.0-SNAPSHOT-c6.jar'],
                    #         u'mainClass': u'com.cloudera.labs.envelope.EnvelopeMain',
                    #         u'sparkArguments': u'--archives=s3a://datawarehouse-customer360/ETL/filesystems3.conf'
                    #     }
                    # }
                ],
                u'namespaceName': u'crn:altus:sdx:us-west-1:12a0079b-1591-4ca0-b721-a446bda74e67:namespace:analytics/7ea35fe5-dbc9-4b17-92b1-97a1ab32e410',
                u'publicKey': DEFAULT_PUBLIC_KEY.get(),
                u'serviceType': u'SPARK',
                u'workersConfiguration': {},
                u'workersGroupSize': u'3'
            }
        wf_dir = Submission(
            request.user,
            wf,
            request.fs,
            request.jt,
            mapping,
            local_tz=coordinator.data['properties']['timezone']).deploy()

        properties = {'wf_application_path': request.fs.get_hdfs_path(wf_dir)}
        properties.update(mapping)

        submission = Submission(request.user,
                                coordinator,
                                request.fs,
                                request.jt,
                                properties=properties)
        job_id = submission.run()

        return job_id
    except RestException as ex:
        LOG.exception('Error submitting coordinator')
        raise PopupException(_("Error submitting coordinator %s") %
                             (coordinator, ),
                             detail=ex._headers.get('oozie-error-message', ex),
                             error_code=200)
Example #15
File: api2.py Project: cloudera/hue
def get_config(request):
  config = get_cluster_config(request.user)
  config['status'] = 0

  return JsonResponse(config)
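A minimal sketch of exercising this view with Django's test client; the '/desktop/api2/get_config' route and the test credentials are assumptions, not taken from the examples above:

from django.test import Client

client = Client()
client.login(username='test', password='test')  # assumes a seeded test user
config = client.get('/desktop/api2/get_config').json()
assert config['status'] == 0  # status 0 marks success in Hue API responses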