def _submit_workflow_helper(request, workflow, submit_action):
    """Handle submission of an Oozie workflow from the parameters form.

    On POST, validates the ``ParameterForm`` formset, builds the submission
    mapping (parameter name -> value plus dryrun/send_email flags) and either:
      - submits a placeholder statement to an Altus Data Engineering cluster
        and returns its job handle as JSON, or
      - submits through ``_submit_workflow`` and returns JSON or a redirect to
        the workflow dashboard, depending on the requested format.

    Returns None for non-POST requests or when the formset is invalid
    (presumably the caller re-renders the form — TODO confirm against caller).

    Raises:
        PopupException: if ``_submit_workflow`` fails.
    """
    ParametersFormSet = formset_factory(ParameterForm, extra=0)

    if request.method == 'POST':
        cluster = json.loads(request.POST.get('cluster', '{}'))
        params_form = ParametersFormSet(request.POST)

        if params_form.is_valid():
            # dict comprehension instead of dict([(k, v) for ...])
            mapping = {param['name']: param['value'] for param in params_form.cleaned_data}
            mapping['dryrun'] = request.POST.get('dryrun_checkbox') == 'on'
            mapping['send_email'] = request.POST.get('email_checkbox') == 'on'
            if '/submit_single_action/' in submit_action:
                mapping['submit_single_action'] = True

            if cluster.get('type') == 'altus-de':
                # Altus Data Engineering: execute a placeholder statement and
                # hand the job handle back directly.
                notebook = {}
                snippet = {'statement': 'SELECT 1'}
                handle = DataEngApi(
                    user=request.user, request=request, cluster_name=cluster.get('name')
                ).execute(notebook, snippet)
                return JsonResponse(
                    {'status': 0, 'job_id': handle.get('id'), 'type': 'workflow'},
                    safe=False
                )

            try:
                job_id = _submit_workflow(request.user, request.fs, request.jt, workflow, mapping)
            except Exception as e:  # fixed: `except Exception, e` is Python-2-only syntax
                raise PopupException(_('Workflow submission failed'), detail=smart_str(e), error_code=200)

            if request.POST.get('format') == 'json':
                return JsonResponse(
                    {'status': 0, 'job_id': job_id, 'type': 'workflow'},
                    safe=False
                )
            else:
                request.info(_('Workflow submitted'))
                return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
        else:
            # Fixed i18n: interpolate outside _() so the msgid stays a constant,
            # translatable string (the original formatted inside the gettext call).
            request.error(_('Invalid submission form: %s') % params_form.errors)
def get_api(request, snippet):
    """Look up the interpreter configured for this snippet type and return the
    matching connector API instance.

    Raises:
        PopupException: if the snippet type is not configured, or the resolved
            interface is unknown.
    """
    from notebook.connectors.dataeng import DataEngApi
    from notebook.connectors.hiveserver2 import HS2Api
    from notebook.connectors.jdbc import JdbcApi
    from notebook.connectors.rdbms import RdbmsApi
    from notebook.connectors.oozie_batch import OozieApi
    from notebook.connectors.solr import SolrApi
    from notebook.connectors.spark_shell import SparkApi
    from notebook.connectors.spark_batch import SparkBatchApi
    from notebook.connectors.text import TextApi

    if snippet.get('wasBatchExecuted'):
        return OozieApi(user=request.user, request=request)

    matching = [
        candidate for candidate in get_ordered_interpreters(request.user)
        if candidate['type'] == snippet['type']
    ]
    if not matching:
        raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)

    selected = matching[0]
    interface = selected['interface']

    # Multi cluster: a Data Engineering cluster overrides the interface.
    cluster = Cluster(request.user)
    if cluster and cluster.get_type() == 'dataeng':
        interface = 'dataeng'

    if interface == 'hiveserver2':
        return HS2Api(user=request.user, request=request)
    elif interface in ('oozie', 'pig'):  # 'pig' kept for backward compatibility until Hue 4
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        return SparkBatchApi(request.user)
    elif interface in ('text', 'markdown'):
        return TextApi(request.user)
    elif interface == 'rdbms':
        return RdbmsApi(request.user, interpreter=snippet['type'])
    elif interface == 'dataeng':
        return DataEngApi(user=request.user, request=request, cluster_name=cluster.get_interface())
    elif interface == 'jdbc':
        return JdbcApi(request.user, interpreter=selected)
    elif interface == 'solr':
        return SolrApi(request.user, interpreter=selected)

    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
def get_api(request, snippet):
    """Resolve the connector API for `snippet`, optionally targeting a cluster
    posted by the client (Catalog API) or carried on the snippet's compute.

    Raises:
        PopupException: if the snippet type is not configured, or the resolved
            interface is unknown.
    """
    from notebook.connectors.oozie_batch import OozieApi

    if snippet.get('wasBatchExecuted'):
        return OozieApi(user=request.user, request=request)

    if snippet['type'] == 'report':
        snippet['type'] = 'impala'

    matching = [
        candidate for candidate in get_ordered_interpreters(request.user)
        if candidate['type'] == snippet['type']
    ]
    if not matching:
        # hbase/kafka/solr work without a hue.ini entry: synthesize a minimal
        # interpreter description (name/type/interface all equal the type).
        if snippet['type'] in ('hbase', 'kafka', 'solr'):
            matching = [{
                'name': snippet['type'],
                'type': snippet['type'],
                'interface': snippet['type'],
                'options': {},
                'is_sql': False
            }]
        else:
            raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)

    selected = matching[0]
    interface = selected['interface']

    # Multi cluster
    cluster = json.loads(request.POST.get('cluster', '""'))  # Via Catalog API
    if cluster == 'undefined':
        cluster = None
    if not cluster and snippet.get('compute'):  # Via notebook.ko.js
        cluster = snippet.get('compute').get('id')
    if cluster and 'crn:altus:dataware:' in cluster:
        interface = 'altus-adb'
    if cluster:
        LOG.info('Selected cluster %s' % cluster)

    if interface == 'hiveserver2':
        from notebook.connectors.hiveserver2 import HS2Api
        return HS2Api(user=request.user, request=request, cluster=cluster)
    elif interface in ('oozie', 'pig'):  # 'pig' kept for backward compatibility until Hue 4
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        from notebook.connectors.spark_shell import SparkApi
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        from notebook.connectors.spark_batch import SparkBatchApi
        return SparkBatchApi(request.user)
    elif interface in ('text', 'markdown'):
        from notebook.connectors.text import TextApi
        return TextApi(request.user)
    elif interface == 'rdbms':
        from notebook.connectors.rdbms import RdbmsApi
        return RdbmsApi(request.user, interpreter=snippet['type'])
    elif interface == 'altus-adb':
        from notebook.connectors.altus_adb import AltusAdbApi
        return AltusAdbApi(user=request.user, cluster_name=cluster, request=request)
    elif interface == 'dataeng':
        from notebook.connectors.dataeng import DataEngApi
        return DataEngApi(user=request.user, request=request, cluster_name=cluster.get('name'))
    elif interface in ('jdbc', 'teradata'):
        from notebook.connectors.jdbc import JdbcApi
        return JdbcApi(request.user, interpreter=selected)
    elif interface == 'sqlalchemy':
        from notebook.connectors.sqlalchemyapi import SqlAlchemyApi
        return SqlAlchemyApi(request.user, interpreter=selected)
    elif interface == 'solr':
        from notebook.connectors.solr import SolrApi
        return SolrApi(request.user, interpreter=selected)
    elif interface == 'hbase':
        from notebook.connectors.hbase import HBaseApi
        return HBaseApi(request.user)
    elif interface == 'kafka':
        from notebook.connectors.kafka import KafkaApi
        return KafkaApi(request.user)

    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
def get_api(request, snippet):
    """Resolve and instantiate the connector API for `snippet`.

    The interpreter is looked up in the configured list (matching either its
    type or interface), with synthesized fallbacks for hbase/kafka/solr and
    'custom' snippets. The target cluster comes from the connectors config, a
    posted 'cluster' value, or the snippet's compute; CRN-style cluster ids
    can override the interface. The resolved interface is then dispatched to
    the matching connector class.

    Raises:
        PopupException: when the snippet type is not configured or the
            resolved interface is not recognized.
    """
    from notebook.connectors.oozie_batch import OozieApi

    if snippet.get('wasBatchExecuted'):
        return OozieApi(user=request.user, request=request)

    if snippet['type'] == 'report':
        snippet['type'] = 'impala'

    interpreter = [
        interpreter for interpreter in get_ordered_interpreters(request.user)
        if snippet['type'] in (interpreter['type'], interpreter['interface'])
    ]
    if not interpreter:
        # Connectors usable without configuration get a synthesized interpreter.
        if snippet['type'] in ('hbase', 'kafka', 'solr'):
            interpreter = [{
                'name': snippet['type'],
                'type': snippet['type'],
                'interface': snippet['type'],
                'options': {},
                'is_sql': False
            }]
        elif snippet['type'] == 'custom':
            interpreter = [{
                'name': snippet['name'],
                'type': snippet['type'],
                'interface': snippet['interface'],
                'options': snippet.get('options', {}),
                'is_sql': False
            }]
        else:
            raise PopupException(_('Snippet type %(type)s is not configured.') % snippet)

    interpreter = interpreter[0]
    interface = interpreter['interface']

    if CONNECTORS.IS_ENABLED.get():
        cluster = {
            'connector': snippet['type'],
            'id': interpreter['type'],
        }
        snippet['type'] = snippet['type'].split('-', 2)[0]
        cluster.update(interpreter['options'])
    # Multi cluster
    elif has_multi_cluster():
        cluster = json.loads(request.POST.get('cluster', '""'))  # Via Catalog autocomplete API or Notebook create sessions
        if cluster == '""' or cluster == 'undefined':
            cluster = None
        if not cluster and snippet.get('compute'):  # Via notebook.ko.js
            cluster = snippet['compute']
    else:
        cluster = None

    cluster_name = cluster.get('id') if cluster else None

    # CRN-style cluster ids force a specific interface.
    if cluster and 'altus:dataware:k8s' in cluster_name:
        interface = 'hiveserver2'
    elif cluster and 'crn:altus:dataware:' in cluster_name:
        interface = 'altus-adb'
    elif cluster and 'crn:altus:dataeng:' in cluster_name:
        interface = 'dataeng'

    LOG.info('Selected cluster %s %s interface %s' % (cluster_name, cluster, interface))
    snippet['interface'] = interface

    if interface.startswith('hiveserver2') or interface == 'hms':
        from notebook.connectors.hiveserver2 import HS2Api
        return HS2Api(user=request.user, request=request, cluster=cluster, interface=interface)
    elif interface == 'oozie':
        return OozieApi(user=request.user, request=request)
    elif interface == 'livy':
        from notebook.connectors.spark_shell import SparkApi
        return SparkApi(request.user)
    elif interface == 'livy-batch':
        from notebook.connectors.spark_batch import SparkBatchApi
        return SparkBatchApi(request.user)
    elif interface in ('text', 'markdown'):
        from notebook.connectors.text import TextApi
        return TextApi(request.user)
    elif interface == 'rdbms':
        from notebook.connectors.rdbms import RdbmsApi
        return RdbmsApi(request.user, interpreter=snippet['type'], query_server=snippet.get('query_server'))
    elif interface == 'altus-adb':
        from notebook.connectors.altus_adb import AltusAdbApi
        return AltusAdbApi(user=request.user, cluster_name=cluster_name, request=request)
    elif interface == 'dataeng':
        from notebook.connectors.dataeng import DataEngApi
        return DataEngApi(user=request.user, request=request, cluster_name=cluster_name)
    elif interface == 'jdbc':
        # A specialized JDBC connector is picked by sniffing the configured URL;
        # when options are empty the url is '' and the generic JdbcApi is used.
        url = interpreter['options'].get('url', '') if interpreter['options'] else ''
        if 'teradata' in url:
            from notebook.connectors.jdbc_teradata import JdbcApiTeradata
            return JdbcApiTeradata(request.user, interpreter=interpreter)
        elif 'awsathena' in url:
            from notebook.connectors.jdbc_athena import JdbcApiAthena
            return JdbcApiAthena(request.user, interpreter=interpreter)
        elif 'presto' in url:
            from notebook.connectors.jdbc_presto import JdbcApiPresto
            return JdbcApiPresto(request.user, interpreter=interpreter)
        elif 'clickhouse' in url:
            from notebook.connectors.jdbc_clickhouse import JdbcApiClickhouse
            return JdbcApiClickhouse(request.user, interpreter=interpreter)
        else:
            from notebook.connectors.jdbc import JdbcApi
            return JdbcApi(request.user, interpreter=interpreter)
    elif interface == 'teradata':
        # Fixed: import made consistent with the jdbc URL-sniffing branch above
        # (the original imported JdbcApiTeradata from notebook.connectors.jdbc).
        from notebook.connectors.jdbc_teradata import JdbcApiTeradata
        return JdbcApiTeradata(request.user, interpreter=interpreter)
    elif interface == 'athena':
        # Fixed: import made consistent with the jdbc URL-sniffing branch above
        # (the original imported JdbcApiAthena from notebook.connectors.jdbc).
        from notebook.connectors.jdbc_athena import JdbcApiAthena
        return JdbcApiAthena(request.user, interpreter=interpreter)
    elif interface == 'presto':
        from notebook.connectors.jdbc_presto import JdbcApiPresto
        return JdbcApiPresto(request.user, interpreter=interpreter)
    elif interface == 'sqlalchemy':
        from notebook.connectors.sqlalchemyapi import SqlAlchemyApi
        return SqlAlchemyApi(request.user, interpreter=interpreter)
    elif interface == 'solr':
        from notebook.connectors.solr import SolrApi
        return SolrApi(request.user, interpreter=interpreter)
    elif interface == 'hbase':
        from notebook.connectors.hbase import HBaseApi
        return HBaseApi(request.user)
    elif interface == 'kafka':
        from notebook.connectors.kafka import KafkaApi
        return KafkaApi(request.user)
    elif interface == 'pig':
        return OozieApi(user=request.user, request=request)  # Backward compatibility until Hue 4
    else:
        raise PopupException(_('Notebook connector interface not recognized: %s') % interface)