Example #1
File: api3.py Project: hkj123/hue
def guess_field_types(request):
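    """Guess column names/types and sample rows for the selected source
    (file, table, query, rdbms, stream or connector) and return them as a
    JSON payload for the importer UI."""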
    file_format = json.loads(request.POST.get('fileFormat', '{}'))

    if file_format['inputFormat'] == 'file':
        indexer = MorphlineIndexer(request.user, request.fs)
        path = urllib_unquote(file_format["path"])
        stream = request.fs.open(path)
        encoding = check_encoding(stream.read(10000))
        stream.seek(0)
        _convert_format(file_format["format"], inverse=True)

        format_ = indexer.guess_field_types({
            "file": {
                "stream": stream,
                "name": path
            },
            "format": file_format['format']
        })

        # Note: Would also need to set charset to table (only supported in Hive)
        if 'sample' in format_ and format_['sample']:
            format_['sample'] = escape_rows(format_['sample'],
                                            nulls_only=True,
                                            encoding=encoding)
        for col in format_['columns']:
            col['name'] = smart_unicode(col['name'],
                                        errors='replace',
                                        encoding=encoding)

    elif file_format['inputFormat'] == 'table':
        sample = get_api(request, {
            'type': 'hive'
        }).get_sample_data({'type': 'hive'},
                           database=file_format['databaseName'],
                           table=file_format['tableName'])
        db = dbms.get(request.user)
        table_metadata = db.get_table(database=file_format['databaseName'],
                                      table_name=file_format['tableName'])

        format_ = {
            "sample": sample['rows'][:4],
            "columns": [
                Field(col.name,
                      HiveFormat.FIELD_TYPE_TRANSLATE.get(col.type,
                                                          'string')).to_dict()
                for col in table_metadata.cols
            ]
        }
    elif file_format['inputFormat'] == 'query':
        query_id = file_format['query']['id'] if file_format['query'].get(
            'id') else file_format['query']

        notebook = Notebook(document=Document2.objects.document(
            user=request.user, doc_id=query_id)).get_data()
        snippet = notebook['snippets'][0]
        db = get_api(request, snippet)

        if file_format.get('sampleCols'):
            columns = file_format.get('sampleCols')
            sample = file_format.get('sample')
        else:
            snippet['query'] = snippet['statement']
            try:
                sample = db.fetch_result(notebook, snippet, 4,
                                         start_over=True)['rows'][:4]
            except Exception as e:
                LOG.warning(
                    'Skipping sample data as query handle might be expired: %s'
                    % e)
                sample = [[], [], [], [], []]
            columns = db.autocomplete(snippet=snippet, database='', table='')
            columns = [
                Field(
                    col['name'],
                    HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'],
                                                        'string')).to_dict()
                for col in columns['extended_columns']
            ]
        format_ = {
            "sample": sample,
            "columns": columns,
        }
    elif file_format['inputFormat'] == 'rdbms':
        api = _get_api(request)
        sample = api.get_sample_data(None,
                                     database=file_format['rdbmsDatabaseName'],
                                     table=file_format['tableName'])

        format_ = {
            "sample": list(sample['rows'])[:4],
            "columns": [
                Field(col['name'], col['type']).to_dict()
                for col in sample['full_headers']
            ]
        }
    elif file_format['inputFormat'] == 'stream':
        if file_format['streamSelection'] == 'kafka':
            data = get_topic_data(request.user,
                                  file_format.get('kafkaSelectedTopics'))

            kafkaFieldNames = [col['name'] for col in data['full_headers']]
            kafkaFieldTypes = [col['type'] for col in data['full_headers']]
            topics_data = data['rows']

            format_ = {
                "sample": topics_data,
                "columns": [
                    Field(col, 'string', unique=False).to_dict()
                    for col in kafkaFieldNames
                ]
            }

            # Earlier variant kept for reference: build a CSV sample from the
            # topic data, run the Morphline indexer over it, then restore the
            # Kafka field types.
            # data = """%(kafkaFieldNames)s
            # %(data)s""" % {
            #     'kafkaFieldNames': ','.join(kafkaFieldNames),
            #     'data': '\n'.join([','.join(cols) for cols in topics_data])
            # }
            # stream = string_io()
            # stream.write(data)
            #
            # _convert_format(file_format["format"], inverse=True)
            #
            # indexer = MorphlineIndexer(request.user, request.fs)
            #
            # format_ = indexer.guess_field_types({
            #     "file": {
            #         "stream": stream,
            #         "name": file_format['path']
            #     },
            #     "format": file_format['format']
            # })
            # type_mapping = dict(list(zip(kafkaFieldNames, kafkaFieldTypes)))
            #
            # for col in format_['columns']:
            #     col['keyType'] = type_mapping[col['name']]
            #     col['type'] = type_mapping[col['name']]
        elif file_format['streamSelection'] == 'flume':
            if 'hue-httpd/access_log' in file_format['channelSourcePath']:
                columns = [{
                    'name': 'id',
                    'type': 'string',
                    'unique': True
                }, {
                    'name': 'client_ip',
                    'type': 'string'
                }, {
                    'name': 'time',
                    'type': 'date'
                }, {
                    'name': 'request',
                    'type': 'string'
                }, {
                    'name': 'code',
                    'type': 'plong'
                }, {
                    'name': 'bytes',
                    'type': 'plong'
                }, {
                    'name': 'method',
                    'type': 'string'
                }, {
                    'name': 'url',
                    'type': 'string'
                }, {
                    'name': 'protocol',
                    'type': 'string'
                }, {
                    'name': 'app',
                    'type': 'string'
                }, {
                    'name': 'subapp',
                    'type': 'string'
                }]
            else:
                columns = [{'name': 'message', 'type': 'string'}]

            format_ = {
                "sample": [['...'] * len(columns)] * 4,
                "columns": [
                    Field(col['name'],
                          HiveFormat.FIELD_TYPE_TRANSLATE.get(
                              col['type'], 'string'),
                          unique=col.get('unique')).to_dict()
                    for col in columns
                ]
            }
    elif file_format['inputFormat'] == 'connector':
        if file_format['connectorSelection'] == 'sfdc':
            sf = Salesforce(username=file_format['streamUsername'],
                            password=file_format['streamPassword'],
                            security_token=file_format['streamToken'])
            table_metadata = [{
                'name': column['name'],
                'type': column['type']
            } for column in sf.restful('sobjects/%(streamObject)s/describe/' %
                                       file_format)['fields']]
            query = 'SELECT %s FROM %s LIMIT 4' % (', '.join(
                [col['name']
                 for col in table_metadata]), file_format['streamObject'])
            LOG.debug(query)

            try:
                records = sf.query_all(query)
            except SalesforceRefusedRequest as e:
                raise PopupException(message=str(e))

            format_ = {
                "sample": [list(row.values())[1:] for row in records['records']],
                "columns": [
                    Field(
                        col['name'],
                        HiveFormat.FIELD_TYPE_TRANSLATE.get(
                            col['type'], 'string')).to_dict()
                    for col in table_metadata
                ]
            }
        else:
            raise PopupException(
                _('Connector format not recognized: %(connectorSelection)s') %
                file_format)
    else:
        raise PopupException(
            _('Input format not recognized: %(inputFormat)s') % file_format)

    return JsonResponse(format_)
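
A minimal sketch of how a client might exercise this view, assuming Django's
test client; the URL and payload values are illustrative assumptions (the
real route is not shown in this listing), and the payload shape follows the
'file' branch above:

import json
from django.test import Client

client = Client()
# URL and payload values are assumptions for illustration only.
response = client.post('/indexer/api/indexer/guess_field_types', {
    'fileFormat': json.dumps({
        'inputFormat': 'file',
        'path': '/user/demo/web_logs.csv',
        'format': {'type': 'csv', 'fieldSeparator': ',',
                   'quoteChar': '"', 'recordSeparator': '\\n',
                   'hasHeader': True},
    })
})
columns = json.loads(response.content)['columns']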
Example #2
File: api3.py Project: hkj123/hue
def guess_format(request):
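    """Guess the file format (type, separators, header) of the selected
    source and return it as JSON with 'status': 0 on success."""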
    file_format = json.loads(request.POST.get('fileFormat', '{}'))

    if file_format['inputFormat'] == 'file':
        path = urllib_unquote(file_format["path"])
        indexer = MorphlineIndexer(request.user, request.fs)
        if not request.fs.isfile(path):
            raise PopupException(
                _('Path %(path)s is not a file') % file_format)

        stream = request.fs.open(path)
        format_ = indexer.guess_format(
            {"file": {
                "stream": stream,
                "name": path
            }})
        _convert_format(format_)
    elif file_format['inputFormat'] == 'table':
        db = dbms.get(request.user)
        try:
            table_metadata = db.get_table(database=file_format['databaseName'],
                                          table_name=file_format['tableName'])
        except Exception as e:
            raise PopupException(
                e.message if hasattr(e, 'message') and e.message else e)
        storage = {}
        for delim in table_metadata.storage_details:
            if delim['data_type']:
                if '=' in delim['data_type']:
                    key, val = delim['data_type'].split('=', 1)
                    storage[key] = val
                else:
                    storage[delim['data_type']] = delim['comment']
        if table_metadata.details['properties']['format'] == 'text':
            format_ = {
                "quoteChar": "\"",
                "recordSeparator": '\\n',
                "type": "csv",
                "hasHeader": False,
                "fieldSeparator": storage.get('field.delim', ',')
            }
        elif table_metadata.details['properties']['format'] == 'parquet':
            format_ = {
                "type": "parquet",
                "hasHeader": False,
            }
        else:
            raise PopupException(
                'Hive table format %s is not supported.' %
                table_metadata.details['properties']['format'])
    elif file_format['inputFormat'] == 'query':
        format_ = {
            "quoteChar": "\"",
            "recordSeparator": "\\n",
            "type": "csv",
            "hasHeader": False,
            "fieldSeparator": "\u0001"
        }
    elif file_format['inputFormat'] == 'rdbms':
        format_ = {"type": "csv"}
    elif file_format['inputFormat'] == 'stream':
        if file_format['streamSelection'] == 'kafka':
            format_ = {
                "type": "json",
                # "fieldSeparator": ",",
                # "hasHeader": True,
                # "quoteChar": "\"",
                # "recordSeparator": "\\n",
                'topics': get_topics(request.user)
            }
        elif file_format['streamSelection'] == 'flume':
            format_ = {
                "type": "csv",
                "fieldSeparator": ",",
                "hasHeader": True,
                "quoteChar": "\"",
                "recordSeparator": "\\n"
            }
    elif file_format['inputFormat'] == 'connector':
        if file_format['connectorSelection'] == 'sfdc':
            sf = Salesforce(username=file_format['streamUsername'],
                            password=file_format['streamPassword'],
                            security_token=file_format['streamToken'])
            format_ = {
                "type": "csv",
                "fieldSeparator": ",",
                "hasHeader": True,
                "quoteChar": "\"",
                "recordSeparator": "\\n",
                'objects': [
                    sobject['name']
                    for sobject in sf.restful('sobjects/')['sobjects']
                    if sobject['queryable']
                ]
            }
        else:
            raise PopupException(
                _('Input format %(inputFormat)s connector not recognized: %(connectorSelection)s'
                  ) % file_format)
    else:
        raise PopupException(
            _('Input format not recognized: %(inputFormat)s') % file_format)

    format_['status'] = 0
    return JsonResponse(format_)
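
For reference, a sketch of the POST payloads the branches above expect; the
key names come from the code, the values are illustrative:

import json

# 'table' branch: needs the Hive database and table names (values made up).
table_payload = {'inputFormat': 'table',
                 'databaseName': 'default',
                 'tableName': 'web_logs'}
# 'stream'/'kafka' branch: only the selection is needed; topics are listed server-side.
kafka_payload = {'inputFormat': 'stream', 'streamSelection': 'kafka'}
# Either dict is posted as the single 'fileFormat' form field:
post_data = {'fileFormat': json.dumps(table_payload)}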
Example #3
    else:
      raise PopupException('Hive table format %s is not supported.' % table_metadata.details['properties']['format'])
  elif file_format['inputFormat'] == 'query':
    format_ = {"quoteChar": "\"", "recordSeparator": "\\n", "type": "csv", "hasHeader": False, "fieldSeparator": "\u0001"}
  elif file_format['inputFormat'] == 'rdbms':
    format_ = RdbmsIndexer(request.user, file_format['rdbmsType']).guess_format()
  elif file_format['inputFormat'] == 'stream':
    if file_format['streamSelection'] == 'kafka':
      format_ = {"type": "csv", "fieldSeparator": ",", "hasHeader": True, "quoteChar": "\"", "recordSeparator": "\\n", 'topics': get_topics()}
    elif file_format['streamSelection'] == 'sfdc':
      sf = Salesforce(
          username=file_format['streamUsername'],
          password=file_format['streamPassword'],
          security_token=file_format['streamToken']
      )
      format_ = {"type": "csv", "fieldSeparator": ",", "hasHeader": True, "quoteChar": "\"", "recordSeparator": "\\n", 'objects': [sobject['name'] for sobject in sf.restful('sobjects/')['sobjects'] if sobject['queryable']]}

  format_['status'] = 0
  return JsonResponse(format_)


def guess_field_types(request):
  file_format = json.loads(request.POST.get('fileFormat', '{}'))

  if file_format['inputFormat'] == 'file':
    indexer = MorphlineIndexer(request.user, request.fs)
    path = urllib.unquote(file_format["path"])
    stream = request.fs.open(path)
    encoding = chardet.detect(stream.read(10000)).get('encoding')
    stream.seek(0)
    _convert_format(file_format["format"], inverse=True)
Example #4
                            password=file_format['streamPassword'],
                            security_token=file_format['streamToken'])
            format_ = {
                "type": "csv",
                "fieldSeparator": ",",
                "hasHeader": True,
                "quoteChar": "\"",
                "recordSeparator": "\\n",
                'objects': [
                    sobject['name']
                    for sobject in sf.restful('sobjects/')['sobjects']
                    if sobject['queryable']
                ]
            }

    format_['status'] = 0
    return JsonResponse(format_)


def guess_field_types(request):
    file_format = json.loads(request.POST.get('fileFormat', '{}'))

    if file_format['inputFormat'] == 'file':
        indexer = MorphlineIndexer(request.user, request.fs)
        path = urllib.unquote(file_format["path"])
        stream = request.fs.open(path)
Example #5
File: api3.py Project: mapr/hue
def guess_format(request):
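    """Guess the format of the selected source; xls/xlsx files are first
    converted to CSV, and Excel import is rejected on Python 2."""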
    file_format = json.loads(request.POST.get('fileFormat', '{}'))
    file_type = file_format['file_type']
    path = urllib_unquote(file_format["path"])

    if sys.version_info[0] < 3 and (file_type == 'excel' or path[-3:] == 'xls'
                                    or path[-4:] == 'xlsx'):
        return JsonResponse({
            'status': -1,
            'message': 'Python2 based Hue does not support Excel file importer'
        })

    if file_format['inputFormat'] == 'localfile':
        if file_type == 'excel':
            format_ = {"type": "excel", "hasHeader": True}
        else:
            format_ = {
                "quoteChar": "\"",
                "recordSeparator": '\\n',
                "type": "csv",
                "hasHeader": True,
                "fieldSeparator": ","
            }

    elif file_format['inputFormat'] == 'file':
        if path[-3:] == 'xls' or path[-4:] == 'xlsx':
            file_obj = request.fs.open(path)
            if path[-3:] == 'xls':
                df = pd.read_excel(file_obj.read(1024 * 1024 * 1024),
                                   engine='xlrd')
            else:
                df = pd.read_excel(file_obj.read(1024 * 1024 * 1024),
                                   engine='openpyxl')
            _csv_data = df.to_csv(index=False)

            path = excel_to_csv_file_name_change(path)
            request.fs.create(path, overwrite=True, data=_csv_data)

        indexer = MorphlineIndexer(request.user, request.fs)
        if not request.fs.isfile(path):
            raise PopupException(
                _('Path %(path)s is not a file') % file_format)

        stream = request.fs.open(path)
        format_ = indexer.guess_format(
            {"file": {
                "stream": stream,
                "name": path
            }})
        _convert_format(format_)

        if file_format["path"][-3:] == 'xls' or file_format["path"][
                -4:] == 'xlsx':
            format_ = {
                "quoteChar": "\"",
                "recordSeparator": '\\n',
                "type": "excel",
                "hasHeader": True,
                "fieldSeparator": ","
            }

    elif file_format['inputFormat'] == 'table':
        db = dbms.get(request.user)
        try:
            table_metadata = db.get_table(database=file_format['databaseName'],
                                          table_name=file_format['tableName'])
        except Exception as e:
            raise PopupException(
                e.message if hasattr(e, 'message') and e.message else e)
        storage = {}
        for delim in table_metadata.storage_details:
            if delim['data_type']:
                if '=' in delim['data_type']:
                    key, val = delim['data_type'].split('=', 1)
                    storage[key] = val
                else:
                    storage[delim['data_type']] = delim['comment']
        if table_metadata.details['properties']['format'] == 'text':
            format_ = {
                "quoteChar": "\"",
                "recordSeparator": '\\n',
                "type": "csv",
                "hasHeader": False,
                "fieldSeparator": storage.get('field.delim', ',')
            }
        elif table_metadata.details['properties']['format'] == 'parquet':
            format_ = {
                "type": "parquet",
                "hasHeader": False,
            }
        else:
            raise PopupException(
                'Hive table format %s is not supported.' %
                table_metadata.details['properties']['format'])
    elif file_format['inputFormat'] == 'query':
        format_ = {
            "quoteChar": "\"",
            "recordSeparator": "\\n",
            "type": "csv",
            "hasHeader": False,
            "fieldSeparator": "\u0001"
        }
    elif file_format['inputFormat'] == 'rdbms':
        format_ = {"type": "csv"}
    elif file_format['inputFormat'] == 'stream':
        if file_format['streamSelection'] == 'kafka':
            format_ = {
                "type": "json",
                # "fieldSeparator": ",",
                # "hasHeader": True,
                # "quoteChar": "\"",
                # "recordSeparator": "\\n",
                'topics': get_topics(request.user)
            }
        elif file_format['streamSelection'] == 'flume':
            format_ = {
                "type": "csv",
                "fieldSeparator": ",",
                "hasHeader": True,
                "quoteChar": "\"",
                "recordSeparator": "\\n"
            }
    elif file_format['inputFormat'] == 'connector':
        if file_format['connectorSelection'] == 'sfdc':
            sf = Salesforce(username=file_format['streamUsername'],
                            password=file_format['streamPassword'],
                            security_token=file_format['streamToken'])
            format_ = {
                "type": "csv",
                "fieldSeparator": ",",
                "hasHeader": True,
                "quoteChar": "\"",
                "recordSeparator": "\\n",
                'objects': [
                    sobject['name']
                    for sobject in sf.restful('sobjects/')['sobjects']
                    if sobject['queryable']
                ]
            }
        else:
            raise PopupException(
                _('Input format %(inputFormat)s connector not recognized: %(connectorSelection)s'
                  ) % file_format)
    else:
        raise PopupException(
            _('Input format not recognized: %(inputFormat)s') % file_format)

    format_['status'] = 0
    return JsonResponse(format_)
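
The 'file' branch above converts xls/xlsx input to CSV in place before
guessing. A standalone sketch of that conversion under the same engine
choices, using a local file and io.BytesIO instead of request.fs (the file
name is illustrative):

import io
import pandas as pd

def excel_bytes_to_csv(data, is_xls):
    # xlrd only reads legacy .xls; openpyxl handles .xlsx.
    engine = 'xlrd' if is_xls else 'openpyxl'
    df = pd.read_excel(io.BytesIO(data), engine=engine)
    return df.to_csv(index=False)

with open('sample.xlsx', 'rb') as f:
    csv_data = excel_bytes_to_csv(f.read(), is_xls=False)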
Example #6
def guess_field_types(request):
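    """Guess column names/types and sample rows for the selected source;
    the Kafka path builds a mocked CSV sample and runs the Morphline
    indexer over it."""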
    file_format = json.loads(request.POST.get('fileFormat', '{}'))

    if file_format['inputFormat'] == 'file':
        indexer = MorphlineIndexer(request.user, request.fs)
        path = urllib_unquote(file_format["path"])
        stream = request.fs.open(path)
        encoding = chardet.detect(stream.read(10000)).get('encoding')
        stream.seek(0)
        _convert_format(file_format["format"], inverse=True)

        format_ = indexer.guess_field_types({
            "file": {
                "stream": stream,
                "name": path
            },
            "format": file_format['format']
        })

        # Note: Would also need to set charset to table (only supported in Hive)
        if 'sample' in format_ and format_['sample']:
            format_['sample'] = escape_rows(format_['sample'],
                                            nulls_only=True,
                                            encoding=encoding)
        for col in format_['columns']:
            col['name'] = smart_unicode(col['name'],
                                        errors='replace',
                                        encoding=encoding)

    elif file_format['inputFormat'] == 'table':
        sample = get_api(request, {
            'type': 'hive'
        }).get_sample_data({'type': 'hive'},
                           database=file_format['databaseName'],
                           table=file_format['tableName'])
        db = dbms.get(request.user)
        table_metadata = db.get_table(database=file_format['databaseName'],
                                      table_name=file_format['tableName'])

        format_ = {
            "sample": sample['rows'][:4],
            "columns": [
                Field(col.name,
                      HiveFormat.FIELD_TYPE_TRANSLATE.get(col.type,
                                                          'string')).to_dict()
                for col in table_metadata.cols
            ]
        }
    elif file_format['inputFormat'] == 'query':
        query_id = file_format['query']['id'] if file_format['query'].get(
            'id') else file_format['query']

        notebook = Notebook(document=Document2.objects.document(
            user=request.user, doc_id=query_id)).get_data()
        snippet = notebook['snippets'][0]
        db = get_api(request, snippet)

        if file_format.get('sampleCols'):
            columns = file_format.get('sampleCols')
            sample = file_format.get('sample')
        else:
            snippet['query'] = snippet['statement']
            try:
                sample = db.fetch_result(notebook, snippet, 4,
                                         start_over=True)['rows'][:4]
            except Exception as e:
                LOG.warning(
                    'Skipping sample data as query handle might be expired: %s'
                    % e)
                sample = [[], [], [], [], []]
            columns = db.autocomplete(snippet=snippet, database='', table='')
            columns = [
                Field(
                    col['name'],
                    HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'],
                                                        'string')).to_dict()
                for col in columns['extended_columns']
            ]
        format_ = {
            "sample": sample,
            "columns": columns,
        }
    elif file_format['inputFormat'] == 'rdbms':
        api = _get_api(request)
        sample = api.get_sample_data(None,
                                     database=file_format['rdbmsDatabaseName'],
                                     table=file_format['tableName'])

        format_ = {
            "sample": list(sample['rows'])[:4],
            "columns": [
                Field(col['name'], col['type']).to_dict()
                for col in sample['full_headers']
            ]
        }
    elif file_format['inputFormat'] == 'stream':
        if file_format['streamSelection'] == 'kafka':
            if file_format.get(
                    'kafkaSelectedTopics') == 'NavigatorAuditEvents':
                kafkaFieldNames = [
                    'id', 'additionalInfo', 'allowed', 'collectionName',
                    'databaseName', 'db', 'DELEGATION_TOKEN_ID', 'dst',
                    'entityId', 'family', 'impersonator', 'ip', 'name',
                    'objectType', 'objType', 'objUsageType', 'operationParams',
                    'operationText', 'op', 'opText', 'path', 'perms',
                    'privilege', 'qualifier', 'QUERY_ID', 'resourcePath',
                    'service', 'SESSION_ID', 'solrVersion', 'src', 'status',
                    'subOperation', 'tableName', 'table', 'time', 'type',
                    'url', 'user'
                ]
                kafkaFieldTypes = ['string'] * len(kafkaFieldNames)
                kafkaFieldNames.append('timeDate')
                kafkaFieldTypes.append('date')
            else:
                # Note: mocked here, should come from SFDC or Kafka API or sampling job
                kafkaFieldNames = file_format.get('kafkaFieldNames',
                                                  '').split(',')
                kafkaFieldTypes = file_format.get('kafkaFieldTypes',
                                                  '').split(',')

            data = """%(kafkaFieldNames)s
%(data)s""" % {
                'kafkaFieldNames': ','.join(kafkaFieldNames),
                'data': '\n'.join(
                    [','.join(['...'] * len(kafkaFieldTypes))] * 5)
            }
            stream = string_io()
            stream.write(data)
            stream.seek(0)  # rewind so the indexer samples from the start

            _convert_format(file_format["format"], inverse=True)

            indexer = MorphlineIndexer(request.user, request.fs)
            format_ = indexer.guess_field_types({
                "file": {
                    "stream": stream,
                    "name": file_format['path']
                },
                "format": file_format['format']
            })
            type_mapping = dict(list(zip(kafkaFieldNames, kafkaFieldTypes)))

            for col in format_['columns']:
                col['keyType'] = type_mapping[col['name']]
                col['type'] = type_mapping[col['name']]
        elif file_format['streamSelection'] == 'flume':
            if 'hue-httpd/access_log' in file_format['channelSourcePath']:
                columns = [{
                    'name': 'id',
                    'type': 'string',
                    'unique': True
                }, {
                    'name': 'client_ip',
                    'type': 'string'
                }, {
                    'name': 'time',
                    'type': 'date'
                }, {
                    'name': 'request',
                    'type': 'string'
                }, {
                    'name': 'code',
                    'type': 'plong'
                }, {
                    'name': 'bytes',
                    'type': 'plong'
                }, {
                    'name': 'method',
                    'type': 'string'
                }, {
                    'name': 'url',
                    'type': 'string'
                }, {
                    'name': 'protocol',
                    'type': 'string'
                }, {
                    'name': 'app',
                    'type': 'string'
                }, {
                    'name': 'subapp',
                    'type': 'string'
                }]
            else:
                columns = [{'name': 'message', 'type': 'string'}]

            format_ = {
                "sample": [['...'] * len(columns)] * 4,
                "columns": [
                    Field(col['name'],
                          HiveFormat.FIELD_TYPE_TRANSLATE.get(
                              col['type'], 'string'),
                          unique=col.get('unique')).to_dict()
                    for col in columns
                ]
            }
    elif file_format['inputFormat'] == 'connector':
        if file_format['connectorSelection'] == 'sfdc':
            sf = Salesforce(username=file_format['streamUsername'],
                            password=file_format['streamPassword'],
                            security_token=file_format['streamToken'])
            table_metadata = [{
                'name': column['name'],
                'type': column['type']
            } for column in sf.restful('sobjects/%(streamObject)s/describe/' %
                                       file_format)['fields']]
            query = 'SELECT %s FROM %s LIMIT 4' % (', '.join(
                [col['name']
                 for col in table_metadata]), file_format['streamObject'])
            LOG.debug(query)

            try:
                records = sf.query_all(query)
            except SalesforceRefusedRequest as e:
                raise PopupException(message=str(e))

            format_ = {
                "sample": [list(row.values())[1:] for row in records['records']],
                "columns": [
                    Field(
                        col['name'],
                        HiveFormat.FIELD_TYPE_TRANSLATE.get(
                            col['type'], 'string')).to_dict()
                    for col in table_metadata
                ]
            }
        else:
            raise PopupException(
                _('Connector format not recognized: %(connectorSelection)s') %
                file_format)
    else:
        raise PopupException(
            _('Input format not recognized: %(inputFormat)s') % file_format)

    return JsonResponse(format_)