def index(request):
    """Render the dataset-importer landing page.

    Gathers the supported input-format descriptors, the datasets the
    requesting user is allowed to see, completed model-training tasks
    and the analyzers reported by Elasticsearch, then renders them all
    into the 'dataset_importer.html' template.
    """
    template = loader.get_template('dataset_importer.html')
    import_jobs = DatasetImport.objects.all()
    # One descriptor list per supported input-format family.
    archive_formats = collect_map_entries(extractor_map)
    single_document_formats = collect_map_entries(entity_reader_map)
    document_collection_formats = collect_map_entries(collection_reader_map)
    database_formats = collect_map_entries(database_reader_map)
    allowed = Datasets().get_allowed_datasets(request.user)
    # Only finished TRAIN_MODEL tasks, newest first.
    completed_models = (
        Task.objects
        .filter(task_type=TaskTypes.TRAIN_MODEL.value)
        .filter(status__iexact=Task.STATUS_COMPLETED)
        .order_by('-pk')
    )
    analyzers = ES_Manager.get_analyzers()
    context = {
        'archive_formats': archive_formats,
        'single_document_formats': single_document_formats,
        'document_collection_formats': document_collection_formats,
        'database_formats': database_formats,
        'language_models': completed_models,
        'allowed_datasets': allowed,
        'jobs': import_jobs,
        'analyzers': analyzers
    }
    return HttpResponse(template.render(context, request))
def index(request):
    """Render the dataset-importer landing page with format, dataset,
    model and analyzer information for the template.

    NOTE(review): this redefines ``index`` from earlier in this module
    and shadows that definition at import time — confirm which copy is
    intended and remove the other.
    """
    template = loader.get_template('dataset_importer.html')
    # Completed TRAIN_MODEL tasks only, most recent first.
    finished_trainings = Task.objects.filter(
        task_type=TaskTypes.TRAIN_MODEL.value,
    ).filter(
        status__iexact=Task.STATUS_COMPLETED,
    ).order_by('-pk')
    context = {
        'archive_formats': collect_map_entries(extractor_map),
        'single_document_formats': collect_map_entries(entity_reader_map),
        'document_collection_formats': collect_map_entries(collection_reader_map),
        'database_formats': collect_map_entries(database_reader_map),
        'language_models': finished_trainings,
        'allowed_datasets': Datasets().get_allowed_datasets(request.user),
        'jobs': DatasetImport.objects.all(),
        'analyzers': ES_Manager.get_analyzers()
    }
    return HttpResponse(template.render(context, request))
def check_if_analyzer_exists(self):
    """Validate the analyzer name submitted by the client.

    Reads the 'analyzer' field from ``self.post_dict`` and checks it
    against the analyzers Elasticsearch reports as available.

    Raises:
        ValueError: if the 'analyzer' field is missing from the request,
            or if it names an analyzer Elasticsearch does not provide.
    """
    ELASTICSEARCH_ANALYZERS = ES_Manager.get_analyzers()
    available_analyzer_names = [entry["analyzer"] for entry in ELASTICSEARCH_ANALYZERS]
    # A missing field previously escaped as a bare KeyError; surface it
    # as the same exception type callers already handle, with a message.
    if "analyzer" not in self.post_dict:
        raise ValueError(
            "No analyzer specified. Available analyzers are: '{0}'".format(
                available_analyzer_names))
    user_sent_analyzer = self.post_dict["analyzer"]
    if user_sent_analyzer not in available_analyzer_names:
        raise ValueError(
            "Analyzer '{0}' not available. Available analyzers are: '{1}'".
            format(user_sent_analyzer, available_analyzer_names))
def check_if_analyzer_exists(self):
    """Raise ``ValueError`` if the analyzer named in ``self.post_dict``
    is not among the analyzers Elasticsearch reports.

    NOTE(review): this duplicates ``check_if_analyzer_exists`` defined
    earlier in this module — confirm which copy is intended.
    """
    requested = self.post_dict["analyzer"]
    known_names = [item["analyzer"] for item in ES_Manager.get_analyzers()]
    if requested in known_names:
        return
    raise ValueError(
        "Analyzer '{0}' not available. Available analyzers are: '{1}'".format(
            requested, known_names))
def get_analyzer_names(request):
    """Return a JSON response listing the names of the Elasticsearch
    analyzers reported by ``ES_Manager``."""
    names = [entry["analyzer"] for entry in ES_Manager.get_analyzers()]
    return JsonResponse({"analyzers": names})