def get_resource_class(self, usage):
    """Return the resource class to use for the given *usage*.

    Falls back to a ``modelresource_factory``-generated resource when no
    explicit class is configured in ``self.import_export_args``.

    :param usage: ``'import'`` or ``'export'``; any other value selects
        the generated default resource.
    """
    if usage == 'import':
        # `x or y` is equivalent to the original `x if x else y`, but
        # queries the dict only once instead of twice.
        return (self.import_export_args.get('import_resource_class')
                or modelresource_factory(self.model))
    elif usage == 'export':
        return (self.import_export_args.get('export_resource_class')
                or modelresource_factory(self.model))
    else:
        return modelresource_factory(self.model)
def get_resource_for_model(model, **kwargs):
    """Find or generate the export resource to use for ``model``."""
    # TODO: settings to map model to resource
    opts = model._meta
    model_name = u'{app_label}.{name}'.format(app_label=opts.app_label,
                                              name=model.__name__)
    resource_class = ExportModelResource

    export_conf = settings.EXPORTDB_EXPORT_CONF.get('models')
    model_conf = export_conf.get(model_name) if export_conf is not None else None
    if model_conf is not None:
        # custom resource title
        kwargs['title'] = model_conf.get('title')
        # custom resource class, given as a dotted import path
        if 'resource_class' in model_conf:
            resource_class = import_string(model_conf['resource_class'])
        # an explicit field list means we must use our own factory
        fields = model_conf.get('fields')
        if fields is not None:
            factory = modelresource_factory(model,
                                            resource_class=resource_class,
                                            fields=fields)
            return factory(**kwargs)

    return resources.modelresource_factory(
        model, resource_class=resource_class)(**kwargs)
def get_resource_for_model(model, **kwargs):
    """Find or generate the resource to use for :param:`model`."""
    # TODO: settings to map model to resource
    model_name = u'{app_label}.{name}'.format(
        app_label=model._meta.app_label,
        name=model.__name__,
    )

    conf = settings.EXPORTDB_EXPORT_CONF.get('models')
    model_conf = None if conf is None else conf.get(model_name)

    resource_class = ExportModelResource
    if model_conf is not None:
        # support custom resource titles
        kwargs['title'] = model_conf.get('title')
        # support custom resource classes (dotted import path)
        if 'resource_class' in model_conf:
            resource_class = import_string(model_conf['resource_class'])
        # a field whitelist requires our own factory
        fields = model_conf.get('fields')
        if fields is not None:
            return modelresource_factory(
                model, resource_class=resource_class, fields=fields
            )(**kwargs)

    return resources.modelresource_factory(
        model, resource_class=resource_class
    )(**kwargs)
def __init__(self, app_label=None, model_name=None, resource=None):
    """Resolve the target model and build the resource used for it.

    :param app_label: Django app label of the target model.
    :param model_name: name of the target model.
    :param resource: optional ``ModelResource`` subclass; when omitted a
        resource class is generated with ``modelresource_factory``.
    """
    self.model = apps.get_model(app_label=app_label, model_name=model_name)
    log.debug(resource)
    if resource:
        self.resource = resource()
    else:
        # BUG FIX: modelresource_factory() returns a resource *class*;
        # instantiate it so self.resource is always an instance, matching
        # the branch above.
        self.resource = modelresource_factory(self.model)()
def get_resource_class(self):
    """Return the configured resource class, generating one when absent."""
    # Falls back to an auto-generated resource for self.model.
    return self.resource_class or modelresource_factory(self.model)
def import_todo(request):
    """Import a hard-coded demo Todo row, then redirect to the index.

    Runs a dry-run first and only commits the import when the dry run
    reported no errors.
    """
    todo_resource = resources.modelresource_factory(model=Todo)()
    dataset = tablib.Dataset(['id', 'New Todo'],
                             headers=['text', 'is_completed'])
    result = todo_resource.import_data(dataset, dry_run=True)
    print(result.has_errors())
    # BUG FIX: previously the real import ran even when the dry run
    # reported errors.
    if not result.has_errors():
        todo_resource.import_data(dataset, dry_run=False)
    return redirect('/')
class EnrollmentAdmin(ImportMixin, admin.ModelAdmin):
    """Admin for Enrollment records with CSV import support."""

    # Auto-generated import/export resource for the Enrollment model.
    resource_class = resources.modelresource_factory(model=Enrollment)

    def get_confirm_import_form(self):
        """Use the project's customised confirmation form."""
        return CustomConfirmImportForm

    def get_import_formats(self):
        """Restrict uploads to the custom enrollment format."""
        return [EnrollImportFormat]
def get_resource_class(self, resource_class, model_name):
    """Return the resource class for *model_name*.

    ``resource_class`` may be a dotted import path; when falsy, a resource
    class is generated from the model itself.
    """
    from django.utils.module_loading import import_string
    from import_export.resources import modelresource_factory

    if resource_class:
        return import_string(resource_class)
    return modelresource_factory(django_apps.get_model(model_name))
def setUp(self):
    """Create two books, a dataset describing the first, and a cached loader."""
    self.resource = resources.modelresource_factory(Book)()
    self.dataset = tablib.Dataset(headers=['id', 'name', 'author_email'])
    self.book = Book.objects.create(name="Some book")
    self.book2 = Book.objects.create(name="Some other book")
    # One dataset row matching self.book by primary key.
    self.dataset.append(
        [str(self.book.pk), 'Some book', '*****@*****.**'])
    self.instance_loader = instance_loaders.CachedInstanceLoader(
        self.resource, self.dataset)
def get_import_resource(self):
    """Return ``self.import_resource`` when set, otherwise a generated one."""
    # PEP 8 (E714): use `is not None` rather than `not ... is None`.
    if self.import_resource is not None:
        return self.import_resource
    model = self.get_model()
    return modelresource_factory(model)
def get_resource(model_name):
    """Return an instantiated resource for the named import model."""
    resource = getattr(ralph_resources, model_name + 'Resource', None)
    if not resource:
        # No hand-written resource: generate one from the model class.
        model_class = APP_MODELS[model_name.lower()]
        resource = resources.modelresource_factory(
            model=model_class, resource_class=RalphModelResource)
    return resource()
def loadall():
    """Import one demo Team row, committing only if the dry run is clean."""
    team_resource = resources.modelresource_factory(model=Team)()
    dataset = tablib.Dataset(
        ['', 'New Team', 'just added this one', '1', '1'],
        headers=['id', 'name', 'description', 'league', 'division'])
    result = team_resource.import_data(dataset, dry_run=True)
    # print(result)
    print(result.has_errors())
    # BUG FIX: only run the real import when the dry run found no errors.
    if not result.has_errors():
        team_resource.import_data(dataset, dry_run=False)
def generate_import_resources(models=None):
    """Build a mapping of model name -> instantiated import resource.

    Defaults to the module-level ``_models`` list when *models* is empty.
    """
    if not models:
        models = _models
    return {
        model._meta.model_name: resources.modelresource_factory(model=model)()
        for model in models
    }
def handle(self, *args, **options):
    """Handle the command: create database entries for the selected model.

    If a file path is not provided, default fixture data is loaded from
    'protondx/dashboard/fixtures/'.
    """
    # Guard clause: a model type must be specified.
    if options.get("obj_type", None) is None:
        print("Please provide the model type of data: [--patient] [-p] [--centre] [-c] [--diagnostic] [-d]")
        return

    # Diagnostics are generated from random data, not read from a file.
    if options['obj_type'] == LOAD_DIAGNOSTIC:
        load_diagnostics(options['entries'] or 1000)
        return

    # Map each remaining model type to its resource and default fixture.
    # (The LOAD_DIAGNOSTIC entry the original kept here was unreachable:
    # that case always returns above.)
    model_info = {
        LOAD_PATIENT: (resources.modelresource_factory(model=Patient)(),
                       '../../fixtures/patient_mock.csv'),
        LOAD_CENTRE: (resources.modelresource_factory(model=TestingCentre)(),
                      '../../fixtures/centre_mock.csv'),
    }
    resource, default_fixture = model_info[options['obj_type']]

    # Use the supplied path, or fall back to the bundled fixture.
    if options['path']:
        file_path = options['path']
    else:
        module_dir = os.path.dirname(__file__)  # get current directory
        file_path = os.path.join(module_dir, default_fixture)

    # Open the data source and import it, raising on any bad row.
    with open(file_path, 'r') as f:
        dataset = Dataset().load(f.read(), format='csv')
    resource.import_data(dataset, dry_run=False, raise_errors=True)
def test_arrayfield(self):
    """Importing a comma-joined chapter list populates the ArrayField."""
    chapters = ["Introduction", "Middle Chapter", "Ending"]
    dataset = tablib.Dataset(headers=["id", "name", "chapters"])
    dataset.append(["1", "Book With Chapters", ",".join(chapters)])
    resource = resources.modelresource_factory(model=BookWithChapters)()
    result = resource.import_data(dataset, dry_run=False)
    self.assertFalse(result.has_errors())
    imported = list(BookWithChapters.objects.all())[0]
    self.assertListEqual(imported.chapters, chapters)
def Role_imp(request):
    """Import roles from an uploaded CSV file, then render the import page."""
    if request.method == 'POST':
        role_resource = resources.modelresource_factory(model=models.Roles)()
        dataset = tablib.Dataset()
        dataset.csv = request.FILES['myfile'].read()
        # Dry run first; commit only when the data is clean.
        dry_run = role_resource.import_data(dataset, dry_run=True)
        if not dry_run.has_errors():
            role_resource.import_data(dataset, dry_run=False)  # Actually import now
    return render(request, 'Icarus/Role/Role_imp.html')
def simple_upload(request):
    """Import Person rows from an uploaded file, then render the import page."""
    if request.method == 'POST':
        person_resource = resources.modelresource_factory(model=Person)()
        dataset = Dataset()
        # tablib loads in place; the previously bound `imported_data`
        # return value was unused and has been dropped.
        dataset.load(request.FILES['myfile'].read())
        result = person_resource.import_data(
            dataset, dry_run=True)  # Test the data import
        if not result.has_errors():
            person_resource.import_data(dataset, dry_run=False)  # Actually import now
    return render(request, 'lottery/import.html')
def test_import_model_instance():
    """A row imported for user2 creates the Tag and reports success."""
    user = User.objects.get(username=G.user2_name)
    row = {
        'name': 'math',
        'public': '0',
        'owner': user.id,
        'id': None,
    }
    import_status = import_model_instance(
        row, resources.modelresource_factory(model=Tag)())
    try:
        object_created = bool(
            Tag.objects.get(owner__id=user.id, name='math', public='0'))
    except ObjectDoesNotExist:
        object_created = False
    assert object_created
    assert 'Imported Successfully: tag : math.' == import_status
def create_admin(self, Model):
    """Build and return a read-only, exportable ModelAdmin class for *Model*.

    The generated admin mixes in NoEditMixin and ExportMixin and carries an
    auto-generated import/export resource instance.
    """
    name = Model._meta.object_name
    # Dropped: the ro_fields/fields locals were computed but only fed the
    # commented-out "fields"/"readonly_fields" entries below, so they were
    # dead code.
    resource = modelresource_factory(model=Model)()
    inheritance = (
        NoEditMixin,
        ExportMixin,
        admin.ModelAdmin,
    )
    return type(
        "%sAdmin" % name,
        inheritance,
        {
            "resource_class": resource,
        })
def import_figure_pnl(path):
    """Load figurePnl.csv found under *path* into the FigurePnl model."""
    path_file = os.path.join(path, 'figurePnl.csv')
    if not os.path.isfile(path_file):
        print('not find figurePnl.csv')
        return

    columns = ('date', 'comparer_id', 'figure_id', 'item', 'pnl')
    frame = pd.read_csv(path_file, names=list(columns))
    rows = frame.to_dict('split')['data']

    resource = resources.modelresource_factory(model=FigurePnl)()
    dataset = tablib.Dataset(*rows, headers=columns)
    result = resource.import_data(dataset, dry_run=False)
    print('importing figurePnl: %s' % result)
def import_comparer(path):
    """Load comparer.csv found under *path* into the Comparer model."""
    path_file = os.path.join(path, 'comparer.csv')
    if not os.path.isfile(path_file):
        print('not find comparer.csv')
        return
    df = pd.read_csv(path_file,
                     names=['comparer_id', 'level', 'super_comparer'])
    columns = ('comparer_id', 'level', 'super_comparer_id')
    data = df.to_dict('split')['data']
    comparer_resource = resources.modelresource_factory(model=Comparer)()
    dataset = tablib.Dataset(*data, headers=columns)
    result = comparer_resource.import_data(dataset, dry_run=False)
    # BUG FIX: has_errors is a method; the original printed the bound
    # method object instead of calling it.
    print(result.has_errors())
def set_resource(model):
    """Build an import/export resource for *model*.

    All concrete (non-m2m) fields plus many-to-many fields are listed for
    export; each many-to-many field gets a ManyToManyWidget bound to its
    related model.
    """
    resource = resources.modelresource_factory(model=model)()
    resource.Meta.model = model
    resource.Meta.fields = []
    concrete_fields = model._meta.concrete_fields
    for field in model._meta.get_fields():
        if field in concrete_fields and not field.many_to_many:
            resource.Meta.fields.append(field.name)
        if field.many_to_many:
            resource.Meta.fields.append(field.name)
            # BUG FIX / security: replaced the string-built exec() with a
            # plain setattr — the same assignment, without dynamic code
            # execution.
            setattr(
                resource, field.name,
                fields.Field(widget=ManyToManyWidget(field.related_model)))
    return resource
def uploadData(request):
    """Import accident rows from an uploaded CSV, then render the database page."""
    if request.method == 'POST':
        # data_resource = AccidentResource()
        data_resource = resources.modelresource_factory(
            model=models.Accident)()
        dataset = Dataset()
        upload = request.FILES['importData']
        dataset.load(upload.read().decode('utf-8'), format='csv')
        # Testing data import with a dry run first.
        result = data_resource.import_data(dataset, dry_run=True)
        if not result.has_errors():
            data_resource.import_data(dataset, dry_run=False)  # Actually import now

    data = Accident.objects.all().values()
    context = {
        'data': data,
        'wilayaform': wilaya(),
        'total': len(data),
    }
    return render(request, 'home/bdd.html', context)
def convert_df_to_django_model(df, model, rewrite=False, rows_at_a_time=250):
    """
    Import a given dataframe into Django's ORM with a specified model.

    :df: pandas.DataFrame to convert
    :model: django.db.models.Model class; the ORM decides the target table
    :rewrite: whether to delete the old entries first, default: False
    :rows_at_a_time: number of rows to import per batch, default: 250
    """
    # Guard clause: this only works inside a configured Django project.
    if not os.getenv('DJANGO_SETTINGS_MODULE'):
        raise Exception('This function can only be used in Django projects.')
    from import_export import resources

    if rewrite:
        _clear_model_table(model)
    try:
        # Django's ORM uses incremental IDs by default, so take the next
        # available one; if the table is empty, start at 0.
        query = model.objects.values('id').order_by('-id').first()
        last_id = query['id'] + 1 if query is not None else 0
        dataset = _convert_df_to_dataset(df, last_id)
        p_resource = resources.modelresource_factory(model=model)()
        # Import in fixed-size batches to bound memory use.
        for start in range(0, len(dataset), rows_at_a_time):
            batch = tablib.Dataset(*dataset[start:start + rows_at_a_time],
                                   headers=dataset.headers)
            p_resource.import_data(batch)
    except Exception as err:
        # Deliberate best-effort, as in the original: report the failure
        # and return None (the original's `return print(err)` was None too).
        print(err)
        return None
def import_schemas(data: dict):
    """Import every configured resource's rows from *data*.

    Each resource is dry-run first; on the first resource with errors a
    mapping of its name to base/row errors is returned and processing
    stops. Otherwise each import is committed.
    """
    for param in _ie_resources:
        model_resource = modelresource_factory(model=param['model'])()
        dataset = tablib.Dataset()
        dataset.dict = data[param['name']]
        dry_run = model_resource.import_data(dataset, dry_run=True)
        if dry_run.has_errors():
            row_errors = {
                row_num: [entry.error for entry in entries]
                for row_num, entries in dry_run.row_errors()
            }
            base_errors = [entry.error for entry in dry_run.base_errors]
            return {
                param['name']: {
                    'base_errors': base_errors,
                    'row_errors': row_errors,
                }
            }
        model_resource.import_data(dataset, dry_run=False)
# Import all startups from startups_all.csv through an auto-generated
# resource.
from import_init import *
import tablib
from import_export import resources
from radar_access.models import Startup

startup_resource = resources.modelresource_factory(model=Startup)()

# BUG FIX: the file handle was never closed and the contents were built
# with quadratic `csv += line` concatenation; read it once inside `with`.
with open("startups_all.csv") as f:
    csv = f.read()

dataset = tablib.Dataset()
dataset.csv = csv
result = startup_resource.import_data(dataset, dry_run=False)
def test_create(self):
    """The factory produces a resource bound to Book that maps its id field."""
    resource_cls = resources.modelresource_factory(Book)
    self.assertIn('id', resource_cls.fields)
    self.assertEqual(resource_cls._meta.model, Book)
def test_export_field_with_appropriate_format(self):
    """Exporting the JSON column serialises it via json.dumps."""
    resource = resources.modelresource_factory(model=BookWithChapters)()
    exported = resource.export(BookWithChapters.objects.all())
    assert exported[0][3] == json.dumps(self.json_data)
# Smoke-test the Industry import with django-import-export.
from django.test import TestCase
import tablib
from import_export import resources
from .models import Industry

# Create your tests here.
industry_resource = resources.modelresource_factory(model=Industry)()
# NOTE(review): the row has 2 values but 3 headers are declared
# ('id', 'industry', 'industry_code'); tablib raises InvalidDimensions on
# mismatched row widths — confirm the intended row/header shape.
dataset = tablib.Dataset(['', 'New book'],
                         headers=['id', 'industry', 'industry_code'])
result = industry_resource.import_data(dataset, dry_run=True)
print(result.has_errors())
import tablib
from import_export import fields, resources, widgets
from . import models

# Auto-generated resources used for nested (de)hydration below.
CategoryResource = resources.modelresource_factory(models.Category)
QuestionResource = resources.modelresource_factory(models.Question)


class ChecklistResource(resources.ModelResource):
    """Resource that carries related categories/questions as JSON columns."""

    questions = fields.Field(
        column_name='questions',
        widget=widgets.JSONWidget(),
    )
    category_data = fields.Field(column_name='category_data',
                                 widget=widgets.JSONWidget())

    def before_import_row(self, row, **kwargs):
        # Import the embedded category JSON (if any) before the row itself,
        # so the checklist can reference an existing category.
        if row.get('category_data'):
            dataset = tablib.Dataset().load(row.get('category_data'), 'json')
            CategoryResource().import_data(dataset)

    def dehydrate_category_data(self, checklist):
        # Export the checklist's category as a JSON string (None when the
        # checklist has no category).
        if checklist.category:
            dataset = CategoryResource().export(
                queryset=models.Category.objects.filter(
                    pk=checklist.category.pk))
            return dataset.json

    def dehydrate_questions(self, checklist):
        # NOTE(review): the exported dataset is never returned, so this
        # hook yields None — likely a missing `return dataset.json`
        # (compare dehydrate_category_data), or the source may be
        # truncated here; confirm against the original file.
        dataset = QuestionResource().export(queryset=checklist.questions.all())
def get_resource_class(self):
    """Return the explicit resource class, or generate one for the model."""
    if self.resource_class:
        return self.resource_class
    return modelresource_factory(self.model)
def test_export_field_with_appropriate_format(self):
    """The JSON column exports as the json.dumps of the stored data."""
    queryset = BookWithChapters.objects.all()
    result = resources.modelresource_factory(
        model=BookWithChapters)().export(queryset)
    expected = json.dumps(self.json_data)
    assert result[0][3] == expected
# Import one demo row through an auto-generated resource, then dump the
# table as CSV.
import tablib
from import_export import resources
from dataVisualizer.resources import AluResource

# NOTE(review): AluResource is passed as `model=` although it is imported
# from a resources module — confirm it really is a model class.
alu_resource = resources.modelresource_factory(model=AluResource)()
dataset = tablib.Dataset(
    ['', '', 'New book', 'Bla', 'Bla'],
    headers=['id', 'SampleCode', 'SampleType', 'AVG_Norm', 'contIndex'])

result = alu_resource.import_data(dataset, dry_run=True)
print(result.has_errors())
# BUG FIX: commit the import only when the dry run reported no errors.
if not result.has_errors():
    alu_resource.import_data(dataset, dry_run=False)

dataset = AluResource().export()
print(dataset.csv)
import tablib
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from studio.models import contamination


class ContaminationResource(resources.ModelResource):
    """Explicit resource for the contamination model.

    Renamed from ``con_resource``: the original class name was immediately
    shadowed by the module-level variable of the same name below, making
    the class unreachable.
    """

    class Meta:
        model = contamination


con_resource = resources.modelresource_factory(model=contamination)()
# BUG FIX: tablib has no `dataset` attribute (the class is `Dataset`), and
# its constructor does not read a file path; load the CSV contents
# explicitly, treating the first row as data (headers=False).
with open('experiment.csv') as csv_file:
    dataset = tablib.Dataset().load(csv_file.read(), format='csv',
                                    headers=False)
result = con_resource.import_data(dataset, dry_run=False)