def RequestImport(request):
    if request.method == 'POST':
        request_resource = RequestResource()
        dataset = Dataset()
        new_requests = request.FILES['request_file']
        imported_data = dataset.load(new_requests.read())
        result = request_resource.import_data(dataset, dry_run=True)  # Test the data import
        if not result.has_errors():
            # dry_run=False here: the original repeated dry_run=True,
            # which never committed the import
            request_resource.import_data(dataset, dry_run=False)
    return render(request, 'user_console/import_requests.html')
def bulk_import_daily_values(resource_cls: type[DailyValueResource], query):
    ds = Dataset()
    # query: a list of dicts keyed by the resource's import headers
    ds.dict = query
    resource = resource_cls()
    result = resource.import_data(ds, dry_run=False)
    if result.has_errors():
        logger.error("Import failed. Showing first 10 errors.")
        # iterate the first 10 row results via Result.rows
        for row in result.rows[:10]:
            for error in row.errors:
                logger.error(error.error)
    else:
        logger.info("Import success! :: %s", str(result.totals))
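# Usage sketch (hypothetical column names and values): since `query` is
# assigned to Dataset.dict, it should be a list of dicts whose keys match the
# resource's import headers.
example_query = [
    {'date': '2021-01-01', 'value': 42.0},
    {'date': '2021-01-02', 'value': 43.5},
]
# bulk_import_daily_values(DailyValueResource, example_query)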
def main(argv):
    dataset = Dataset(argv.data)
    data = tablib.Dataset(headers=['Question', 'Answer'])
    for q, a in dataset:
        data.append((q, a))
    if argv.format == 'json':
        with open(argv.output, 'w') as f:
            # export('json') returns a JSON string; decode it first so
            # json.dump can re-serialize it with indentation (the original
            # dumped the string itself, producing double-encoded JSON)
            json.dump(json.loads(data.export('json')), f, indent=4)
    else:
        with open(argv.output, 'wb') as f:
            f.write(data.export('xls'))
def __init__(self, target, thread=100, path=None, format='csv'):
    Module.__init__(self)
    self.subdomains = set()
    self.module = 'Check'
    self.source = 'Takeover'
    self.target = target
    self.thread = thread
    self.path = path
    self.format = format
    self.fingerprints = None
    self.subdomainq = Queue()
    self.cnames = list()
    self.results = Dataset()
def clean_csv_file(self, *args, **kwargs):
    csv_file = self.cleaned_data['csv_file']
    csv_file.seek(0)
    dataset = Dataset().load(csv_file.read().decode('utf-8'), format='csv')
    # start=2: line 1 of the file is the header row
    for idx, row in enumerate(dataset, start=2):
        try:
            self.importer.validate_row(row)
        except ValidationError as e:
            raise forms.ValidationError('Line {}: {}'.format(
                idx, '\n'.join(e.messages)))
    return csv_file
def inventoris_upload(request):
    if request.method == 'POST':
        inventori_resource = InventoriResource()
        dataset = Dataset()
        new_inventoris = request.FILES['myfile']
        dataset.load(new_inventoris.read())
        result = inventori_resource.import_data(dataset, dry_run=True)
        if not result.has_errors():
            inventori_resource.import_data(dataset, dry_run=False)
    return render(request, 'templates/import.html')
def create_consumption_data(self, path, file):
    contents = self.open_csv_file(path, file)
    dataset = Dataset().load(contents)
    with transaction.atomic():
        for datetime, consumption in dataset:
            user_id = file.replace('.csv', '')
            # .get() raises DoesNotExist rather than returning None, so use
            # .filter().first() to make the truthiness check below meaningful
            user = UserData.objects.filter(user_id=user_id).first()
            if user:
                Consumption.objects.create(
                    user=user,
                    datetime=datetime,
                    consumption=int(float(consumption)),
                )
def to_dataset(observations: AnyObservations) -> Dataset:
    """Convert observations to a generic tabular dataset.

    This can be converted to any of the `formats supported by tablib
    <https://tablib.readthedocs.io/en/stable/formats>`_.
    """
    if isinstance(observations, Dataset):
        return observations

    flat_observations = flatten_observations(observations, flatten_lists=True)
    dataset = Dataset()
    headers, flat_observations = _fix_dimensions(flat_observations)
    dataset.headers = headers
    dataset.extend([item.values() for item in flat_observations])
    return dataset
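# Usage sketch for to_dataset(): the returned tablib Dataset can be exported
# to any supported format. This helper and its default path are hypothetical,
# not part of the original module.
def export_observations_csv(observations: AnyObservations,
                            path: str = 'observations.csv') -> None:
    dataset = to_dataset(observations)
    with open(path, 'w') as f:
        f.write(dataset.export('csv'))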
def simple_upload(request):
    if request.method == 'POST':
        umuryango_resource = UmuryangoResource()
        dataset = Dataset()
        new_umuryangos = request.FILES['myfile']
        imported_data = dataset.load(new_umuryangos.read())
        result = umuryango_resource.import_data(dataset, dry_run=True)
        if not result.has_errors():
            umuryango_resource.import_data(dataset, dry_run=False)
    return render(request, 'core/import.html')
def simple_upload(request):
    if request.method == 'POST':
        resource = EmployeeResource()
        dataset = Dataset()
        new_employees = request.FILES['myfile']
        imported_data = dataset.load(new_employees.read())
        result = resource.import_data(dataset, dry_run=True)  # Test the data import
        if not result.has_errors():
            resource.import_data(dataset, dry_run=False)  # Actually import now
    return redirect('/')
def onExport(self):
    try:
        data = Dataset(*self._items, headers=self._titleLine)
        fileName = time.strftime('%m-%d-%H_%M_%S', time.localtime()) + '-performance.xls'
        with open(fileName, 'wb') as f:
            f.write(data.export('xls'))
        QMessageBox.information(self, 'Export succeeded!', 'Excel file name: ' + fileName)
    except Exception as err:
        QMessageBox.warning(self, 'Export failed!', str(err))
def simple_upload(request):
    if request.method == 'POST':
        person_resource = Camerapost()
        dataset = Dataset()
        new_persons = request.FILES['myfile']
        imported_data = dataset.load(new_persons.read())
        result = person_resource.import_data(dataset, dry_run=True)  # Test the data import
        if not result.has_errors():
            person_resource.import_data(dataset, dry_run=False)  # Actually import now
    return render(request, 'upload_form.html')
def _import_file(fpath, resource_class, do_raise=True):
    try:
        log.info(_("Importing file {}.").format(fpath))
        with open(fpath, "r") as json_file:
            data = Dataset().load(json_file.read(), format="json")
        resource = resource_class()
        log.info(_("...Importing resource {}.").format(
            resource.__class__.__name__))
        return resource.import_data(data, raise_errors=do_raise)
    except AttributeError:
        log.error(_("FAILURE importing file {}!").format(fpath))
        raise
def simple_upload(request):
    if request.method == 'POST':
        sender_resource = SenderResource()
        dataset = Dataset()
        new_senders = request.FILES['csvfile']
        imported_data = dataset.load(new_senders.read())
        result = sender_resource.import_data(dataset, dry_run=True)
        if not result.has_errors():
            sender_resource.import_data(dataset, dry_run=False)
    return render(request, 'core/simple_upload.html')
def product_mst_upload(request):
    if request.method == 'POST':
        prodmst_resource = prodMstResource()
        dataset = Dataset()  # note: unused; rows are read with csv.reader below
        uploaded_file = request.FILES['myfile']
        decoded_data = uploaded_file.read().decode('UTF-8')
        io_string = io.StringIO(decoded_data)
        next(io_string)  # skip the header row
        for row in csv.reader(io_string, delimiter=',', quotechar='|'):
            created = prod_mst.objects.create(
                Product_Code=row[0]
            )
    return render(request, 'core/simple_upload.html')
def simple_upload(request):
    if request.method == 'POST':
        person_resource = PersonResource()
        dataset = Dataset()
        new_persons = request.FILES['myfile']
        imported_data = dataset.load(new_persons.read())
        result = person_resource.import_data(dataset, dry_run=True)
        if not result.has_errors():
            person_resource.import_data(dataset, dry_run=False)
    return render(request, 'simpleapp/import.html')
def simple_upload(request):
    if request.method == 'POST':
        organization_resource = OrganizationResource()
        dataset = Dataset()
        new_orgs = request.FILES['myfile']
        imported_data = dataset.load(new_orgs.read())
        result = organization_resource.import_data(dataset, dry_run=True)  # Test the data import
        if not result.has_errors():
            organization_resource.import_data(dataset, dry_run=False)  # Actually import now
    return render(request, 'core/simple_upload.html')
def simple_upload(request):
    html = TemplateResponse(request, 'upload_category.html')
    if request.method == 'POST':
        # instantiate the resource (the original assigned the class itself)
        category_resource = CategoryResource()
        dataset = Dataset()
        new_categories = request.FILES['myfile']
        imported_data = dataset.load(new_categories.read())
        result = category_resource.import_data(imported_data, dry_run=True)
        if not result.has_errors():
            category_resource.import_data(imported_data, dry_run=False)
    return HttpResponse(html.render())
def simple_upload(request):
    if request.method == 'POST':
        # Record the start time to calculate execution time
        start_time = time()
        dataset = Dataset()
        file = request.FILES['myfile']
        if file.name.endswith('.xlsx'):
            imported_data = dataset.load(file.read(), format='xlsx')
        elif file.name.endswith('.xls'):
            imported_data = dataset.load(file.read(), format='xls')
        elif file.name.endswith('.csv'):
            imported_data = dataset.load(file.read().decode('utf-8'), format='csv')
        else:
            return render(
                request, 'create-student.html', {
                    "error": "Accepted file extensions: (xlsx), (xls), (csv)",
                    "data_sources": Data_source.objects.all()
                })
        number_of_records = 0
        for data in imported_data:
            number_of_records += 1
            # data[7] is expected to be "yes" or "no"; defaulting to 0 keeps
            # it_background from being unbound on unexpected values
            if data[7] == "yes":
                it_background = 1
            else:
                it_background = 0
            value = Student(name=data[0],
                            number=data[1],
                            email=data[2],
                            linkedin=data[3],
                            github=data[4],
                            gender=data[5],
                            education_level=data[6],
                            it_background=it_background,
                            address=data[8],
                            data_source_id=request.POST.get("data_source"))
            value.save()
        # Save to the activity log
        data_source = value.data_source.name
        execution_time = time() - start_time
        job = ActivityLog(file_name=file.name,
                          execution_time_in_seconds=execution_time,
                          data_source_name=data_source,
                          number_of_records=number_of_records)
        job.save()
    return redirect("/show")
def fsapl(request):
    user = request.user
    data_1 = Input.objects.filter(user=user)
    error = ''
    for i in data_1:
        pid = i.project  # last project wins; mirrors the original behavior
    id = request.GET['id']
    data = FSAPL.objects.filter(project=id)
    if request.method == 'POST':
        input_resource = FSAPLResource()
        dataset = Dataset()
        new_input = request.FILES.get('myFile')
        if new_input is None:
            error = 'Please choose a file!'
        else:
            imported_data = dataset.load(new_input.read())
            # take the project id from the first imported row
            for i in imported_data['project']:
                vari = i
                break
            any_data = FSAPL.objects.filter(project=vari)
            if not any_data:
                result = input_resource.import_data(
                    dataset, dry_run=True)  # Test the data import
                if result.has_errors():
                    error = "Invalid Input Data!"
                else:
                    input_resource.import_data(
                        dataset, dry_run=False)  # Actually import now
                    return redirect(user_details)
            else:
                error = 'Invalid Input Data!'
    # data = FSAPL.objects.all()
    # for i in data:
    #     obj = YOYGrowth()
    #     obj.project = i.project
    #     obj.report = i.report
    #     obj.year_neg_1 = i.yoy_growth_neg_1
    #     obj.year_0 = i.yoy_growth_0
    #     obj.year_pos_1 = i.yoy_growth_pos_1
    #     obj.year_pos_2 = i.yoy_growth_pos_2
    #     obj.year_pos_3 = i.yoy_growth_pos_3
    #     obj.year_pos_4 = i.yoy_growth_pos_4
    #     obj.year_pos_5 = i.yoy_growth_pos_5
    #     obj.save()
    return render(request, 'fsapl.html', {
        'data': data,
        'pid': pid,
        'error': error
    })
def analyze():
    name = request.args.get("myselect")
    option_var = request.args.get("myoption")
    if name not in header:
        flash("Please select a header and an option")
        return redirect("/")
    elif option_var not in option:
        flash("Option not found")
        return redirect("/")
    df = pandas.read_csv(file_name)
    try:
        if option_var == "mean":
            set_data = df[name].mean()
        elif option_var == "sum":
            set_data = df[name].sum()
        elif option_var == "max":
            set_data = df[name].max()
        elif option_var == "count":
            set_data = df[name].count()
        elif option_var == "std":
            set_data = df[name].std()
        elif option_var == "var":
            set_data = df[name].var()
        elif option_var == "min":
            set_data = df[name].min()
    except Exception:
        flash("Please make sure the option is used with a valid header; "
              "some options can't be used with string values!")
        return redirect("/")
    imported_data = Dataset().load(open(file_name).read())
    data = imported_data[name]
    new_list = []
    for d in data:
        try:
            if d.isdigit():
                new_list.append(int(d))
            elif type(d) == str:
                new_list.append(float(d))
        except Exception:
            flash("This option only works with numeric values")
            return redirect("/")
    graph = pygal.Line()
    graph.title = "Full customization option for " + str(name)
    graph.x_labels = []
    graph.add(name, new_list)
    graph_data = graph.render_data_uri()
    return render_template("analyze.html",
                           set_data=set_data,
                           option_var=option_var,
                           name=name,
                           graph_data=graph_data)
def exportDivSchedulesRefFormat(self, startgameday, prefix=""):
    headers = ['Game#', 'Game#', 'Tourn Match#', 'Date', 'Day', 'Time',
               'Division', 'Round', 'Home', 'Visitor', 'Field',
               'cr_trust', 'ar_trust', 'm_trust']
    datasheet = Dataset(title=prefix)
    datasheet.headers = list(headers)
    schedule_list = self.dbinterface.findDivisionSchedulePHMSARefFormat(
        startgameday)
    if prefix:
        tabformat_list = [
            (_offset + x[match_id_CONST], x[match_id_CONST],
             tournMapGamedayIdToCalendar(x[gameday_id_CONST]),
             tournMapGamedayIdToDate(x[gameday_id_CONST]),
             datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
             x[age_CONST] + x[gen_CONST], x[round_CONST],
             x[home_CONST], x[away_CONST],
             self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'],
             _reftrust_level[_rindexerGet(
                 getTournDivID(x[age_CONST], x[gen_CONST]))]['cr'],
             _reftrust_level[_rindexerGet(
                 getTournDivID(x[age_CONST], x[gen_CONST]))]['ar'],
             _reftrust_level[_rindexerGet(
                 getTournDivID(x[age_CONST], x[gen_CONST]))]['ment'])
            for x in schedule_list]
        # renumber with a running offset in the first column
        atabformat_list = [
            (_offset + i, j[0], j[1], j[2], j[3], j[4], j[5], j[6], j[7],
             j[8], j[9], j[10], j[11], j[12])
            for i, j in enumerate(tabformat_list)]
    else:
        tabformat_list = [
            (mapGamedayIdToCalendar(x[gameday_id_CONST], format=1), 'Saturday',
             datetime.strptime(x[start_time_CONST], "%H:%M").strftime("%I:%M %p"),
             x[age_CONST] + x[gen_CONST], x[home_CONST], x[away_CONST],
             self.fieldinfo[self.findexerGet(x[venue_CONST])]['name'])
            for x in schedule_list]
        atabformat_list = tabformat_list
    for tabformat in atabformat_list:
        datasheet.append(tabformat)
    sheet_xls_relpath = prefix + '_RefFormat.xls'
    sheet_xls_abspath = os.path.join(
        '/home/henry/workspace/datagraph/bottle_baseball/download/xls',
        sheet_xls_relpath)
    with open(sheet_xls_abspath, 'wb') as f:
        # note: newer tablib versions use datasheet.export('xls') instead
        f.write(datasheet.xls)
def su(request):
    if request.method == 'POST':
        person_resource = PersonResource()
        dataset = Dataset()
        new_persons = request.FILES['login']
        imported_data = dataset.load(new_persons.read(), format='xls')
        for data in imported_data:
            print(data)
            value = Login(
                id=int(data[0]),
                loginid=int(data[1]),
                loginname=data[2],
                password=int(data[3]),
            )
            value.save()

        person_resource = PersonResource()
        dataset = Dataset()
        new_persons = request.FILES['product']
        imported_data = dataset.load(new_persons.read(), format='xls')
        for data in imported_data:
            print(data)
            value = product(id=int(data[0]),
                            pname=data[1],
                            decs=data[2],
                            date=data[3],
                            ca=data[4],
                            sca=data[5],
                            price=int(data[6]),
                            image=data[7])
            value.save()
    return render(request, 'shop/su.html')
def vader_analyse(file_input):
    """Labels the dataset with the VADER sentiment tool."""
    # note: file_input is currently unused; tweets come from the database
    sentences = getdata_from_db(1000)
    print("Working on %d tweets" % len(sentences))
    headers = ('text', 'label', 'score')
    analyzed_data = []
    sid = SentimentIntensityAnalyzer()
    for line in sentences:
        text = pre.clean(line)
        scores = sid.polarity_scores(text)
        analyzed_data.append((text, getlabel(scores), scores['compound']))
    save_data_to_db(analyzed_data)
    analyzed = Dataset(*analyzed_data, headers=headers)
    return analyzed
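# Usage sketch (hypothetical helper, not part of the original module): the
# Dataset returned by vader_analyse() can be exported with tablib, e.g. to
# CSV, to inspect the assigned labels.
def save_labeled_tweets(path='vader_labels.csv'):
    analyzed = vader_analyse(None)  # file_input is unused by vader_analyse()
    with open(path, 'w') as f:
        f.write(analyzed.export('csv'))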
def to_xls(root: Path, output_file: Path):
    data = Dataset()
    data.title = f"{root.name} CMS"
    data.headers = ['name', 'de', 'en', 'fr', 'it', 'uri']
    rows = to_dict_table(collect_all(root))
    for row in to_row_tuples(rows):
        data.append(row)
    if output_file is None:
        output_file = Path.cwd() / 'output.xls'
    with open(output_file, 'wb') as out:
        out.write(data.export('xls'))
def upload(request, id=None):
    data_source_name = ''
    if request.method == 'POST':
        start_time = time.time()
        # Take the data_source id from the select input
        id_source_selected = request.POST['data_source']
        # data_resource = DataResources()
        dataset = Dataset()
        if not request.FILES:
            return redirect('/manage_store')
        new_data = request.FILES['myfile']
        # Get the file name
        file_name = request.FILES['myfile'].name
        if not new_data.name.endswith('xls') and not new_data.name.endswith('xlsx'):
            messages.info(request, 'wrong format')
            return redirect('/manage_store')
        if new_data.name.endswith('xls'):
            import_data = dataset.load(new_data.read(), format='xls')
        elif new_data.name.endswith('xlsx'):
            import_data = dataset.load(new_data.read(), format='xlsx')
        # Parameterize the raw query instead of interpolating the value,
        # which was open to SQL injection
        for p in Data_source.objects.raw(
                'SELECT * FROM managestore_data_source WHERE id = %s',
                [id_source_selected]):
            data_source_name = p.name
        if not Manage_store.objects.filter(file_name=file_name).exists():
            ManageStoreId = upload_manage_source(data_source_name, file_name,
                                                 0, 0, id_source_selected)
        else:
            return redirect('/manage_store')
        for data in import_data:
            value = Data(data[0], data[1], data[2], data[3], data[4], data[5],
                         data[6], data[7], data[8], data[9], data[10],
                         data[11], data[12], data[13], ManageStoreId)
            value.save()
        execution_time_to_upload_sheet = time.time() - start_time
        Manage_store.objects.filter(id=ManageStoreId).update(
            execution_time=execution_time_to_upload_sheet,
            number_of_records=len(
                Data.objects.filter(manage_store_id=ManageStoreId)))
    data = Data.objects.all()
    read_data_source = Data_source.objects.all()
    return render(
        request, 'app/manage_store.html', {
            'Data': data,
            'read_data_source': read_data_source,
            'data_source_name': data_source_name,
            'manageStore': Manage_store.objects.all(),
            'manageStoreId': Manage_store.objects.filter(id=id)
        })
def vv(request, res):
    dataset = Dataset()
    new = request.FILES['myfile']
    imported_data = dataset.load(new.read())
    result = res.import_data(dataset, dry_run=True)  # Test the data import
    if not result.has_errors():
        value = True
        res.import_data(dataset, dry_run=False)  # Actually import now
    else:
        value = False
    return value
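# Usage sketch (hypothetical view, resource, and template names): vv() is
# generic over django-import-export resources, so a view can delegate the
# validate-then-commit dance to it.
def person_upload(request):
    if request.method == 'POST' and vv(request, PersonResource()):
        return render(request, 'import_success.html')
    return render(request, 'import.html')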
def upload_file(request):
    if request.method == 'POST':
        brand_resource = BrandResource()
        new_brands = request.FILES['myfile']
        print(f"file: {new_brands}")
        imported_data = Dataset().load(new_brands.read().decode('UTF-8'),
                                       format='csv')
        print(f"data: {imported_data}")
        result = brand_resource.import_data(imported_data, dry_run=True)
        if not result.has_errors():
            brand_resource.import_data(imported_data, dry_run=False)
    return render(request, 'pages/upload_file.html')
def post_import(request):
    if request.method == 'POST':
        post_resource = PostResource()
        dataset = Dataset()
        new_posts = request.FILES['myfile']
        imported_data = dataset.load(new_posts.read())
        result = post_resource.import_data(
            dataset, dry_run=True)  # Test the data import
        if not result.has_errors():
            post_resource.import_data(dataset, dry_run=False)  # Actually import now
    return render(request, 'blog/post_import.html')
def test_logentry_creation_with_import_obj_exception(self):
    # from https://mail.python.org/pipermail/python-dev/2008-January/076194.html
    def monkeypatch_method(cls):
        def decorator(func):
            setattr(cls, func.__name__, func)
            return func
        return decorator

    # Cause an exception in import_row, but only after import is confirmed,
    # so a failure only occurs when ImportMixin.process_import is called.
    class R(BookResource):
        def import_obj(self, obj, data, dry_run):
            if dry_run:
                super().import_obj(obj, data, dry_run)
            else:
                raise Exception

    @monkeypatch_method(BookAdmin)
    def get_resource_class(self):
        return R

    # Verify that when an exception occurs in import_row, when raise_errors
    # is False, the returned row result has a correct import_type value,
    # so generating log entries does not fail.
    @monkeypatch_method(BookAdmin)
    def process_dataset(self, dataset, confirm_form, request, *args, **kwargs):
        resource = self.get_import_resource_class()(
            **self.get_import_resource_kwargs(request, *args, **kwargs))
        return resource.import_data(
            dataset, dry_run=False, raise_errors=False,
            file_name=confirm_form.cleaned_data['original_file_name'],
            user=request.user, **kwargs)

    dataset = Dataset(headers=["id", "name", "author_email"])
    dataset.append([1, "Test 1", "*****@*****.**"])
    input_format = '0'
    content = dataset.csv
    f = SimpleUploadedFile("data.csv", content.encode(), content_type="text/csv")
    data = {
        "input_format": input_format,
        "import_file": f,
    }
    response = self.client.post('/admin/core/book/import/', data)
    self.assertEqual(response.status_code, 200)
    confirm_form = response.context['confirm_form']
    data = confirm_form.initial
    response = self.client.post('/admin/core/book/process_import/', data, follow=True)
    self.assertEqual(response.status_code, 200)