def download_raw_extracted_records(request, pk):
    """Stream the raw ExtractedChemical rows of a data group as CSV.

    When the group has extracted text, the CSV is named after the group and
    maps ``id`` to ``ExtractedChemical_id``; otherwise a generically named
    (and presumably empty) CSV is returned.
    """
    fields = [
        "extracted_text_id",
        "id",
        "raw_cas",
        "raw_chem_name",
        "raw_min_comp",
        "raw_central_comp",
        "raw_max_comp",
        "unit_type__title",
    ]
    datagroup = DataGroup.objects.get(pk=pk)
    has_text = ExtractedText.objects.filter(
        data_document__data_group=datagroup).first()
    if not has_text:
        # NOTE(review): filtering by data_document__id=pk with a *data group*
        # pk looks suspect -- presumably yields an empty CSV; confirm intent.
        empty_qs = ExtractedChemical.objects.filter(
            extracted_text__data_document__id=pk).values(*fields)
        return render_to_csv_response(
            empty_qs,
            filename="raw_extracted_records.csv",
            use_verbose_names=False,
        )
    records = ExtractedChemical.objects.filter(
        extracted_text__data_document__data_group=datagroup).values(*fields)
    return render_to_csv_response(
        records,
        filename=datagroup.get_name_as_slug() + "_raw_extracted_records.csv",
        field_header_map={"id": "ExtractedChemical_id"},
        use_verbose_names=False,
    )
def data_group_registered_records_csv(request, pk):
    """Export the registered data documents of a data group as CSV.

    Unknown group pks produce an empty CSV built over the base columns.
    """
    fields = ['filename', 'title', 'document_type', 'url', 'organization']
    dg = DataGroup.objects.filter(pk=pk).first()
    if dg is None:
        # data_group_id=0 never matches, so this serves an empty CSV shell.
        empty = DataDocument.objects.filter(data_group_id=0).values(*fields)
        return render_to_csv_response(
            empty, filename="registered_records.csv", use_verbose_names=False)
    fields.insert(0, "id")
    docs = DataDocument.objects.filter(data_group_id=pk).values(*fields)
    return render_to_csv_response(
        docs,
        filename=dg.get_name_as_slug() + "_registered_records.csv",
        field_header_map={"id": "DataDocument_id"},
        use_verbose_names=False)
def download_registry(request):
    """Download the full contacts registry as a CSV file."""
    fields = ('First_Name', 'Middle_Name', 'Last_Name', 'Date_of_Birth',
              'Gender', 'Island', 'Date_of_Baptism', 'Province',
              'Congregation', 'Status', 'Comments')
    return djqscsv.render_to_csv_response(contacts.objects.values(*fields))
def list(self, request, *args, **kwargs):
    """
    Patient List

    `without_facility` accepts boolean - default is false -
        if true: shows only patients without a facility mapped
        if false (default behaviour): shows only patients with a facility
        mapped

    `disease_status` accepts - string and int -
        SUSPECTED = 1 POSITIVE = 2 NEGATIVE = 3
        RECOVERY = 4 RECOVERED = 5 EXPIRED = 6
    """
    # CSV branch: flatten the filtered queryset to the mapped columns.
    if settings.CSV_REQUEST_PARAMETER in request.GET:
        csv_columns = PatientRegistration.CSV_MAPPING.keys()
        rows = self.filter_queryset(self.get_queryset()).values(*csv_columns)
        return render_to_csv_response(
            rows,
            field_header_map=PatientRegistration.CSV_MAPPING,
            field_serializer_map=PatientRegistration.CSV_MAKE_PRETTY,
        )
    # Default DRF list rendering.
    return super(PatientViewSet, self).list(request, *args, **kwargs)
def dg_raw_extracted_records(request, pk):
    """CSV download of raw ExtractedChemical rows for one data group."""
    fields = ['extracted_text_id', 'id', 'raw_cas', 'raw_chem_name',
              'raw_min_comp', 'raw_central_comp', 'raw_max_comp',
              'unit_type__title']
    dg = DataGroup.objects.get(pk=pk)
    if ExtractedText.objects.filter(data_document__data_group=dg).first():
        rows = ExtractedChemical.objects.filter(
            extracted_text__data_document__data_group_id=pk).values(*fields)
        return render_to_csv_response(
            rows,
            filename=dg.get_name_as_slug() + "_raw_extracted_records.csv",
            field_header_map={"id": "ExtractedChemical_id"},
            use_verbose_names=False)
    # No extracted text yet: presumably serves an empty CSV (pk here is a
    # data-group id reused as a document id -- TODO confirm).
    rows = ExtractedChemical.objects.filter(
        extracted_text__data_document__id=pk).values(*fields)
    return render_to_csv_response(
        rows, filename='raw_extracted_records.csv', use_verbose_names=False)
def list(self, request, *args, **kwargs):
    """
    Facility List

    Supported filters
    - `name` - supports for ilike match
    - `facility_type` - ID
    - `district` - ID
    - `district_name` - supports for ilike match
    - `local_body` - ID
    - `local_body_name` - supports for ilike match
    - `state_body` - ID
    - `state_body_name` - supports for ilike match

    Other query params
    - `all` - bool. Returns all facilities with a limited dataset,
      accessible to all users.
    - `search_text` - string. Searches across name, district name and
      state name.
    """
    if settings.CSV_REQUEST_PARAMETER not in request.GET:
        return super(FacilityViewSet, self).list(request, *args, **kwargs)

    mapping = Facility.CSV_MAPPING.copy()
    pretty_mapping = Facility.CSV_MAKE_PRETTY.copy()
    # At most one related dataset may be merged into the export; the first
    # matching query key wins (mirrors the original if/elif chain).
    for query_key, related in (
        (self.FACILITY_CAPACITY_CSV_KEY, FacilityCapacity),
        (self.FACILITY_DOCTORS_CSV_KEY, HospitalDoctors),
        (self.FACILITY_TRIAGE_CSV_KEY, FacilityPatientStatsHistory),
    ):
        if query_key in request.GET:
            mapping.update(related.CSV_RELATED_MAPPING.copy())
            pretty_mapping.update(related.CSV_MAKE_PRETTY.copy())
            break
    rows = self.filter_queryset(self.get_queryset()).values(*mapping.keys())
    return render_to_csv_response(rows,
                                  field_header_map=mapping,
                                  field_serializer_map=pretty_mapping)
def format_codes_csv(query_set):
    """
    Given a QuerySet of DownloadCode objects, format it as a CSV file.

    Returns an HttpResponse object containing a CSV file as an attachment.
    """
    columns = (
        'id',
        'created_date',
        'batch__work__artist__name',
        'batch__work__title',
        'max_uses',
        'times_used',
        'last_used_date',
        'batch__label',
        'batch__private_note',
        'batch__created_date',
        'batch__id',
        'batch__work__artist__id',
        'batch__work__id',
    )
    header_map = {
        'id': 'download_code',
        'created_date': 'code_created_date',
        'batch__work__artist__name': 'artist',
        'batch__work__title': 'title',
        'batch__label': 'batch_label',
        'batch__created_date': 'batch_created_date',
        'batch__private_note': 'batch_private_note',
        'batch__id': 'batch_id',
        'batch__work__artist__id': 'artist_id',
        'batch__work__id': 'work_id',
    }
    # Dates are rendered as YYYY/MM/DD rather than the default repr.
    serializer_map = {
        'created_date': (lambda x: x.strftime('%Y/%m/%d')),
        'batch__created_date': (lambda x: x.strftime('%Y/%m/%d')),
    }
    return render_to_csv_response(query_set.values(*columns),
                                  field_header_map=header_map,
                                  field_serializer_map=serializer_map)
def list(self, request, *args, **kwargs):
    """
    Facility List

    Supported filters
    - `name` - supports for ilike match
    - `facility_type` - ID
    - `district` - ID
    - `district_name` - supports for ilike match
    - `local_body` - ID
    - `local_body_name` - supports for ilike match
    - `state_body` - ID
    - `state_body_name` - supports for ilike match

    Other query params
    - `all` - bool. Returns all facilities with a limited dataset,
      accessible to all users.
    - `search_text` - string. Searches across name, district name and
      state name.
    """
    if settings.CSV_REQUEST_PARAMETER not in request.GET:
        return super(FacilityViewSet, self).list(request, *args, **kwargs)
    # CSV export: project only the mapped columns.
    rows = self.filter_queryset(
        self.get_queryset()).values(*Facility.CSV_MAPPING.keys())
    return render_to_csv_response(
        rows,
        field_header_map=Facility.CSV_MAPPING,
        field_serializer_map=Facility.CSV_MAKE_PRETTY)
def get(self, *args, **kwargs):
    """Render the form list page, or a CSV when a form state is requested."""
    form_state = kwargs.get('state')
    tally_id = kwargs.get('tally_id')
    if not form_state:
        return self.render_to_response(self.get_context_data(
            header_text=_('Form List'),
            remote_url=reverse('form-list-data',
                               kwargs={'tally_id': tally_id}),
            tally_id=tally_id,
            show_create_form_button=True))
    if form_state == ALL:
        form_list = ResultForm.objects.filter(tally__id=tally_id)
    else:
        state = FormState[form_state.upper()]
        form_list = ResultForm.forms_in_state(state.value, tally_id=tally_id)
    columns = ('barcode', 'form_state', 'gender', 'station_number',
               'center__sub_constituency__code', 'center__code',
               'ballot__race_type')
    return render_to_csv_response(
        form_list.values(*columns).order_by('barcode'))
def download_datadocuments(request, pk):
    """Download every DataDocument in the group as a datestamped CSV."""
    datagroup = DataGroup.objects.get(pk=pk)
    docs = DataDocument.objects.filter(data_group=datagroup)
    return render_to_csv_response(
        docs,
        filename="{}_documents.csv".format(datagroup.get_name_as_slug()),
        append_datestamp=True)
def subscribers(request):
    """Dashboard page for managing a blog's subscribers.

    Supports deleting one subscriber (?delete=pk), exporting the list as
    CSV (?export=...), and bulk-adding addresses pasted into the POST body.
    """
    blog = get_object_or_404(Blog, user=request.user)
    if not resolve_subdomain(request.META['HTTP_HOST'], blog):
        return redirect(f"{blog.useful_domain()}/dashboard")

    delete_pk = request.GET.get("delete", "")
    if delete_pk:
        Subscriber.objects.filter(blog=blog, pk=delete_pk).delete()

    subscribers = Subscriber.objects.filter(blog=blog)
    if request.GET.get("export", ""):
        return djqscsv.render_to_csv_response(
            subscribers.values('email_address', 'subscribed_date'))

    pasted = request.POST.get("email_addresses", "")
    if pasted:
        for email in re.findall(
                r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", pasted):
            Subscriber.objects.get_or_create(blog=blog, email_address=email)

    return render(request, "dashboard/subscribers.html",
                  {"blog": blog, "subscribers": subscribers})
def get(self, request, **kwargs):
    """Serve the queryset as CSV when ?format=csv; otherwise render normally."""
    # check for format query key in url (my/url/?format=csv)
    self.format = request.GET.get('format', False)
    if self.format == 'csv':
        return render_to_csv_response(self.get_queryset())
    return super(SampleHomeView, self).get(request, **kwargs)
def get_profile_file(request):
    """Superuser-only export of all member profiles as a CSV download."""
    if not request.user.is_superuser:
        # NOTE(review): non-superusers get an implicit None response, which
        # Django rejects -- confirm this view is only linked for superusers.
        return None
    rows = Profile.objects.all().values(
        'user__username', 'name', 'batch', 'user__email', 'phone',
        'college', 'profession', 'linkedin', 'github', 'okr', 'points',
        'stars').order_by('name')
    # NOTE(review): filename has no ".csv" extension -- confirm djqscsv
    # output naming is acceptable here.
    return render_to_csv_response(
        rows,
        filename='Sushiksha-Profiles' + str(datetime.date.today()),
        field_header_map={
            'user__username': '******',
            'name': 'Name',
            'batch': 'batch',
            'user__email': 'email',
            'phone': 'phone number',
            'college': 'college',
            'profession': 'profession',
            'linkedin': 'linked in',
            'github': 'github',
            'okr': 'OKR',
            'points': 'Total Points',
            'stars': 'Stars',
        })
def get(self, *args, **kwargs):
    """Forms-not-received page; ``format='csv'`` streams the full columns."""
    if kwargs.get('format') != 'csv':
        return self.render_to_response(self.get_context_data(
            header_text=_('Forms Not Received'),
            custom=True,
            remote_url='form-not-received-data'))
    columns = ('id', 'created_date', 'modified_date', 'ballot_id__number',
               'center_id__code', 'user_id__username',
               'created_user_id__username', 'audited_count', 'barcode',
               'date_seen', 'form_stamped', 'form_state', 'gender', 'name',
               'office_id__number', 'rejected_count', 'serial_number',
               'skip_quarantine_checks', 'station_number', 'is_replacement',
               'intake_printed', 'clearance_printed')
    form_list = ResultForm.forms_in_state(
        FormState.UNSUBMITTED).values(*columns)
    return render_to_csv_response(form_list)
def export_csv(request, app, model):
    """Export a filtered, searched model queryset as CSV.

    Query params other than ``q`` are passed straight to ``filter()``;
    ``q`` feeds the model admin's search. Unknown app/model 404s.
    """
    try:
        model = apps.get_model(app, model)
    except (AttributeError, LookupError) as e:
        raise Http404(str(e))

    params = request.GET.dict()
    q = params.pop('q', '')
    if params:
        objects_list = model.objects.filter(**params)
    else:
        objects_list = model.objects.all()

    # Using one of Django's private API's (_registry). May break with updates.
    model_admin = accession_admin._registry[model]
    results, _ = model_admin.get_search_results(request, objects_list, q)

    # The per-model CSV configuration drives column selection, header
    # renaming, and value serialization.
    csv_config = get_csv_config(model)
    results = results.values(*csv_config.fields)
    return render_to_csv_response(
        results,
        field_header_map=csv_config.header_map,
        field_serializer_map=csv_config.serializer_map,
        streaming=False,
    )
def get(self, *args, **kwargs):
    """Center/station list page; ``format='csv'`` downloads a datestamped CSV."""
    tally_id = kwargs.get('tally_id')
    if kwargs.get('format') == 'csv':
        columns = (
            'center__office__name',
            'sub_constituency__code',
            'center__name',
            'center__code',
            'station_number',
            'gender',
            'registrants',
            'percent_received',
            'percent_archived',
        )
        stations = Station.objects.filter(
            center__tally__id=tally_id).values(*columns).order_by(
            'center__code')
        header_map = {
            'center__office__name': 'office name',
            'sub_constituency__code': 'subconstituency code',
            'center__name': 'center name',
            'center__code': 'center code',
        }
        return render_to_csv_response(stations,
                                      filename='centers_and_station',
                                      append_datestamp=True,
                                      field_header_map=header_map)
    return self.render_to_response(
        self.get_context_data(remote_url=reverse('center-list-data',
                                                 kwargs=kwargs),
                              tally_id=tally_id))
def get(self, *args, **kwargs):
    """Center/station list page; ``format='csv'`` downloads a datestamped CSV.

    NOTE(review): near-duplicate of the previous view -- candidates for a
    shared helper.
    """
    tally_id = kwargs.get('tally_id')
    if kwargs.get('format') == 'csv':
        columns = (
            'center__office__name',
            'sub_constituency__code',
            'center__name',
            'center__code',
            'station_number',
            'gender',
            'registrants',
            'percent_received',
            'percent_archived',
        )
        stations = Station.objects.filter(
            center__tally__id=tally_id).values(*columns).order_by(
            'center__code')
        header_map = {
            'center__office__name': 'office name',
            'sub_constituency__code': 'subconstituency code',
            'center__name': 'center name',
            'center__code': 'center code',
        }
        return render_to_csv_response(stations,
                                      filename='centers_and_station',
                                      append_datestamp=True,
                                      field_header_map=header_map)
    return self.render_to_response(self.get_context_data(
        remote_url=reverse('center-list-data', kwargs=kwargs),
        tally_id=tally_id))
def download(self, request, *args, **kwargs):
    """Export the project's geo searches as CSV.

    Fix: the original built a second, identical queryset (``items``) that
    was never used; the dead query has been removed.
    """
    project_id = self.kwargs['pk']
    qs = GeoSearch.objects.filter(project__id=project_id).values(
        'name', 'address', 'lat', 'lng', 'status')
    return render_to_csv_response(qs)
def Api_Pixel_Data(request, format, pk, data_model, start_day, start_month,
                   start_year, final_day, final_month, final_year):
    """Return a pixel's time series between two dates as JSON or CSV.

    Always includes the 'Histórico, 5Km' model rows alongside the
    requested ``data_model``.
    """
    start_date = datetime.date(start_year, start_month, start_day)
    final_date = datetime.date(final_year, final_month, final_day)
    try:
        pixel = Pixel.objects.get(pk=pk)
        data = pixel.pixel_data.filter(
            Q(data_model__name='Histórico, 5Km') |
            Q(data_model__name=data_model),
            date__gte=start_date,
            date__lte=final_date,
        ).order_by('date')
    except Pixel.DoesNotExist:
        return JsonResponse({'mensagem': 'Pixel não encontrado.'},
                            status=404)
    except PixelData.DoesNotExist:
        # NOTE(review): filter() never raises DoesNotExist, so this branch
        # looks unreachable -- confirm before removing.
        return JsonResponse({'mensagem': 'PixelData não encontrado.'},
                            status=404)
    if format == 'json':
        payload = serializers.serialize('json', data)
        return HttpResponse(payload, content_type="application/json")
    if format == 'csv':
        return render_to_csv_response(data.values())
def download_excel(requst):
    """Download all forensics records as a CSV table.

    Fixes: removed leftover debug ``print`` statements and the redundant
    ``.all()`` before ``.values()``. (Parameter name ``requst`` is a typo
    for ``request`` but is kept to preserve the public signature.)
    """
    rows = forensics.objects.values(
        "id", "title", "venue", "year", "citation", "abstract", "url")
    return render_to_csv_response(rows, filename="table_download.csv")
def export(request, suc_id):
    """Export a branch's product stock detail as CSV with Spanish headers."""
    sucursal_id = Utilidades().validarIngresoNum(suc_id)
    rows = DetalleSucursalAlmacen.objects.filter(
        sucursal_id=sucursal_id).values(
        'id',
        'producto_id__tipo_producto__nombre',
        'producto_id__marca__nombre',
        'producto_id__codigo',
        'producto_id__color',
        'stock',
        'producto_id__precio_x_menor',
        'producto_id__precio_x_mayor')
    headers = {
        'producto_id__tipo_producto__nombre': 'TIPO',
        'producto_id__marca__nombre': 'MARCA',
        'producto_id__codigo': 'MODELO',
        'producto_id__color': 'COLOR',
        'producto_id__precio_x_menor': 'PRECIO por Menor',
        'producto_id__precio_x_mayor': 'Precio por Mayor',
    }
    return djqscsv.render_to_csv_response(rows, field_header_map=headers)
def sample_lead_file(request):
    """CSV export of lead domains with SSL / registration metadata."""
    columns = (
        'domain__id', 'domain__domain_common', 'domain__ssl_expire',
        'domain__ssl_issuer_name__name', 'domain__ssl_url',
        'domain__domain_expire', 'domain__domain_registrar',
        'domain__site_ip')
    return djqscsv.render_to_csv_response(Lead.objects.values(*columns))
def asset_list_filter(request):
    """Filtered asset list; ``makecsv=1`` downloads the filtered set as CSV."""
    if not request.GET:
        # This is a bit hacky, but should work basically forever
        filter = AssetFilter(request.GET,
                             queryset=Asset.objects.filter(
                                 asset_status="99999"))
        return render(request, "assetregister/asset_list_filtered.html",
                      {"filter": filter})

    filter = AssetFilter(request.GET, queryset=Asset.objects.all())
    if request.GET.get("makecsv") == "1":
        # User selected CSV output on form
        csv_fields = (
            "asset_id", "amrc_equipment_id", "asset_status__status_name",
            "asset_description", "asset_details", "asset_manufacturer",
            "asset_model", "asset_serial_number", "person_responsible",
            "person_responsible_email", "amrc_group_responsible__group_name",
            "requires_insurance", "requires_unforseen_damage_insurance",
            "asset_value", "charge_out_rate", "charge_code",
            "purchase_order_ref", "grn_id", "funded_by", "acquired_on",
            "disposal_date", "disposal_method", "dispatch_note_id",
            "requires_safety_checks", "safety_notes",
            "requires_environmental_checks", "environmental_aspects__aspect",
            "environmental_notes", "emergency_response_information",
            "requires_planned_maintenance", "maintenance_records",
            "maintenance_notes", "requires_calibration",
            "calibration_frequency", "passed_calibration",
            "calibration_date_prev", "calibration_date_next",
            "calibration_status__status_name", "calibration_type",
            "asset_location_building__building_name", "asset_location_room",
            "edited_by__username", "edited_on")
        return render_to_csv_response(
            filter.qs.values(*csv_fields),
            filename="Custom_Filtered_Assets_{}.csv".format(
                str(timezone.now().date())))
    # User selected Website output on form
    number = len(filter.qs) or "0"
    return render(request, "assetregister/asset_list_filtered.html",
                  {"filter": filter, "number": number})
def okr_weekly(request):
    """Prompt for a date range and export the OKR entries in it as CSV.

    Invalid submissions re-render the form with errors; GET shows a blank
    form.
    """
    if request.POST:
        form = RangeRequestForm(request.POST)
        if form.is_valid():
            beginning = form.cleaned_data['beginning']
            end = form.cleaned_data['end']
            entries = Entry.objects.filter(
                date_time__gt=beginning, date_time__lte=end).values(
                'user__username', 'user__profile__name',
                'user__profile__batch', 'key_result__objective',
                'key_result__key_result', 'update', 'date_time',
                'time_spent').order_by('user__username')
            return render_to_csv_response(
                entries,
                filename='Sushiksha-OKR' + str(datetime.date.today()),
                field_header_map={
                    'user__username': '******',
                    'user__profile__name': 'Name',
                    'user__profile__batch': 'Batch',
                    'key_result__objective': 'Objective',
                    'key_result__key_result': 'KR',
                    'update': 'Update',
                    'date_time': "Date and Time",
                    'time_spent': 'Time Spent',
                })
    else:
        form = RangeRequestForm()
    return render(request, 'analytics/logs-users.html',
                  context={'form': form, 'heading': "OKR Data"})
def upload(request):
    """Render all Elements to CSV and upload the file to a GCS bucket.

    Uses the App Engine ``cloudstorage`` client with custom retry
    parameters; upload failures are logged rather than raised.

    Fix: replaced the Python-2-only ``except Exception, e`` syntax with
    ``except Exception as e``, valid on Python 2.6+ and 3.
    """
    qs = models.Elements.objects.all()
    filename = djqscsv.generate_filename(qs, append_datestamp=True)
    my_default_retry_params = gcs.RetryParams(initial_delay=0.2,
                                              max_delay=5.0,
                                              backoff_factor=2,
                                              max_retry_period=15)
    gcs.set_default_retry_params(my_default_retry_params)
    bucket_name = os.environ.get('BUCKET_NAME',
                                 app_identity.get_default_gcs_bucket_name())
    bucket = '/' + bucket_name
    # Render the CSV into an HttpResponse whose .content we stream to GCS.
    file_obj = djqscsv.render_to_csv_response(qs, filename)
    try:
        write_retry_params = gcs.RetryParams(backoff_factor=1.1)
        gcs_file = gcs.open(bucket + '/' + filename, 'w',
                            content_type='text/csv',
                            options={'x-goog-meta-foo': 'foo',
                                     'x-goog-meta-bar': 'bar'},
                            retry_params=write_retry_params)
        gcs_file.write(file_obj.content)
        gcs_file.close()
    except Exception as e:  # pylint: disable=broad-except
        logging.exception(e)
def qr_location_list(request):
    """Filtered QR location list; ``makecsv=1`` downloads the set as CSV."""
    if not request.GET:
        # This is a bit hacky, but should work basically forever
        filter = QRLocationFilter(request.GET,
                                  queryset=QRLocation.objects.all())
        return render(request,
                      "assetregister/location_qr_list_filtered.html",
                      {"filter": filter})
    filter = QRLocationFilter(request.GET, queryset=QRLocation.objects.all())
    if request.GET.get("makecsv") == "1":
        # User selected CSV output
        output = filter.qs.values("location_id", "building__building_name",
                                  "building__EFM_building_code",
                                  "location_room")
        return render_to_csv_response(
            output,
            filename="Custom_Filtered_QR_Locations_{}.csv".format(
                str(timezone.now().date())))
    # User selected Website output
    number = len(filter.qs) or "0"
    return render(request, "assetregister/location_qr_list_filtered.html",
                  {"filter": filter, "number": number})
def download_annual_leaves(request):
    """Export all annual leave requests with authorization status as CSV."""
    fields = (
        'user__first_name', 'user__last_name', 'Leave_type', 'department',
        'Start_Date', 'End_Date', 'Total_working_days', 'Reason',
        'Manager_Authorization_Status', 'Authorized_by_Manager',
        'Director_Authorization_Status', 'Authorized_by_Director')
    return render_to_csv_response(NewLeave.objects.values(*fields))
def rates_csv_view(request, *args, **kwargs):
    """Download customer rates for a ratecard as a datestamped CSV.

    Fix: ``rc`` was previously unbound when the ``Person`` lookup failed,
    raising ``NameError`` at the membership test instead of serving the
    response after the error message.
    """
    ratecard = kwargs['ratecard']
    qs = CustomerRates.objects.values(
        'destination', 'prefix', 'rate', 'block_min_duration',
        'minimal_time', 'init_block')
    rc = CustomerRateCards.objects.none()
    try:
        usercompany = Person.objects.get(user=request.user)
        company = get_object_or_404(Company, name=usercompany.company)
        rc = CustomerRateCards.objects.filter(
            company=company.pk).filter(
            ratecard__enabled=True).order_by('priority')
        qs = qs.filter(ratecard__pk=ratecard)
    except Person.DoesNotExist:
        messages.error(request,
                       _(u"""This user is not linked to a customer !"""))
    if ratecard and int(ratecard) and ratecard in rc:
        qs = qs.filter(ratecard__pk=int(ratecard))
    # NOTE(review): the original else-branch called ``qs.none()`` without
    # assigning the result -- a no-op, so the unfiltered queryset is still
    # returned here. Also, ``ratecard in rc`` compares an id against model
    # instances and may never match. Confirm intent before tightening.
    return render_to_csv_response(qs, append_datestamp=True)
def BenchmarkChartView(request):
    """Benchmark chart page with filtering, TSV download, and table view."""
    if "btn-other" in request.GET:
        # Forward every query param except the button itself to the table
        # view.
        add_get = "&".join(
            key + "=" + request.GET[key]
            for key in request.GET.keys() if key != "btn-other")
        return HttpResponseRedirect('/benchmark_table/?' + add_get)
    filter = BenchmarkFilter(request.GET)
    if "btn-download" in request.GET:
        # check for format query key in url (my/url/?format=tsv)
        columns = ('game__title', 'cpu_model', 'gpu_model', 'resolution',
                   'game_quality_preset', 'driver', 'operating_system',
                   'user__username', 'additional_notes', 'fps_data')
        return render_to_csv_response(filter.qs.values(*columns),
                                      delimiter='\t', filename="")
    # Suppress field help text before rendering the filter form.
    for f in filter.form.fields:
        filter.form.fields[f].help_text = ""
    table = BenchmarkChartTable(filter.qs.order_by("-upload_date"))
    RequestConfig(request).configure(table)
    context = {'filter': filter, 'table': table, 'chart': True}
    return render(request, "benchmark_chart_view.html", context)
def rates_csv_view(request, *args, **kwargs):
    """Download customer rates for a ratecard as a datestamped CSV.

    Fix: ``rc`` was previously unbound when the ``Person`` lookup failed,
    raising ``NameError`` at the membership test instead of serving the
    response after the error message. (Near-duplicate of the sibling
    ``rates_csv_view`` above.)
    """
    ratecard = kwargs['ratecard']
    qs = CustomerRates.objects.values('destination', 'prefix', 'rate',
                                      'block_min_duration', 'minimal_time',
                                      'init_block')
    rc = CustomerRateCards.objects.none()
    try:
        usercompany = Person.objects.get(user=request.user)
        company = get_object_or_404(Company, name=usercompany.company)
        rc = CustomerRateCards.objects.filter(
            company=company.pk).filter(
            ratecard__enabled=True).order_by('priority')
        qs = qs.filter(ratecard__pk=ratecard)
    except Person.DoesNotExist:
        messages.error(request,
                       _(u"""This user is not linked to a customer !"""))
    if ratecard and int(ratecard) and ratecard in rc:
        qs = qs.filter(ratecard__pk=int(ratecard))
    # NOTE(review): the original else-branch called ``qs.none()`` without
    # assigning the result -- a no-op, so the unfiltered queryset is still
    # returned here. Confirm intent before tightening.
    return render_to_csv_response(qs, append_datestamp=True)
def csv_export(request):
    """Password-gated CSV export of all attendees.

    Fix: narrowed the bare ``except: pass`` (which silently swallowed every
    error, including rendering failures) to the one expected case -- a
    missing POST key.

    NOTE(review): the export password is hard-coded; move it to settings.
    """
    if request.method == 'POST':
        try:
            password_ok = request.POST['export_password'] == 'evergreen'
        except KeyError:
            # MultiValueDictKeyError subclasses KeyError.
            password_ok = False
        if password_ok:
            return render_to_csv_response(Attendee.objects.all())
    return render(request, 'event/export.html')
def view_export_solved_csv(request, batch_id):
    """Export the solved records of a batch as CSV with mapped headers."""
    from djqscsv import render_to_csv_response
    # NOTE(review): ``models.BatchSolved(batch_id)`` appears to build a
    # per-batch model/class -- confirm it is a factory, since plain Django
    # model *instances* do not expose ``.objects``.
    model = models.BatchSolved(batch_id)
    rows = model.objects.all()
    headers = model.get_header_map()
    return render_to_csv_response(rows, field_header_map=headers)
def Csv_view(request):
    """If user is not authenticated, go back to login.

    Returns a CSV of the user's schedule: for each row the weekday name in
    ``selected_shift__day`` is replaced by the date of its next occurrence,
    and columns are exported as Subject / Start Date / Start Time / End Time.

    Fixes: the seven copy-pasted per-weekday ``if`` blocks are replaced by
    a name->index lookup table; ``field_order`` was a set literal (whose
    ordering is arbitrary) and is now a list; dict mutation during
    iteration is avoided by snapshotting the row values.
    """
    if not request.user.is_staff:
        return redirect(reverse('login'))

    # Weekday name -> Python weekday index (Monday == 0).
    weekday_index = {
        'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,
        'friday': 4, 'saturday': 5, 'sunday': 6,
    }

    # this accounts for the next weekday
    def next_weekday(d, weekday):
        days_ahead = weekday - d.weekday()
        if days_ahead <= 0:
            days_ahead += 7
        future_day = d + datetime.timedelta(days_ahead)
        return future_day.strftime("%m/%d/%Y")

    # each shift of the day of the week is replaced with the date of its
    # next occurrence
    def get_date(shifts):
        for row in shifts:
            for value in list(row.values()):
                idx = weekday_index.get(value)
                if idx is not None:
                    row['selected_shift__day'] = next_weekday(
                        datetime.date.today(), idx)
        return shifts

    shifts = User_Schedule.objects.values(
        'selected_shift__user_schedule__user__first_name',
        'selected_shift__day',
        'selected_shift__start_time',
        'selected_shift__end_time',
    ).distinct('id')
    get_date(shifts)
    return render_to_csv_response(
        shifts,
        field_order=['selected_shift__user_schedule__user__first_name'],
        field_header_map={
            'selected_shift__user_schedule__user__first_name': 'Subject',
            'selected_shift__day': 'Start Date',
            'selected_shift__start_time': 'Start Time',
            'selected_shift__end_time': 'End Time',
        })
def export_csv(self):
    """Export the queryset as CSV, restricted to admin-declared fields.

    Falls back to all columns when no model admin (or no
    ``csv_export_fields``) is configured.
    """
    admin = self.model_admin
    if admin is None or not hasattr(admin, 'csv_export_fields'):
        rows = self.queryset.all().values()
    else:
        rows = self.queryset.all().values(*admin.csv_export_fields)
    return render_to_csv_response(rows)
def download_sick_leaves(request):
    """Export all sick leave records with authorization status as CSV."""
    fields = (
        'user__first_name', 'user__last_name', 'Leave_type', 'department',
        'Date_illness_began', 'Date_illness_end', 'Total_working_days',
        'Brief_explanation_of_illness', 'Manager_Authorization_Status',
        'Authorized_by_Manager', 'Director_Authorization_Status',
        'Authorized_by_Director')
    return render_to_csv_response(SickLeave.objects.values(*fields))
def as_csv(cls, request):
    """Render this resource's ``list_qs`` as a CSV response.

    Returns a plain-text error response when the subclass has not
    implemented ``list_qs``.
    """
    resource = cls()
    if hasattr(resource, "list_qs"):
        resource.request = request
        return render_to_csv_response(resource.list_qs(),
                                      field_order=resource.VALUES_ARGS)
    return HttpResponse(
        "list_qs not implemented for {}".format(cls.__name__))
def export_all_calibrations(request):
    """Download every calibration record, newest first, as a dated CSV."""
    columns = (
        "calibration_record_id", "asset", "asset__asset_description",
        "asset__asset_manufacturer", "calibration_description",
        "calibration_date", "calibration_date_next",
        "calibrated_by_internal__username", "calibrated_by_external",
        "calibration_outcome", "calibration_notes",
        "calibration_certificate", "calibration_entered_by__username",
        "calibration_entered_on")
    rows = CalibrationRecord.objects.order_by(
        "-calibration_date").values(*columns)
    return render_to_csv_response(
        rows,
        filename="All_Calibration_Records_{}.csv".format(
            str(timezone.now().date())))
def as_csv(self, request):
    """Render this resource's ``list_qs`` as a CSV response.

    Returns a plain-text error response when ``list_qs`` is not
    implemented. (``self`` here is expected to be callable -- presumably a
    class; confirm against the caller.)
    """
    resource = self()
    if hasattr(resource, "list_qs"):
        resource.request = request
        return render_to_csv_response(resource.list_qs(),
                                      field_order=resource.VALUES_ARGS)
    return HttpResponse(
        "list_qs not implemented for {}".format(self.__name__))
def export_as_csv(self, request, queryset):
    """Admin action: export all non-superuser accounts as CSV.

    Note: the passed-in ``queryset`` (the admin selection) is ignored; the
    export always covers every non-superuser, matching original behavior.
    """
    rows = User.objects.filter(is_superuser=False).values(
        *User.CSV_MAPPING.keys())
    return render_to_csv_response(
        rows,
        field_header_map=User.CSV_MAPPING,
        field_serializer_map=User.CSV_MAKE_PRETTY,
    )
def calibrated_asset_export_all(request):
    """Download all assets requiring calibration, soonest due first."""
    columns = (
        "asset_id", "amrc_equipment_id", "requires_calibration",
        "asset_description", "asset_manufacturer", "asset_model",
        "asset_serial_number", "asset_status__status_name",
        "calibration_date_prev", "calibration_date_next",
        "calibration_procedure", "person_responsible",
        "person_responsible_email",
        "asset_location_building__building_name", "asset_location_room")
    rows = Asset.objects.filter(requires_calibration=True).order_by(
        "calibration_date_next").values(*columns)
    return render_to_csv_response(
        rows,
        filename="All_Assets_Needing_Calibration_{}.csv".format(
            str(timezone.now().date())))
def calibration_asset_export_nextmonth(request):
    """Download assets whose calibration falls due within the next 30 days."""
    plusonemonth = timezone.now() + timedelta(days=30)
    columns = (
        "asset_id", "amrc_equipment_id", "requires_calibration",
        "asset_description", "asset_manufacturer", "asset_model",
        "asset_serial_number", "asset_status__status_name",
        "calibration_date_prev", "calibration_date_next",
        "calibration_procedure", "person_responsible",
        "person_responsible_email",
        "asset_location_building__building_name", "asset_location_room")
    rows = Asset.objects.filter(
        requires_calibration=True,
        calibration_date_next__lte=plusonemonth).order_by(
        "calibration_date_next").values(*columns)
    return render_to_csv_response(
        rows,
        filename="Assets_Due_Calibration_Before_{}.csv".format(
            str(plusonemonth.date())))
def environmental_export_all(request):
    """Download all assets requiring environmental checks as a dated CSV."""
    columns = (
        "asset_id", "requires_environmental_checks", "asset_description",
        "asset_manufacturer", "asset_model", "asset_serial_number",
        "asset_status__status_name", "person_responsible",
        "person_responsible_email",
        "asset_location_building__building_name",
        "asset_location_building__EFM_building_code", "asset_location_room",
        "handling_and_storage_instructions")
    rows = Asset.objects.filter(
        requires_environmental_checks=True).order_by(
        "asset_id").values(*columns)
    return render_to_csv_response(
        rows,
        filename="All_Assets_Needing_Environmental_Checks_{}.csv".format(
            str(timezone.now().date())))
def export_csv(cls):
    """Dump popular feeds (>= 20 subscribers) to ``feeds.csv`` on disk.

    Fix: the file handle was opened with ``open(...)`` and closed manually
    (leaking on error); it now uses a context manager. The mode is binary
    since ``HttpResponse.content`` is bytes on Python 3 (and byte-safe on
    Python 2).
    """
    import djqscsv
    qs = Feed.objects.filter(num_subscribers__gte=20).values(
        'id', 'feed_title', 'feed_address', 'feed_link', 'num_subscribers')
    csv = djqscsv.render_to_csv_response(qs).content
    with open('feeds.csv', 'wb') as f:
        f.write(csv)
def insurance_export_all(request):
    """Download all assets requiring insurance as a dated CSV."""
    columns = (
        "asset_id", "requires_insurance", "asset_description",
        "asset_manufacturer", "asset_model", "asset_serial_number",
        "asset_status__status_name", "asset_value", "purchase_order_ref",
        "funded_by", "acquired_on", "person_responsible",
        "person_responsible_email",
        "asset_location_building__building_name",
        "asset_location_building__EFM_building_code", "asset_location_room",
        "handling_and_storage_instructions")
    rows = Asset.objects.filter(requires_insurance=True).order_by(
        "asset_id").values(*columns)
    return render_to_csv_response(
        rows,
        filename="All_Assets_Needing_Insurance_{}.csv".format(
            str(timezone.now().date())))
def export(request, suc_id):
    """Export a branch's product stock detail as CSV with Spanish headers.

    NOTE(review): duplicate of the ``export`` view above -- candidates for
    consolidation.
    """
    sucursal_id = Utilidades().validarIngresoNum(suc_id)
    rows = DetalleSucursalAlmacen.objects.filter(
        sucursal_id=sucursal_id).values(
        'id',
        'producto_id__tipo_producto__nombre',
        'producto_id__marca__nombre',
        'producto_id__codigo',
        'producto_id__color',
        'stock',
        'producto_id__precio_x_menor',
        'producto_id__precio_x_mayor')
    headers = {
        'producto_id__tipo_producto__nombre': 'TIPO',
        'producto_id__marca__nombre': 'MARCA',
        'producto_id__codigo': 'MODELO',
        'producto_id__color': 'COLOR',
        'producto_id__precio_x_menor': 'PRECIO por Menor',
        'producto_id__precio_x_mayor': 'Precio por Mayor',
    }
    return djqscsv.render_to_csv_response(rows, field_header_map=headers)
def get(self, *args, **kwargs):
    """Render the 'forms not received' page, or a CSV when format=csv."""
    if kwargs.get('format') == 'csv':
        unsubmitted = ResultForm.forms_in_state(FormState.UNSUBMITTED)
        return render_to_csv_response(unsubmitted)
    context = self.get_context_data(
        header_text=_('Forms Not Received'),
        custom=True,
        remote_url='form-not-received-data')
    return self.render_to_response(context)
def get_cust_all(self, request):
    """Serve a filtered customer list as JSON (DataTables), CSV, or XLSX.

    Requires GET params ``source``, ``draw``, ``start``, ``length``
    (DataTables paging protocol); otherwise responds 400. Optional
    params narrow the queryset and extend the download filename:
    ordering (``order[0][column]``/``order[0][dir]``), ``segment``
    (comma-separated list) and ``active_rate_prev_83`` (min,max range).
    """
    if "source" in request.GET and "draw" in request.GET and "start" in request.GET and "length" in request.GET:
        try:
            # DataTables sends a row offset; convert to a 1-based page number.
            # NOTE(review): true division — on Python 3 `page` can be a float
            # (Paginator coerces via int()); presumably Python 2 code. Confirm.
            size = int(request.GET["length"])
            page = int(request.GET["start"]) / size + 1
            # Base filename derived from the source, e.g. "foo_bar.csv" -> "foo-bar".
            filename = request.GET["source"].replace(".csv", "").replace("_", "-")
            cust_set = self.Customer.objects.filter(source=request.GET["source"])
            # Handle order: map the DataTables column index back to a field name.
            if "order[0][column]" in request.GET:
                order = "-" if "order[0][dir]" in request.GET and request.GET["order[0][dir]"] == "desc" else ""
                keyword = request.GET["columns[" + request.GET["order[0][column]"] + "][data]"]
                filename += "_" + order + keyword.replace("_", "-")
                cust_set = cust_set.order_by(order + keyword)
            # Handle segment: comma-separated membership filter.
            if "segment" in request.GET and request.GET["segment"] != "":
                filename += "_seg-" + request.GET["segment"]
                cust_set = cust_set.filter(segment__in=str(request.GET["segment"]).split(","))
            # Handle active_rate_prev_83: "min,max" inclusive float range.
            if "active_rate_prev_83" in request.GET and request.GET["active_rate_prev_83"] != "":
                filename += "_active_rate_prev_83-" + request.GET["active_rate_prev_83"]
                active_rate_prev_83_range = request.GET["active_rate_prev_83"].split(",")
                cust_set = cust_set.filter(active_rate_previous_83__range=(float(active_rate_prev_83_range[0]), float(active_rate_prev_83_range[1])))
            # Export branches take the whole filtered set (no paging).
            if "csv" in request.GET and request.GET["csv"] == "true":
                return render_to_csv_response(cust_set, filename=filename + ".csv")
            elif "xlsx" in request.GET and request.GET["xlsx"] == "true":
                # NOTE(review): StringIO.StringIO and
                # _meta.get_all_field_names() are Python 2 / pre-Django 1.10
                # APIs; on Python 3 this branch needs io.BytesIO and
                # _meta.get_fields(). Confirm target runtime before touching.
                output = StringIO.StringIO()
                book = xlsxwriter.Workbook(output)
                sheet = book.add_worksheet()
                headers = self.Customer._meta.get_all_field_names()
                sheet.write_row(0, 0, headers)
                row_id = 0
                for row in cust_set:
                    row_id += 1
                    sheet.write_row(row_id, 0, [getattr(row, field) for field in headers])
                book.close()
                # Construct response: rewind the in-memory workbook and
                # stream it back as an attachment.
                output.seek(0)
                response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
                response['Content-Disposition'] = "attachment; filename=" + filename + ".xlsx"
                return response
            else:
                # Default: one page of serialized rows in DataTables format.
                cust_page = Paginator(cust_set, size).page(page)
                total = self.Customer.objects.filter(source=request.GET["source"]).count()
                data = self.CustomerSerializer(cust_page, many=True).data
                return Response({
                    "draw": int(request.GET["draw"]),
                    "recordsTotal": total,
                    "recordsFiltered": cust_set.count(),
                    "data": data
                })
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
    else:
        return Response(status=status.HTTP_400_BAD_REQUEST)
def csv_view(request, *args, **kwargs):
    """Export the logged-in customer's CDRs as a CSV download.

    kwargs:
        day: accepted when 0 < day < 8 — a one-day window starting
            ``day`` days ago.
        month: accepted when 0 < month < 4 — the calendar month starting
            ``month`` months ago.

    If neither window is valid, an empty CSV is produced.
    """
    day = kwargs['day']
    month = kwargs['month']
    window = None  # (start_date, end_date) once a valid day/month is parsed

    qs = CDR.objects.values(
        'customer__name', 'caller_id_number', 'destination_number',
        'start_stamp', 'billsec', 'prefix', 'sell_destination', 'rate',
        'init_block', 'block_min_duration', 'total_sell', 'customer_ip',
        'sip_user_agent', 'sip_charge_info')
    try:
        usercompany = Person.objects.get(user=request.user)
        company = get_object_or_404(Company, name=usercompany.company)
        qs = (qs.filter(customer=company.pk)
                .exclude(effective_duration="0")
                .order_by('-start_stamp'))
    except Person.DoesNotExist:
        messages.error(request, _(u"""This user is not linked to a customer !"""))

    if day and 0 < int(day) < 8:
        start_date = datetime.date.today() - datetime.timedelta(days=int(day))
        window = (start_date, start_date + datetime.timedelta(days=1))
    if month and 0 < int(month) < 4:
        today = datetime.date.today()
        start_date = (datetime.date(today.year, today.month, 1)
                      - relativedelta(months=int(month)))
        end_date = start_date + relativedelta(months=1) - datetime.timedelta(days=1)
        window = (start_date, end_date)

    if window:
        qs = qs.filter(start_stamp__range=window)
    else:
        # BUG FIX: the original called qs.none() without assigning the
        # result (querysets are immutable), so an invalid day/month
        # silently exported the entire filtered table.
        qs = qs.none()

    return render_to_csv_response(
        qs,
        append_datestamp=True,
        field_header_map={'customer__name': 'Customer'})
def comments_csv(request, instance):
    """Stream the comments attached to *instance* as a CSV download."""
    fields = ('id', 'user__username', 'comment', 'is_removed',
              'is_archived', 'submit_date')
    comment_rows = _comments(request, instance).values(*fields)
    return render_to_csv_response(comment_rows)
def mailing_list(request):
    """CSV of users who opted into the mailing list."""
    subscribers = (
        models.User.objects
        .filter(userprofile__mailing_list=True)
        .values('username', 'first_name', 'last_name', 'email')
    )
    return render_to_csv_response(subscribers,
                                  append_datestamp=True,
                                  filename='vegphilly_ml')
def get(self, request, *args, **kwargs):
    """Return all network services as a CSV (selected columns only)."""
    columns = ('updated', 'protocol', 'address', 'port', 'service',
               'state', 'reason', 'banner', 'nmap_report_meta_id')
    services = NetworkService.objects.all().values(*columns)
    return render_to_csv_response(services)
def vendor_list(request):
    """CSV of approved vendors with contact and veg-level details."""
    approved = models.Vendor.objects.approved().values(
        'name', 'address', 'neighborhood__name', 'phone', 'website',
        'veg_level__name', 'notes')
    return render_to_csv_response(approved,
                                  append_datestamp=True,
                                  filename='vegphilly_vendors')
def get(self, *args, **kwargs):
    """List a tally's clearance forms; CSV export when format=csv."""
    tally_id = kwargs.get('tally_id')
    form_list = ResultForm.objects.filter(
        form_state=FormState.CLEARANCE, tally__id=tally_id)
    if kwargs.get('format') == 'csv':
        return render_to_csv_response(form_list)
    return self.render_to_response(self.get_context_data(
        forms=paging(form_list, self.request),
        is_clerk=is_clerk(self.request.user),
        tally_id=tally_id))
def get(self, *args, **kwargs):
    """List the current user's forms for a tally; CSV when format=csv."""
    tally_id = kwargs.get('tally_id')
    user_is_clerk = is_clerk(self.request.user)
    form_list = forms_for_user(user_is_clerk, tally_id)
    if kwargs.get('format') == 'csv':
        return render_to_csv_response(form_list)
    return self.render_to_response(self.get_context_data(
        forms=paging(form_list, self.request),
        is_clerk=user_is_clerk,
        tally_id=tally_id))
def get(self, request, *args, **kwargs):
    """Export an event's proposals (with summed vote rates) as CSV."""
    event = self.get_object()
    filename = "event_%s_export" % event.slug.replace('-', '_')
    proposals = (
        event.proposals
        .values('id', 'title', 'author__username')
        .annotate(Sum('votes__rate'))
    )
    # NOTE(review): the author column header is the literal '******' —
    # presumably deliberate anonymisation of the username column; confirm.
    headers = {
        'author__username': '******',
        'votes__rate__sum': 'Votes'
    }
    return render_to_csv_response(
        proposals,
        append_datestamp=True,
        filename=filename,
        field_header_map=headers)
def downloadALL(request):
    """Download site data as a CSV, selected by the ``DL`` GET parameter.

    DL values:
        dlNum  — numeric trait records
        dlChar — categorical ("other") trait records
        dlCite — citations referenced by either trait table

    Responds 404 for a missing/unknown ``DL`` value or a non-GET request
    (the original crashed with UnboundLocalError / returned None there).
    """
    if request.method != 'GET':
        # BUG FIX: the original fell through and returned None (a 500).
        return HttpResponseNotFound('<h2>Unknown download selection!</h2>')
    dl = request.GET.get('DL', '')
    # Renamed the result variable: the original shadowed the builtin `all`.
    if dl == "dlNum":
        records = NumericTraits.objects.all().values(
            'species__ord', 'species__fam', 'species_id',
            'species__iucndata__iucn_status', 'traits', 'mean', 'range',
            'uncertainty', 'units', 'cite__citation_name')
    elif dl == "dlChar":
        records = OtherTraits.objects.all().values(
            'species__ord', 'species__fam', 'species_id',
            'species__iucndata__iucn_status',
            'species__iucndata__population_trend', 'variable', 'value',
            'cite__citation_name')
    elif dl == "dlCite":
        # Union of distinct citation names referenced by either trait table.
        numeric_names = [link.cite.citation_name for link in
                         CitationNumerictraitSpecies.objects.all()
                         .distinct('cite__citation_name')]
        other_names = [link.cite.citation_name for link in
                       CitationOthertraitSpecies.objects.all()
                       .distinct('cite__citation_name')]
        names = list(set(numeric_names) | set(other_names))
        records = Citation.objects.filter(citation_name__in=names).values(
            'citation_name', 'citation')
    else:
        # BUG FIX: any unexpected DL value previously raised
        # UnboundLocalError ('all' was never assigned) -> HTTP 500.
        return HttpResponseNotFound('<h2>Unknown download selection!</h2>')
    return render_to_csv_response(records)
def get(self, request, *args, **kwargs):
    """Export the event's per-proposal vote summary as a CSV download."""
    event = self.get_object()
    filename = "event_%s_export" % event.slug.replace('-', '_')
    headers = {
        'author__username': _('Author'),
        'author__email': _('Author E-Mail'),
        'votes__rate__sum': _('Vote Rate'),
        'votes__count': _('Votes Count'),
    }
    return render_to_csv_response(
        event.get_votes_to_export(),
        append_datestamp=True,
        filename=filename,
        field_header_map=headers)
def calibration_asset_export_custom(request):
    """CSV of calibration-tracked assets due on or before a cutoff date.

    The cutoff comes from the query string: either ``days`` (an offset
    from today) or a literal ``date``; with neither, respond 404.
    """
    if request.GET.get('days'):
        offset = int(request.GET.get('days'))
        cutoff = (timezone.now() + timedelta(days=offset)).date()
    elif request.GET.get('date'):
        cutoff = request.GET.get('date')
    else:
        return HttpResponseNotFound('<h2>No "days" or "date" selected!</h2>')
    columns = (
        "asset_id", "requires_calibration", "asset_description",
        "asset_manufacturer", "asset_model", "asset_serial_number",
        "asset_status__status_name", "calibration_date_prev",
        "calibration_date_next", "calibration_procedure",
        "person_responsible", "person_responsible_email",
        "asset_location_building__building_name", "asset_location_room",
    )
    due_assets = (
        Asset.objects
        .filter(requires_calibration=True, calibration_date_next__lte=cutoff)
        .order_by("calibration_date_next")
        .values(*columns)
    )
    return render_to_csv_response(
        due_assets,
        filename="Assets_Due_Calibration_Before_{}.csv".format(str(cutoff)))