def form_valid(self, form):
    """Redirect to the requested diets report, or back to the search page with an error.

    Empty year/cruise/spp selections are encoded as the literal string "None" so they
    can travel inside the URL kwargs.
    """
    report = int(form.cleaned_data["report"])
    my_year = form.cleaned_data["year"] if form.cleaned_data["year"] else "None"
    my_cruise = form.cleaned_data["cruise"] if form.cleaned_data["cruise"] else "None"
    my_spp = listrify(form.cleaned_data["spp"]) if len(form.cleaned_data["spp"]) > 0 else "None"
    if report == 1:
        return HttpResponseRedirect(reverse("diets:prey_summary_list"))  # , kwargs={'year': my_year}
    elif report == 2:
        return HttpResponseRedirect(reverse("diets:export_data_report", kwargs={
            'year': my_year,
            'cruise': my_cruise,
            'spp': my_spp,
        }))
    else:
        messages.error(self.request, "Report is not available. Please select another report.")
        # FIX: was reverse("diet:report_search") — every other URL in this view uses the
        # "diets:" namespace, so "diet:" would raise NoReverseMatch.
        return HttpResponseRedirect(reverse("diets:report_search"))
def as_dict(self):
    """Serialize this email into a plain dict: from, to, subject, message."""
    sender = self.get_from_email()
    recipients = listrify(self.get_recipient_list())
    return {
        "from": sender,
        "to": recipients,
        "subject": self.get_subject(),
        "message": self.get_message(),
    }
def from_project_to_reviewer():
    """One-shot migration: copy review/approval fields from each legacy Project
    (projects app) onto a Review attached to its matching ProjectYear.

    Matching is by project id; when several ProjectYears exist for one legacy
    project, the first one is used and a warning is printed.
    """
    # local import to avoid a circular dependency with the legacy app at module load
    from projects import models as omodels
    projects = omodels.Project.objects.all()
    for old_p in projects:
        # find the ProjectYear(s) created from this legacy project
        qs = models.ProjectYear.objects.filter(project_id=old_p.id)
        if qs.exists():
            if qs.count() > 1:
                # ambiguous mapping — report it and fall through to qs.first()
                print("problem, more than one project year of this project exists: ", old_p.project_title, " (", old_p.id,
                      ") Going to choose this one: ", qs.first(), " of ", listrify(qs))
            new_py = qs.first()
            # idempotent: re-running updates the existing Review rather than duplicating it
            review, created = models.Review.objects.get_or_create(
                project_year=new_py,
            )
            review.allocated_budget = new_py.allocated_budget
            review.approval_status = old_p.approved  # will be 1, 0 , None
            review.approval_notification_email_sent = old_p.notification_email_sent
            # both comment fields are seeded from the same legacy meeting notes
            review.general_comment = old_p.meeting_notes
            review.approver_comment = old_p.meeting_notes
            review.save()
        else:
            print("cannot find matching project:", old_p.id, old_p.project_title)
def species_list(self):
    """Sorted HTML listing of species observed in this sweep, each with its observation count."""
    distinct_species = Species.objects.filter(observations__sweep=self).distinct()
    labels = []
    for sp in distinct_species:
        count = sp.observations.filter(sweep=self).count()
        labels.append(f"{sp} ({count})")
    return mark_safe(listrify(sorted(labels), "<br>"))
def get_attendees(self, instance):
    """Comma-separated full names of the instance's attendees, or a localized "None"."""
    names = [
        get_object_or_404(models.Invitee, pk=entry["invitee"]).full_name
        for entry in instance.attendees
    ]
    if not names:
        return gettext("None")
    return listrify(names)
def contacts(self):
    """HTML mailto links for the linked contact users; falls back to the free-text contact fields."""
    if not self.contact_users.exists():
        return mark_safe(f'<a href="mailto:{self.contact_email}">{self.contact_name}</a>')
    links = []
    for u in self.contact_users.all():
        links.append(f'<a href="mailto:{u.email}">{u.get_full_name()}</a>')
    return mark_safe(listrify(links))
def coding(self):
    """Financial coding string "RC-AC-PC" built from the responsibility center,
    allotment code and project codes; placeholder x-strings stand in for missing parts.
    Multiple project codes are comma-joined and wrapped in brackets.
    """
    rc = self.responsibility_center.code if self.responsibility_center else "xxxxx"
    ac = self.allotment_code.code if self.allotment_code else "xxx"
    # FIX: evaluate the project-code queryset once instead of issuing count()
    # twice plus a separate fetch (three DB round-trips for the same data).
    codes = [project_code.code for project_code in self.existing_project_codes.all()]
    if codes:
        pc = listrify(codes)
        if len(codes) > 1:
            pc = "[" + pc + "]"
    else:
        pc = "xxxxx"
    return "{}-{}-{}".format(rc, ac, pc)
def form_valid(self, form):
    """Dispatch the selected trapnet report to its URL; unknown selections bounce
    back to the report search page with an error message.
    """
    cleaned = form.cleaned_data
    report = int(cleaned["report"])
    # empty selections travel through the URL as the literal string "None"
    my_year = cleaned["year"] if cleaned["year"] else "None"
    my_sites = listrify(cleaned["sites"]) if len(cleaned["sites"]) > 0 else "None"

    # reports 1-3 all take the same year/sites kwargs
    year_site_reports = {
        1: "trapnet:sample_report",
        2: "trapnet:entry_report",
        3: "trapnet:od1_report",
    }
    if report in year_site_reports:
        return HttpResponseRedirect(reverse(year_site_reports[report], kwargs={
            "year": my_year,
            "sites": my_sites,
        }))
    if report == 4:
        return HttpResponseRedirect(reverse("trapnet:od1_dictionary"))
    if report == 7:
        return HttpResponseRedirect(reverse("trapnet:od_spp_list"))
    if report in (5, 6):
        # 5 = English WMS, 6 = French WMS
        return HttpResponseRedirect(reverse("trapnet:od1_wms", kwargs={"lang": 1 if report == 5 else 2}))
    messages.error(self.request, "Report is not available. Please select another report.")
    return HttpResponseRedirect(reverse("trapnet:report_search"))
def generate_user_report(): """Returns a generator for an HTTP Streaming Response""" # write the header fields = [ "first_name", "last_name", "email", "date_joined", "last_login", "pageviews", "apps_used", "cumulative_users", ] qs = User.objects.filter(last_login__isnull=False).order_by("date_joined") pseudo_buffer = Echo() pseudo_buffer.write(u'\ufeff'.encode( 'utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly) writer = csv.writer(pseudo_buffer) yield writer.writerow(fields) i = 1 for obj in qs: data_row = [] # starter for field in fields: if field == "pageviews": data_row.append( sum([v.page_visits for v in obj.visitsummary_set.all()])) elif field == "apps_used": data_row.append( listrify( set([ v.application_name for v in obj.visitsummary_set.all() ]))) elif field == "cumulative_users": data_row.append(i) else: data_row.append(get_field_value(obj, field, display_time=True)) yield writer.writerow(data_row) i += 1
def form_valid(self, form):
    """Redirect to the requested report, or back to the search page with an error.

    An empty program selection is encoded as the literal string "None" so it can
    travel inside the URL kwargs.
    """
    fiscal_year = str(form.cleaned_data["fiscal_year"])
    report = int(form.cleaned_data["report"])
    programs = listrify(form.cleaned_data["programs"])
    if programs == "":
        # FIX: was `sections = "None"` — a dead assignment to a name never used in
        # this method; an empty `programs` string would break the URL reverse below.
        programs = "None"
    if report == 1:
        return HttpResponseRedirect(reverse("projects:report_neg", kwargs={
            'fy': fiscal_year,
            'programs': programs,
        }))
    else:
        messages.error(self.request, "Report is not available. Please select another report.")
        # NOTE(review): this view reverses "projects:report_neg" above but falls back
        # to "ihub:report_search" — confirm the cross-app namespace is intentional.
        return HttpResponseRedirect(reverse("ihub:report_search"))
def orgs_str(self):
    """Comma-separated listing of all related organizations."""
    return listrify(list(self.organizations.all()))
def funding_sources(self):
    """Comma-separated listing of this record's funding sources."""
    sources = self.get_funding_sources()
    return listrify(sources)
def species_list(self):
    """De-duplicated, alphabetized species names from all entries, joined with <br> tags."""
    unique_names = {str(obs.species) for obs in self.entries.all()}
    return mark_safe(listrify(sorted(unique_names), "<br>"))
def get_organizations(self, instance):
    """Comma-separated listing of the instance's organizations."""
    orgs = instance.organizations.all()
    return listrify(orgs)
def generate_open_data_ver_1_wms_report(year, lang):
    """ Simple report for web mapping service on FGP.

    Builds a bilingual station-summary CSV (lang 1 = English, otherwise French):
    one row per station with its seasons, location, other species observed, and a
    yes/no detection flag for each of the seven AIS species of interest.
    `year` optionally restricts samples to one season ("None" means all years).
    """
    # Botrylloïdes violaceus, Botryllus shlosseri, Caprella mutica, Ciona intestinalis, Codium fragile, Membranipora membranacea, Styela clava
    species_id_list = [48, 24, 47, 23, 55, 59, 25]
    species_qs = models.Species.objects.filter(id__in=species_id_list)
    filename = "station_summary_report_eng.csv" if lang == 1 else "station_summary_report_fra.csv"
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    response.write(u'\ufeff'.encode('utf8'))  # BOM (optional...Excel needs it to open UTF-8 file properly)
    writer = csv.writer(response)
    yes = "yes" if lang == 1 else "oui"
    no = "no" if lang == 1 else "non"
    header_row = [
        'seasons' if lang == 1 else "saisons",
        'station_code' if lang == 1 else "code_de_station",
        'station_name' if lang == 1 else "nom_de_station",
        'station_province' if lang == 1 else "province_de_station",
        'station_description' if lang == 1 else "description_de_station",
        'station_latitude' if lang == 1 else "latitude_de_station",
        'station_longitude' if lang == 1 else "longitude_de_station",
        'list_of_other_species_observed' if lang == 1 else "liste_des_espèces_observées",
    ]
    # one extra "detected" column per species of interest, named like "G_species_detected"
    for species in species_qs:
        # abbreviate genus to its initial; keep the rest of the scientific name
        first_name = species.scientific_name.split(" ")[0][:1].upper()
        if len(species.scientific_name.split(" ")) > 2:
            second_name = " ".join(species.scientific_name.split(" ")[1:])
        else:
            second_name = species.scientific_name.split(" ")[1]
        display_name = "{}_{}".format(
            first_name,
            second_name,
        )
        my_str = "detected" if lang == 1 else "détecté"
        header_row.append("{}_{}".format(display_name, my_str))
    writer.writerow(header_row)
    samples = models.Sample.objects.all()
    # if there is a year provided, filter by only this year
    if year and year != "None":
        samples = samples.filter(season=int(year))
    # distinct stations represented in the (possibly year-filtered) samples
    stations = [
        models.Station.objects.get(pk=obj["station"])
        for obj in samples.order_by("station").values("station").distinct()
    ]
    # make sure to exclude the lost lines and surfaces; this is sort of redundant since if a line is lost, all surfaces should also be labelled as lost.
    surfacespecies = models.SurfaceSpecies.objects.filter(
        surface__line__sample_id__in=[obj["id"] for obj in samples.order_by("id").values("id").distinct()],
        surface__line__is_lost=False,
        surface__is_lost=False,
    )
    for station in stations:
        # species seen at this station that are NOT among the seven of interest
        other_spp = listrify([
            models.Species.objects.get(pk=obj["species"]).name_plaintext
            for obj in surfacespecies.filter(surface__line__sample__station=station).order_by("species").values("species").distinct()
            if obj["species"] not in species_id_list
        ])
        seasons = listrify([
            obj["surface__line__sample__season"]
            for obj in surfacespecies.filter(surface__line__sample__station=station).order_by("surface__line__sample__season").values(
                "surface__line__sample__season").distinct()
        ])
        data_row = [
            seasons,
            station.id,
            station.station_name,
            station.province.abbrev_eng if lang == 1 else station.province.abbrev_fre,
            station.site_desc,
            station.latitude_n,
            station.longitude_w,
            other_spp,
        ]
        # yes/no detection flag for each species of interest at this station
        for species in species_qs:
            spp_count = surfacespecies.filter(
                surface__line__sample__station=station,
                species=species,
            ).count()
            if spp_count > 0:
                data_row.append(yes)
            else:
                data_row.append(no)
        writer.writerow(data_row)
    return response
def consultation_instructions_export_spreadsheet(orgs=None):
    """Export consultation instructions to a dated xlsx mail-merge sheet and
    return its media URL.

    `orgs` is either None/"None" (export all) or a comma-separated string of
    organization ids to restrict the export to.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'ihub', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'ihub', 'temp', target_file)
    # first, filter out the "none" placeholder
    orgs = None if orgs == "None" else orgs
    if orgs:
        # we have to refine the queryset to only the selected orgs
        object_list = ml_models.ConsultationInstruction.objects.filter(organization_id__in=orgs.split(","))
    else:
        # else return all orgs
        object_list = ml_models.ConsultationInstruction.objects.all()
    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)
    # create formatting (title_format is unused here; kept for consistency with sibling exporters)
    title_format = workbook.add_format({
        'bold': True,
        "align": 'normal',
        'font_size': 24,
    })
    header_format = workbook.add_format({
        'bold': True,
        'border': 1,
        'border_color': 'black',
        'bg_color': '#a6cbf5',
        "align": 'normal',
        "text_wrap": True,
        "valign": 'top',
    })
    normal_format = workbook.add_format({
        "align": 'left',
        "text_wrap": True,
        'num_format': 'mm/dd/yyyy',
        "valign": 'top',
    })
    # define the header
    header = [
        "Community",
        "Address Letter To",
        "cc: on Bottom of Letter",
        "Mailing Address",
        "Chief/Primary Point of Contact Name",
        "Chief/Primary Point of Contact Email",
        "Chief/Primary Point of Contact Phone",
        "Paper Copy",
        "To",
        "Cc",
        "Cc Commercial",
    ]
    my_ws = workbook.add_worksheet(name="mail_merge")
    # create the col_max column to store the length of each header
    # should be a maximum column width to 100
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
    my_ws.write_row(0, 0, header, header_format)
    i = 1
    for obj in object_list.all():
        data_row = [
            str(obj.organization),
            obj.letter_to,
            obj.letter_cc,
            obj.organization.full_address,
            obj.organization.chief.person.full_name if obj.organization.chief else "",
            obj.organization.chief.person.email_1 if obj.organization.chief else "",
            obj.organization.chief.person.phone_1 if obj.organization.chief else "",
            obj.paper_copy,
            listrify([consultee.member.person.email_1 for consultee in obj.to_email_recipients.all()], "; "),
            listrify([consultee.member.person.email_1 for consultee in obj.cc_email_recipients.all()], "; "),
            listrify([consultee.member.person.email_1 for consultee in obj.cc_commercial_email_recipients.all()], "; "),
        ]
        # adjust the width of the columns based on the max string length in each col
        ## replace col_max[j] if str length j is bigger than stored value (capped at 75)
        j = 0
        for d in data_row:
            if len(str(d)) > col_max[j]:
                if len(str(d)) < 75:
                    col_max[j] = len(str(d))
                else:
                    col_max[j] = 75
            j += 1
        my_ws.write_row(i, 0, data_row, normal_format)
        # FIX: `i` was never incremented, so every data row overwrote spreadsheet
        # row 1; advance to the next row like the sibling report generators do.
        i += 1
    # set column widths
    for j in range(0, len(col_max)):
        my_ws.set_column(j, j, width=col_max[j] * 1.1)
    workbook.close()
    return target_url
def generate_consultation_report(orgs, sectors, statuses, from_date, to_date, entry_note_types, entry_note_statuses, org_regions, entry_regions):
    """Export filtered consultation entries to a dated xlsx workbook (one sheet
    per sector) and return its media URL.

    Every filter argument arrives as a comma-separated id string or the literal
    "None" placeholder; dates are "YYYY-MM-DD" strings or "None". Entries whose
    date range overlaps [from_date, to_date] are kept.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'temp', target_file)
    # first, filter out the "none" placeholder
    if sectors == "None":
        sectors = None
    if orgs == "None":
        orgs = None
    if statuses == "None":
        statuses = None
    if from_date == "None":
        from_date = None
    if to_date == "None":
        to_date = None
    if org_regions == "None":
        org_regions = None
    if entry_regions == "None":
        entry_regions = None
    # note-type / note-status filters are parsed straight to int lists
    if entry_note_types == "None":
        entry_note_types = None
    else:
        entry_note_types = [int(i) for i in entry_note_types.split(",")] if entry_note_types else None
    if entry_note_statuses == "None":
        entry_note_statuses = None
    else:
        entry_note_statuses = [int(i) for i in entry_note_statuses.split(",")] if entry_note_statuses else None
    # get an entry list for the fiscal year (if any)
    entry_list = models.Entry.objects.filter(entry_type__name__icontains="consultation").order_by("status", "-initial_date")
    if org_regions:
        # we have to refine the queryset to only the selected org regions
        region_list = [Region.objects.get(pk=int(o)) for o in org_regions.split(",")]
        entry_list = entry_list.filter(organizations__regions__in=region_list)
    if entry_regions:
        # we have to refine the queryset to only the selected entry regions
        region_list = [Region.objects.get(pk=int(o)) for o in entry_regions.split(",")]
        entry_list = entry_list.filter(regions__in=region_list)
    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [ml_models.Organization.objects.get(pk=int(o)) for o in orgs.split(",")]
        entry_list = entry_list.filter(organizations__in=org_list)
    if sectors:
        # we have to refine the queryset to only the selected sectors
        sector_list = [ml_models.Sector.objects.get(pk=int(s)) for s in sectors.split(",")]
        entry_list = entry_list.filter(sectors__in=sector_list)
    if statuses:
        # we have to refine the queryset to only the selected statuses
        status_list = [models.Status.objects.get(pk=int(o)) for o in statuses.split(",")]
        entry_list = entry_list.filter(status__in=status_list)
    if from_date or to_date:
        # keep entries whose [initial_date, anticipated_end_date] overlaps the requested window
        id_list = []
        d0_start = datetime.strptime(from_date, "%Y-%m-%d").replace(tzinfo=timezone.get_current_timezone()) if from_date else None
        d0_end = datetime.strptime(to_date, "%Y-%m-%d").replace(tzinfo=timezone.get_current_timezone()) if to_date else None
        for e in entry_list:
            d1_start = e.initial_date
            d1_end = e.anticipated_end_date
            if get_date_range_overlap(d0_start, d0_end, d1_start, d1_end) > 0:
                id_list.append(e.id)
        entry_list = entry_list.filter(id__in=id_list)
    # FIX: distinct() returns a NEW queryset — the original discarded the result,
    # so M2M joins above could yield duplicate entries in the report.
    entry_list = entry_list.distinct()
    workbook = xlsxwriter.Workbook(target_file_path)
    # create formatting (title_format is unused here; kept for consistency with sibling exporters)
    title_format = workbook.add_format({
        'bold': True,
        "align": 'normal',
        'font_size': 24,
    })
    header_format = workbook.add_format({
        'bold': True,
        'border': 1,
        'border_color': 'black',
        'bg_color': '#a6cbf5',
        "align": 'normal',
        "text_wrap": True,
        "valign": 'top',
    })
    normal_format = workbook.add_format({
        "align": 'left',
        "text_wrap": True,
        'num_format': 'mm/dd/yyyy',
        "valign": 'top',
    })
    highlighted_format = workbook.add_format({
        "align": 'left',
        "text_wrap": True,
        'num_format': 'mm/dd/yyyy',
        "valign": 'top',
        "bg_color": "yellow"
    })
    # we want a sheet for every sector represented in the entry list
    sector_ids = []
    for e in entry_list:
        for s in e.sectors.all():
            sector_ids.append(s.id)
    sectors = ml_models.Sector.objects.filter(id__in=sector_ids)
    # there is a problem: some of the sectors have duplicate names, especially when truncated..
    for s in sectors:
        sector_name = truncate(s.name, 30, False)
        if sectors.filter(name__icontains=sector_name).count() > 1:
            # disambiguate duplicate (truncated) names with the sector id
            sector_name = truncate(s.name, 25, False) + f" ({s.id})"
        my_ws = workbook.add_worksheet(name=sector_name)
        entries = s.entries.filter(id__in=[e.id for e in entry_list])
        # define the header
        header = [
            "Title",
            "Organizations",
            "Status",
            "Persons/lead",
            "DFO programs involved",
            "letter sent",
            "Response Requested by",
            "Proponent",
            "FAA triggered (Yes/No)",
            "Comments",
        ]
        # create the col_max column to store the length of each header
        # should be a maximum column width to 100
        col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
        my_ws.write_row(0, 0, header, header_format)
        i = 1
        for e in entries.all():
            people = nz(listrify([p for p in e.people.all()], "\n\n"), "")
            # concatenate the entry's notes, respecting the note-type/status filters
            notes = ""
            if e.notes.exists():
                for n in e.notes.all():
                    if not entry_note_types or (n.type in entry_note_types):
                        if not entry_note_statuses or (n.status_id in entry_note_statuses):
                            if len(notes):
                                notes += "\n\n*************************\n" + str(n)
                            else:
                                notes = str(n)
            data_row = [
                e.title,
                e.orgs_str,
                str(e.status),
                people,
                e.sectors_str,
                e.initial_date.strftime("%m/%d/%Y") if e.initial_date else " ---",
                e.response_requested_by.strftime("%m/%d/%Y") if e.response_requested_by else " ---",
                e.proponent,
                yesno(e.is_faa_required, "yes,no,no"),
                notes.replace("\\r\\n", "\r\n"),
            ]
            # adjust the width of the columns based on the max string length in each col
            ## replace col_max[j] if str length j is bigger than stored value (capped at 75)
            j = 0
            for d in data_row:
                if len(str(d)) > col_max[j]:
                    if len(str(d)) < 75:
                        col_max[j] = len(str(d))
                    else:
                        col_max[j] = 75
                j += 1
            # highlight entries that appear on more than one sector sheet
            # (renamed from `format`, which shadowed the builtin)
            row_format = normal_format
            if e.sectors.count() > 1:
                row_format = highlighted_format
            my_ws.write_row(i, 0, data_row, row_format)
            i += 1
        # set column widths
        for j in range(0, len(col_max)):
            my_ws.set_column(j, j, width=col_max[j] * 1.1)
    workbook.close()
    return target_url
def generate_sara_application(project, lang):
    """Fill the SARA application docx template with data from the project's most
    recent year and return the media URL of the generated file.

    Placeholders (TAG_*) are replaced wherever they appear in table cells or
    body paragraphs of the template.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'temp')
    target_file = "temp_export.docx"
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'temp', target_file)
    # NOTE(review): both branches point at the same English template — confirm
    # whether a French template ("sara_template_fr.docx"?) was ever intended.
    if lang == "fr":
        template_file_path = os.path.join(settings.BASE_DIR, 'projects2', 'static', "projects2", "sara_template.docx")
    else:
        template_file_path = os.path.join(settings.BASE_DIR, 'projects2', 'static', "projects2", "sara_template.docx")
    with open(template_file_path, 'rb') as f:
        source_stream = BytesIO(f.read())
        document = Document(source_stream)
        source_stream.close()
    # only the most recent project year feeds the application
    year = project.years.last()
    priorities = str()
    priorities += f'{year.priorities}\n\n'
    # type=1 activities are milestones; type=2 are deliverables
    milestones = str()
    for milestone in models.Activity.objects.filter(project_year=year, type=1):
        mystr = f'{date(milestone.target_date)} -> {milestone.name}: {milestone.description}. Responsible Parties: {milestone.responsible_party}'
        milestones += f'{mystr}\n'
    deliverables = str()
    for deliverable in models.Activity.objects.filter(project_year=year, type=2):
        mystr = f'{date(deliverable.target_date)} -> {deliverable.name}: {deliverable.description}. Responsible Parties: {deliverable.responsible_party}'
        deliverables += f'{mystr}\n'
    # accumulate O&M, capital and salary costs charged to SARA funding sources
    total_cost = 0
    om_costs = str()
    for cost in models.OMCost.objects.filter(project_year=year, funding_source__name__icontains="sara", amount__gt=0):
        mystr = f'{cost.om_category} --> {cost.description} Amount: {currency(cost.amount)}'
        om_costs += f'{mystr}\n'
        total_cost += cost.amount
    capital_costs = str()
    for cost in models.CapitalCost.objects.filter(project_year=year, funding_source__name__icontains="sara", amount__gt=0):
        mystr = f'{cost.get_category_display()} --> {cost.description} Amount: {currency(cost.amount)}'
        capital_costs += f'{mystr}\n'
        total_cost += cost.amount
    salary_costs = str()
    for staff in models.Staff.objects.filter(project_year=year, funding_source__name__icontains="sara", amount__gt=0):
        mystr = f'{nz(staff.smart_name)} ({nz(staff.level)}) --> Duration in weeks: {nz(staff.duration_weeks)} Amount: {currency(staff.amount)}'
        salary_costs += f'{mystr}\n'
        total_cost += staff.amount
    # template placeholder -> replacement value
    field_dict = dict(
        TAG_TITLE=project.title,
        TAG_OM_COSTS=om_costs,
        TAG_CAPITAL_COSTS=capital_costs,
        TAG_SALARY_COSTS=salary_costs,
        TAG_TOTAL_COST=currency(total_cost, True),
        TAG_LEADS=listrify(project.lead_staff.all()),
        TAG_TAGS=listrify(project.tags.all()),
        TAG_OVERVIEW=project.overview,
        TAG_ADDITIONAL_NOTES=year.additional_notes,
        TAG_DELIVERABLES=deliverables,
        TAG_MILESTONES=milestones,
        TAG_PRIORITIES_METHODS=priorities,
        TAG_REPORTING=project.reporting_mechanism,
        TAG_FUNDING=project.future_funding_needs,
    )
    for item in field_dict:
        # replace the tagged placeholders in tables
        for table in document.tables:
            for row in table.rows:
                for cell in row.cells:
                    for paragraph in cell.paragraphs:
                        if item in paragraph.text:
                            try:
                                paragraph.text = paragraph.text.replace(item, str(field_dict[item]))
                            except Exception as E:
                                print(E, field_dict[item])
                                paragraph.text = "MISSING!"
        # replace the tagged placeholders in paragraphs
        for paragraph in document.paragraphs:
            if item in paragraph.text:
                try:
                    # FIX: str() was missing here (the table branch has it), so any
                    # non-string value (e.g. a None additional_notes) raised TypeError
                    # and the paragraph was clobbered with "MISSING!".
                    paragraph.text = paragraph.text.replace(item, str(field_dict[item]))
                except Exception as E:
                    # FIX: was a bare except; narrowed and logged like the table branch
                    print(E, field_dict[item])
                    paragraph.text = "MISSING!"
    document.save(target_file_path)
    return target_url
def get_regions(self, instance):
    """Comma-separated listing of the instance's regions."""
    regions = instance.regions.all()
    return listrify(regions)
def generate_sar_workplan(year, region):
    """Fill the SAR workplan xlsx template with the fiscal year's SAR-funded
    project years (optionally restricted to one region) and return its media URL.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'temp')
    target_file = "temp_export.xlsx"
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'temp', target_file)
    template_file_path = os.path.join(settings.BASE_DIR, 'projects2', 'static', "projects2", "sar_workplan_template.xlsx")
    year_txt = str(FiscalYear.objects.get(pk=year))
    # project years of the requested fiscal year whose project's default funding
    # source name contains "SAR"
    # NOTE(review): the original comment claimed this filtered out draft/not-approved/
    # cancelled statuses and matched "csrf" — neither appears in the actual filter; confirm intent.
    qs = models.ProjectYear.objects.filter(
        project__default_funding_source__name__contains="SAR", fiscal_year=year)
    if region != "None":
        qs = qs.filter(project__section__division__branch__region_id=region)
    wb = load_workbook(filename=template_file_path)
    # to order worksheets so the first sheet comes before the template sheet, rename the template and then copy the
    # renamed sheet, then rename the copy to template so it exists for other sheets to be created from
    ws = wb['template']
    ws.title = year_txt
    wb.copy_worksheet(ws).title = str("template")
    try:
        ws = wb[year_txt]
    except KeyError:
        print(year_txt, "is not a valid name of a worksheet")
    # start writing data at row 3 in the sheet
    row_count = 3
    for item in qs:
        # A & J: project tags; B: total costs; C: project leads; H: attached files;
        # I: project id; K: priorities; L: title; M: overview; N/O: activities by type
        ws['A' + str(row_count)].value = listrify([t.name for t in item.project.tags.all()])
        ws['J' + str(row_count)].value = ws['A' + str(row_count)].value
        ws['B' + str(row_count)].value = sum([c.amount for c in item.costs])
        ws['C' + str(row_count)].value = listrify([
            "{} {}".format(u.first_name, u.last_name)
            for u in item.get_project_leads_as_users()
        ])
        ws['H' + str(row_count)].value = listrify([f.name for f in item.project.files.all()])
        ws['I' + str(row_count)].value = item.project.id
        if item.priorities:
            ws['K' + str(row_count)].value = html2text(item.priorities)
        ws['L' + str(row_count)].value = item.project.title
        if item.project.overview_html:
            ws['M' + str(row_count)].value = html2text(item.project.overview_html)
        # type=1 activities (presumably milestones — confirm against the Activity model)
        activities = [html2text(act.description) for act in item.activities.filter(type=1)]
        ws['N' + str(row_count)].value = listrify(activities)
        # type=2 activities (presumably deliverables — confirm against the Activity model)
        activities = [html2text(act.description) for act in item.activities.filter(type=2)]
        ws['O' + str(row_count)].value = listrify(activities)
        row_count += 1
    wb.save(target_file_path)
    return target_url
def get_sectors(self, instance):
    """Comma-separated listing of the instance's sectors."""
    sectors = instance.sectors.all()
    return listrify(sectors)
def generate_culture_committee_report():
    """Export approved competitive projects (Project Planning) plus archived
    publication-inventory projects to a dated xlsx workbook and return its media URL.
    """
    # local import to avoid a circular dependency with the publications app at module load
    from publications import models as pi_models
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'temp', target_file)
    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)
    # create formatting variables (several are unused here; kept for consistency with sibling exporters)
    title_format = workbook.add_format({
        'bold': True,
        "align": 'normal',
        'font_size': 24,
    })
    header_format = workbook.add_format({
        'bold': True,
        'border': 1,
        'border_color': 'black',
        "align": 'normal',
        "text_wrap": True
    })
    total_format = workbook.add_format({
        'bold': True,
        "align": 'left',
        "text_wrap": True,
        'num_format': '$#,##0'
    })
    normal_format = workbook.add_format({
        "align": 'left',
        "text_wrap": False,
        'border': 1,
        'border_color': 'black',
    })
    currency_format = workbook.add_format({'num_format': '#,##0.00'})
    date_format = workbook.add_format({
        'num_format': "yyyy-mm-dd",
        "align": 'left',
    })
    field_list = [
        "Project Id",
        "Title",
        "Description",
        "Years",
        "Keywords",
        "Leads",
        "Region",
        "Program / Funding Source",
        "Source",
    ]
    # define the header
    header = field_list
    title = "DM Apps Science Culture Committee Report"
    # define a worksheet
    my_ws = workbook.add_worksheet(name="projects")
    my_ws.write(0, 0, title, title_format)
    my_ws.write_row(2, 0, header, header_format)
    # FIX: col_max was re-created inside every loop iteration, so the final
    # set_column() widths reflected only the LAST row processed. Initialize it
    # once (capped at 100) and let all rows widen it, as the sibling exporters do.
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
    i = 3
    # approved (status=4) years of competitive projects, from Project Planning
    projects = models.Project.objects.filter(default_funding_source__is_competitive=True, years__status=4).distinct()
    for project in projects.order_by("id"):
        j = 0
        for field in field_list:
            my_val = None
            if "Project Id" in field:
                my_val = project.id
                my_ws.write(i, j, my_val, normal_format)
            elif "Title" in field:
                my_val = project.title
                my_ws.write(i, j, my_val, normal_format)
            elif "Description" in field:
                my_val = html2text(project.overview_html)
                my_ws.write(i, j, my_val, normal_format)
            elif "Years" in field:
                my_val = listrify([y.fiscal_year for y in project.years.filter(status=4)])
                my_ws.write(i, j, my_val, normal_format)
            elif "Keywords" in field:
                my_val = listrify([str(t) for t in project.tags.all()])
                my_ws.write(i, j, my_val, normal_format)
            elif "Leads" in field:
                my_val = listrify([str(staff) for staff in project.lead_staff.all()])
                my_ws.write(i, j, my_val, normal_format)
            elif "Region" in field:
                my_val = project.section.division.branch.region.tname
                my_ws.write(i, j, my_val, normal_format)
            elif "Program / Funding Source" in field:
                my_val = str(project.default_funding_source)
                my_ws.write(i, j, my_val, normal_format)
            elif field == "Source":
                my_val = "Project Planning"
                my_ws.write(i, j, my_val, normal_format)
            # adjust the width of the columns based on the max string length in each col
            ## replace col_max[j] if str length j is bigger than stored value (capped at 50)
            if len(str(my_val)) > col_max[j]:
                if len(str(my_val)) < 50:
                    col_max[j] = len(str(my_val))
                else:
                    col_max[j] = 50
            j += 1
        i += 1
    # archived projects from the publications (Project Inventory) app
    archived_projects = pi_models.Project.objects.all()
    for project in archived_projects.order_by("id"):
        j = 0
        for field in field_list:
            my_val = None
            if "Project Id" in field:
                my_val = project.id
                my_ws.write(i, j, my_val, normal_format)
            elif "Title" in field:
                my_val = project.title
                my_ws.write(i, j, my_val, normal_format)
            elif "Description" in field:
                my_val = project.abstract
                my_ws.write(i, j, my_val, normal_format)
            elif "Years" in field:
                my_val = project.year
                my_ws.write(i, j, my_val, normal_format)
            elif "Keywords" in field:
                my_val = listrify([str(t) for t in project.theme.all()])
                my_ws.write(i, j, my_val, normal_format)
            elif "Leads" in field:
                my_val = listrify([str(staff) for staff in project.dfo_contact.all()])
                my_ws.write(i, j, my_val, normal_format)
            elif "Region" in field:
                if project.division.exists():
                    regions = Region.objects.filter(id__in=[p.branch.region.id for p in project.division.all()])
                    my_val = listrify([r.tname for r in regions])
                else:
                    my_val = "n/a"
                my_ws.write(i, j, my_val, normal_format)
            elif "Program / Funding Source" in field:
                my_val = listrify([str(item) for item in project.program_linkage.all()])
                my_ws.write(i, j, my_val, normal_format)
            elif field == "Source":
                my_val = "Project Inventory"
                my_ws.write(i, j, my_val, normal_format)
            # adjust the width of the columns based on the max string length in each col
            ## replace col_max[j] if str length j is bigger than stored value (capped at 50)
            if len(str(my_val)) > col_max[j]:
                if len(str(my_val)) < 50:
                    col_max[j] = len(str(my_val))
                else:
                    col_max[j] = 50
            j += 1
        i += 1
    # set column widths
    for j in range(0, len(col_max)):
        my_ws.set_column(j, j, width=col_max[j] * 1.1)
    workbook.close()
    return target_url
def generate_open_data_ver_1_wms_report(lang):
    """ Simple report for web mapping service on FGP.

    Builds a bilingual site-summary CSV (lang 1 = English, otherwise French):
    one row per river site with its seasons, species caught, total / mean annual
    catch, and total / mean columns for four species groups of interest.
    """
    # It is important that we remove any samples taken at Matapedia River since these data do not belong to us.
    qs = models.Entry.objects.all().filter(sample__site__exclude_data_from_site=False)
    filename = "site_summary_report_eng.csv" if lang == 1 else "site_summary_report_fra.csv"
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    response.write(u'\ufeff'.encode('utf8'))  # BOM (optional...Excel needs it to open UTF-8 file properly)
    writer = csv.writer(response)
    # headers are based on csv provided by GD
    species_list = [
        models.Species.objects.get(pk=obj["species"])
        for obj in qs.order_by("species").values("species").distinct()
    ]
    # species groups of interest: species codes plus English/French column-name fragments
    select_species_dict = {
        "fish_gr_1": {
            "codes": [1732],
            "eng": "Altantic_salmon_smolts",
            "fra": "de_saumons_atlantiques_saumoneaux",
        },
        "fish_gr_2": {
            "codes": [3410],
            "eng": "American_eels",
            "fra": "d_anguilles_d_Amériques",
        },
        "fish_gr_3": {
            "codes": [140, 150, 151, 152],
            "eng": "lampreys",
            "fra": "de_lamproies",
        },
        "fish_gr_4": {
            "codes": [2621, 2631, 2620, 2630, 2640],
            "eng": "dace",
            "fra": "de_vandoise",
        },
    }
    header_row = [
        'Site_name' if lang == 1 else "Nom_de_site",
        'Site_latitude' if lang == 1 else "Latitude_de_site",
        'Site_longitude' if lang == 1 else "Longitude_de_site",
        'Seasons_in_operation' if lang == 1 else "Saisons_en_opération",
        'List_of_species_caught' if lang == 1 else "Liste_des_espèces_capturées",
        'Total_number_of_fish_caught' if lang == 1 else "Nombre_total_de_poisson_capturées",
        'Mean_annual_number_of_fish_caught' if lang == 1 else "Nombre_annuel_moyen_de_poissons_capturés",
    ]
    # two extra columns (total + mean annual) per species group of interest
    for key in select_species_dict:
        if lang == 1:
            header_row.extend([
                'Total_number_of_{}_caught'.format(select_species_dict[key]["eng"]),
                'Mean_annual_number_of_{}_caught'.format(select_species_dict[key]["eng"]),
            ])
        else:
            header_row.extend([
                'Nombre_total_{}_capturées'.format(select_species_dict[key]["fra"]),
                'Nombre_annuel_moyen_{}_capturés'.format(select_species_dict[key]["fra"]),
            ])
    writer.writerow(header_row)
    # lets start by getting a list of samples and years
    # samples = [models.Sample.objects.get(pk=obj["sample"]) for obj in qs.order_by("sample").values("sample").distinct()]
    sites = [
        models.RiverSite.objects.get(pk=obj["sample__site"])
        for obj in qs.order_by("sample__site").values("sample__site").distinct()
    ]
    for site in sites:
        seasons = listrify([
            obj["sample__season"]
            for obj in qs.filter(sample__site=site).order_by("sample__season").values("sample__season").distinct()
        ])
        if lang == 1:
            spp_list = listrify([
                models.Species.objects.get(pk=obj["species"]).common_name_eng
                for obj in qs.filter(sample__site=site).order_by("species").values("species").distinct()
            ])
        else:
            spp_list = listrify([
                models.Species.objects.get(pk=obj["species"]).common_name_fre
                for obj in qs.filter(sample__site=site).order_by("species").values("species").distinct()
            ])
        total_freq = qs.filter(
            sample__site=site,
        ).values("frequency").order_by("frequency").aggregate(dsum=Sum("frequency"))["dsum"]
        # mean annual catch = total over the number of seasons the site operated
        avg_freq = floatformat(int(total_freq) / len(seasons.split(",")), 2)
        data_row = [
            site,
            site.latitude_n,
            site.longitude_w,
            seasons,
            spp_list,
            total_freq,
            avg_freq,
        ]
        for key in select_species_dict:
            freq_sum = qs.filter(
                sample__site=site,
                species__code__in=select_species_dict[key]["codes"]).values(
                    "frequency").order_by("frequency").aggregate(dsum=Sum("frequency"))["dsum"]
            # NOTE(review): Sum() aggregates return None when no rows match, so
            # int(freq_sum) will raise TypeError for a site with no catches in this
            # group — confirm whether that case can occur in the data.
            freq_avg = floatformat(int(freq_sum) / len(seasons.split(",")), 2)
            data_row.extend([
                freq_sum,
                freq_avg,
            ])
        writer.writerow(data_row)
    return response
def generate_project_list(user, year, region, section):
    """
    Stream a CSV listing of project-years visible to the given user.

    Admin users get all project-years for the given fiscal year, optionally
    narrowed to a section or region; other users get the project-years of
    the sections they manage.

    :param user: the requesting user (drives permission branching)
    :param year: fiscal year id used to filter the admin queryset
    :param region: region id as a string, or the literal "None" placeholder
    :param section: section id as a string, or the literal "None" placeholder
    :return: HttpResponse carrying the CSV file
    """
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    # BOM (optional...Excel needs it to open UTF-8 file properly)
    response.write(u'\ufeff'.encode('utf8'))
    writer = csv.writer(response)
    # NOTE(review): status_choices is assigned but never used below — confirm
    # whether it can be removed.
    status_choices = models.ProjectYear.status_choices
    # Column spec: entries of the form "attr|label" are resolved by
    # get_verbose_label / get_field_value; bare labels are handled by the
    # special cases in the loop below.
    fields = [
        'region',
        'division',
        'project.section|section',
        'project.id|Project Id',
        'fiscal_year',
        'project.title|title',
        'Overview',
        'Overview word count',
        'project.default_funding_source|Primary funding source',
        'project.functional_group|Functional group',
        'Project leads',
        'status',
        'updated_at|Last modified date',
        'modified_by|Last modified by',
        'Last modified description',
        'Activity count',
        'Staff count',
        'Sum of staff FTE (weeks)',
        'Sum of costs',
    ]
    if in_projects_admin_group(user):
        # admins: everything in the fiscal year, optionally narrowed;
        # section takes precedence over region when both are supplied
        qs = ProjectYear.objects.filter(fiscal_year_id=year).distinct()
        if section != "None":
            qs = qs.filter(project__section_id=section)
        elif region != "None":
            qs = qs.filter(project__section__division__branch__region_id=region)
    else:
        # non-admins: limited to the sections they manage.
        # NOTE(review): this branch ignores year/region/section — confirm
        # that is intentional.
        sections = utils.get_manageable_sections(user)
        qs = ProjectYear.objects.filter(project__section__in=sections).distinct()
    header_row = [
        get_verbose_label(ProjectYear.objects.first(), header)
        for header in fields
    ]
    writer.writerow(header_row)
    for obj in qs:
        data_row = list()
        for field in fields:
            # substring matches below rely on the exact spellings in
            # `fields`; order matters (e.g. "updated_at" is matched before
            # "Last modified description")
            if "division" in field:
                val = " ---"  # placeholder when the project has no section
                if obj.project.section:
                    val = obj.project.section.division.tname
            elif "region" in field:
                val = " ---"
                if obj.project.section:
                    val = obj.project.section.division.branch.region.tname
            elif "leads" in field:
                val = listrify(obj.get_project_leads_as_users())
            elif "updated_at" in field:
                val = obj.updated_at.strftime("%Y-%m-%d")
            elif "Last modified description" in field:
                val = naturaltime(obj.updated_at)
            elif field == "Overview":
                # strip the HTML down to plain text for the spreadsheet
                val = html2text(nz(obj.project.overview_html, ""))
            elif field == "Overview word count":
                val = len(html2text(nz(obj.project.overview_html, "")).split(" "))
            elif field == "Activity count":
                val = obj.activities.count()
            elif field == "Staff count":
                val = obj.staff_set.count()
            elif field == "Sum of staff FTE (weeks)":
                val = obj.staff_set.order_by("duration_weeks").aggregate(
                    dsum=Sum("duration_weeks"))["dsum"]
            elif field == "Sum of costs":
                # total of O&M + capital + salary costs (None treated as 0)
                val = nz(obj.omcost_set.filter(amount__isnull=False).aggregate(dsum=Sum("amount"))["dsum"], 0) + \
                      nz(obj.capitalcost_set.filter(amount__isnull=False).aggregate(dsum=Sum("amount"))["dsum"], 0) + \
                      nz(obj.staff_set.filter(amount__isnull=False).aggregate(dsum=Sum("amount"))["dsum"], 0)
            else:
                # fall back to generic attribute resolution
                val = get_field_value(obj, field)
            data_row.append(val)
        writer.writerow(data_row)
    return response
def regions(self): projects = self.projects.filter(section__isnull=False) return listrify( list(set([str(p.section.division.branch.region) for p in projects])))
def form_valid(self, form):
    """
    Redirect to the selected iHub report URL built from the form data.

    Multi-select fields are serialized with listrify(); empty values are
    replaced with the literal "None" placeholder so they can travel as URL
    kwargs (the report views translate "None" back to no-filter).
    """
    sectors = listrify(form.cleaned_data["sectors"])
    orgs = listrify(form.cleaned_data["organizations"])
    orgs_w_consultation_instructions = listrify(form.cleaned_data["orgs_w_consultation_instructions"])
    statuses = listrify(form.cleaned_data["statuses"])
    entry_types = listrify(form.cleaned_data["entry_types"])
    # BUGFIX: the default 0 must be nz()'s second argument. The original
    # `int(nz(...), 0)` passed 0 as int()'s *base* argument, so an empty
    # selection produced int("", 0) -> ValueError.
    org = int(nz(form.cleaned_data["single_org"], 0))
    fy = str(form.cleaned_data["fiscal_year"])
    report_title = str(form.cleaned_data["report_title"])
    report = int(form.cleaned_data["report"])

    # kwargs shared by the capacity/summary reports (1, 3, 4)
    summary_kwargs = {
        "fy": nz(fy, "None"),
        "orgs": nz(orgs, "None"),
        "sectors": nz(sectors, "None"),
    }
    # kwargs shared by the consultation-log reports (5, 6)
    log_kwargs = {
        "fy": nz(fy, "None"),
        "orgs": nz(orgs, "None"),
        "statuses": nz(statuses, "None"),
        "entry_types": nz(entry_types, "None"),
        "report_title": nz(report_title, "None"),
    }

    if report == 1:
        return HttpResponseRedirect(reverse("ihub:capacity_xlsx", kwargs=summary_kwargs))
    elif report == 2:
        return HttpResponseRedirect(reverse("ihub:report_q", kwargs={"org": org}))
    elif report == 3:
        return HttpResponseRedirect(reverse("ihub:summary_xlsx", kwargs=summary_kwargs))
    elif report == 4:
        return HttpResponseRedirect(reverse("ihub:summary_pdf", kwargs=summary_kwargs))
    elif report == 5:
        return HttpResponseRedirect(reverse("ihub:consultation_log", kwargs=log_kwargs))
    elif report == 6:
        return HttpResponseRedirect(reverse("ihub:consultation_log_xlsx", kwargs=log_kwargs))
    elif report == 7:
        return HttpResponseRedirect(
            f'{reverse("ihub:consultation_instructions_pdf")}?orgs={orgs_w_consultation_instructions}'
        )
    elif report == 8:
        return HttpResponseRedirect(
            f'{reverse("ihub:consultation_instructions_xlsx")}?orgs={orgs_w_consultation_instructions}'
        )
    else:
        messages.error(self.request, "Report is not available. Please select another report.")
        return HttpResponseRedirect(reverse("ihub:report_search"))
def project_leads(self): return listrify( [staff for staff in self.staff_members.all() if staff.lead])
def generate_consultation_log_spreadsheet(fy, orgs, statuses, entry_types, report_title):
    """
    Build the consultation-log xlsx in the ihub temp media dir.

    Filter arguments arrive as comma-separated id strings (or the literal
    "None" placeholder meaning "no filter").

    :param fy: fiscal year id, or "None"
    :param orgs: comma-separated Organization ids, or "None"
    :param statuses: comma-separated Status ids, or "None"
    :param entry_types: comma-separated EntryType ids, or "None"
    :param report_title: title written in the first worksheet row
    :return: url/path of the generated temp file
    """
    # figure out the filename (date-stamped so repeats on the same day overwrite)
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'ihub', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'ihub', 'temp', target_file)

    # first, filter out the "none" placeholder
    if fy == "None":
        fy = None
    if orgs == "None":
        orgs = None
    if statuses == "None":
        statuses = None
    if entry_types == "None":
        entry_types = None

    # get an entry list for the fiscal year (if any)
    # NOTE(review): ordering by the M2M field "sectors" may duplicate rows
    # for entries with several sectors — confirm this is acceptable.
    entry_list = models.Entry.objects.all().order_by("sectors", "status", "-initial_date")
    if fy:
        # BUGFIX: filter the already-ordered queryset; the original rebuilt
        # it from models.Entry.objects and silently dropped the order_by.
        entry_list = entry_list.filter(fiscal_year=fy)
    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [
            ml_models.Organization.objects.get(pk=int(o))
            for o in orgs.split(",")
        ]
        # OR the organization filters together
        q_objects = Q()  # Create an empty Q object to start with
        for o in org_list:
            q_objects |= Q(organizations=o)  # 'or' the Q objects together
        # apply the filter
        entry_list = entry_list.filter(q_objects)
    if statuses:
        # we have to refine the queryset to only the selected statuses
        status_list = [
            models.Status.objects.get(pk=int(o))
            for o in statuses.split(",")
        ]
        # OR the status filters together
        q_objects = Q()  # Create an empty Q object to start with
        for o in status_list:
            q_objects |= Q(status=o)  # 'or' the Q objects together
        # apply the filter
        entry_list = entry_list.filter(q_objects)
    if entry_types:
        # we have to refine the queryset to only the selected entry types
        entry_type_list = [
            models.EntryType.objects.get(pk=int(o))
            for o in entry_types.split(",")
        ]
        entry_list = entry_list.filter(entry_type__in=entry_type_list)

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting
    title_format = workbook.add_format({
        'bold': True,
        "align": 'normal',
        'font_size': 24,
    })
    header_format = workbook.add_format({
        'bold': True,
        'border': 1,
        'border_color': 'black',
        'bg_color': '#a6cbf5',
        "align": 'normal',
        "text_wrap": True,
        "valign": 'top',
    })
    normal_format = workbook.add_format({
        "align": 'left',
        "text_wrap": True,
        'num_format': 'mm/dd/yyyy',
        "valign": 'top',
    })

    # define the header
    # NOTE(review): the "Proponent" column is populated with organizations /
    # sectors below — confirm the header label is intended.
    header = [
        "Project / Location",
        "Proponent",
        "Date offer to Consult",
        "Departments Involved\n(Prov & Fed)",
        "Project Status/ Correspondence / Notes",
        "Follow-up Actions required",
    ]
    my_ws = workbook.add_worksheet(name="report")
    # create the col_max column to store the length of each header
    # should be a maximum column width to 100
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
    my_ws.write(0, 0, report_title, title_format)
    my_ws.write_row(2, 0, header, header_format)
    i = 3
    for e in entry_list.all():
        col_1 = "TITLE: {}\n\nTYPE: {}\n\nLOCATION: {}".format(
            e.title,
            e.entry_type,
            nz(e.location, "----"),
        )
        col_2 = "ORGANIZATIONS: {}\n\nDFO SECTORS: {}".format(
            e.orgs_str,
            e.sectors_str,
        )
        people = listrify([p for p in e.people.all()], "\n")
        other_notes = "Overall status: {}".format(e.status)
        if e.other_notes.count() > 0:
            for n in e.other_notes.all():
                other_notes += "\n\n*************************\n" + str(n)
        followups = ""
        for n in e.followups.all():
            if len(followups) == 0:
                followups = str(n)
            else:
                followups += "\n\n*************************\n" + str(n)
        data_row = [
            col_1,
            col_2,
            e.initial_date.strftime("%m/%d/%Y") if e.initial_date else "n/a",
            people,
            # stored notes contain literal "\r\n" sequences; turn them into
            # real line breaks for the spreadsheet
            other_notes.replace("\\r\\n", "\r\n"),
            followups.replace("\\r\\n", "\r\n") if followups else "",
        ]
        # adjust the width of the columns based on the max string length in each col
        ## replace col_max[j] if str length j is bigger than stored value
        j = 0
        for d in data_row:
            # if new value > stored value... replace stored value (capped at 75)
            if len(str(d)) > col_max[j]:
                if len(str(d)) < 75:
                    col_max[j] = len(str(d))
                else:
                    col_max[j] = 75
            j += 1
        my_ws.write_row(i, 0, data_row, normal_format)
        i += 1
    # set column widths
    for j in range(0, len(col_max)):
        my_ws.set_column(j, j, width=col_max[j] * 1.1)
    workbook.close()
    return target_url
def sectors_str(self): return listrify([sec for sec in self.sectors.all()])
def generate_consultation_log_spreadsheet(orgs, sectors, statuses, entry_types, report_title, from_date, to_date, entry_note_types, entry_note_statuses):
    """
    Build the (newer) consultation-log xlsx in the ihub temp media dir.

    Filter arguments arrive as comma-separated id strings or the literal
    "None" placeholder (meaning "no filter"); dates arrive as "YYYY-MM-DD"
    strings or "None". Note filters restrict which entry notes are appended
    to the notes column.

    :return: url/path of the generated temp file
    """
    # figure out the filename (date-stamped so repeats on the same day overwrite)
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'ihub', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'ihub', 'temp', target_file)

    # first, filter out the "none" placeholder
    if sectors == "None":
        sectors = None
    if orgs == "None":
        orgs = None
    if statuses == "None":
        statuses = None
    if entry_types == "None":
        entry_types = None
    if from_date == "None":
        from_date = None
    if to_date == "None":
        to_date = None
    if entry_note_types == "None":
        entry_note_types = None
    else:
        # turn "1,2,3" into [1, 2, 3] for the membership tests below
        entry_note_types = [int(i) for i in entry_note_types.split(",")] if entry_note_types else None
    if entry_note_statuses == "None":
        entry_note_statuses = None
    else:
        entry_note_statuses = [int(i) for i in entry_note_statuses.split(",")] if entry_note_statuses else None

    # base queryset; refined by each supplied filter below
    entry_list = models.Entry.objects.all().order_by("status", "-initial_date")
    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [
            ml_models.Organization.objects.get(pk=int(o))
            for o in orgs.split(",")
        ]
        entry_list = entry_list.filter(organizations__in=org_list)
    if sectors:
        # we have to refine the queryset to only the selected sectors
        sector_list = [
            ml_models.Sector.objects.get(pk=int(s))
            for s in sectors.split(",")
        ]
        entry_list = entry_list.filter(sectors__in=sector_list)
    if statuses:
        # we have to refine the queryset to only the selected statuses
        status_list = [
            models.Status.objects.get(pk=int(o))
            for o in statuses.split(",")
        ]
        entry_list = entry_list.filter(status__in=status_list)
    if entry_types:
        # we have to refine the queryset to only the selected entry types
        entry_type_list = [
            models.EntryType.objects.get(pk=int(o))
            for o in entry_types.split(",")
        ]
        entry_list = entry_list.filter(entry_type__in=entry_type_list)
    if from_date or to_date:
        # keep only entries whose [initial_date, anticipated_end_date] window
        # overlaps the requested [from_date, to_date] window
        id_list = []
        d0_start = datetime.strptime(from_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if from_date else None
        d0_end = datetime.strptime(to_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if to_date else None
        for e in entry_list:
            d1_start = e.initial_date
            d1_end = e.anticipated_end_date
            if get_date_range_overlap(d0_start, d0_end, d1_start, d1_end) > 0:
                id_list.append(e.id)
        entry_list = entry_list.filter(id__in=id_list)
    # BUGFIX: distinct() returns a new queryset; the original discarded the
    # result, so M2M filters could yield duplicate rows.
    entry_list = entry_list.distinct()

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting
    title_format = workbook.add_format({
        'bold': True,
        "align": 'normal',
        'font_size': 24,
    })
    header_format = workbook.add_format({
        'bold': True,
        'border': 1,
        'border_color': 'black',
        'bg_color': '#a6cbf5',
        "align": 'normal',
        "text_wrap": True,
        "valign": 'top',
    })
    normal_format = workbook.add_format({
        "align": 'left',
        "text_wrap": True,
        'num_format': 'mm/dd/yyyy',
        "valign": 'top',
    })

    # define the header
    header = [
        "Project title",
        "Proponent",
        "Location",
        "Type if interaction",
        "Indigenous Group(s)",
        "Date offer to Consult",
        "Departments Involved\n(Prov & Fed)",
        "Project Status/ Correspondence / Notes",
        "Follow-up Actions required",
    ]
    my_ws = workbook.add_worksheet(name="report")
    # create the col_max column to store the length of each header
    # should be a maximum column width to 100
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
    my_ws.write(0, 0, report_title, title_format)
    my_ws.write_row(2, 0, header, header_format)
    i = 3
    for e in entry_list.all():
        # BUGFIX: the original f-string was f"...{e.sectors_str,}" — the
        # stray trailing comma built a 1-tuple, so the cell showed a tuple
        # repr like ("...",) instead of the plain string. Also renamed the
        # local so it no longer shadows the `sectors` parameter.
        sectors_blurb = f"\n\nDFO SECTORS: {e.sectors_str}"
        people = nz(listrify([p for p in e.people.all()], "\n\n"), "") + nz(sectors_blurb, "")
        other_notes = "Overall status: {}".format(e.status)
        if e.other_notes.count() > 0:
            for n in e.other_notes.all():
                # apply the optional note-type / note-status filters
                if not entry_note_types or (n.type in entry_note_types):
                    if not entry_note_statuses or (n.status_id in entry_note_statuses):
                        other_notes += "\n\n*************************\n" + str(n)
        followups = ""
        for n in e.followups.all():
            if len(followups) == 0:
                followups = str(n)
            else:
                followups += "\n\n*************************\n" + str(n)
        data_row = [
            e.title,
            e.proponent,
            nz(e.location, "----"),
            str(e.entry_type),
            e.orgs_str,
            e.initial_date.strftime("%m/%d/%Y") if e.initial_date else "n/a",
            people,
            # stored notes contain literal "\r\n" sequences; turn them into
            # real line breaks for the spreadsheet
            other_notes.replace("\\r\\n", "\r\n"),
            followups.replace("\\r\\n", "\r\n") if followups else "",
        ]
        # adjust the width of the columns based on the max string length in each col
        ## replace col_max[j] if str length j is bigger than stored value
        j = 0
        for d in data_row:
            # if new value > stored value... replace stored value (capped at 75)
            if len(str(d)) > col_max[j]:
                if len(str(d)) < 75:
                    col_max[j] = len(str(d))
                else:
                    col_max[j] = 75
            j += 1
        my_ws.write_row(i, 0, data_row, normal_format)
        i += 1
    # set column widths
    for j in range(0, len(col_max)):
        my_ws.set_column(j, j, width=col_max[j] * 1.1)
    workbook.close()
    return target_url