def save(self, *args, **kwargs):
    """Recompute the outstanding obligation before persisting.

    Outstanding obligation = obligation cost minus what has been invoiced
    so far; when there is no obligation cost it is reset to zero.
    """
    if self.obligation_cost:
        # invoice_cost may be null; treat it as 0 in the subtraction
        self.outstanding_obligation = self.obligation_cost - nz(self.invoice_cost, 0)
    else:
        self.outstanding_obligation = 0
    # NOTE: a long-dead commented-out block that auto-assigned a consignee
    # suffix from the fiscal year was removed here (dead code).
    return super().save(*args, **kwargs)
def amount_outstanding(self):
    """Approved funding that has neither been transferred nor lapsed."""
    approved = nz(self.amount_approved, 0)
    transferred = nz(self.amount_transferred, 0)
    lapsed = nz(self.amount_lapsed, 0)
    return approved - transferred - lapsed
def generate_consultation_report(orgs, sectors, statuses, from_date, to_date, entry_note_types, entry_note_statuses, org_regions, entry_regions):
    """Build an xlsx consultation report with one worksheet per DFO sector.

    All filter arguments arrive as comma-delimited PK strings (or the
    literal string "None" when a filter is unused); dates arrive as
    "%Y-%m-%d" strings. Returns the URL of the generated file.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'temp', target_file)

    # first, filter out the "none" placeholder
    if sectors == "None":
        sectors = None
    if orgs == "None":
        orgs = None
    if statuses == "None":
        statuses = None
    if from_date == "None":
        from_date = None
    if to_date == "None":
        to_date = None
    if org_regions == "None":
        org_regions = None
    if entry_regions == "None":
        entry_regions = None
    if entry_note_types == "None":
        entry_note_types = None
    else:
        entry_note_types = [int(i) for i in entry_note_types.split(",")] if entry_note_types else None
    if entry_note_statuses == "None":
        entry_note_statuses = None
    else:
        entry_note_statuses = [int(i) for i in entry_note_statuses.split(",")] if entry_note_statuses else None

    # get an entry list restricted to consultation-type entries
    entry_list = models.Entry.objects.filter(
        entry_type__name__icontains="consultation").order_by("status", "-initial_date")

    if org_regions:
        # we have to refine the queryset to only the selected org regions
        region_list = [Region.objects.get(pk=int(o)) for o in org_regions.split(",")]
        entry_list = entry_list.filter(organizations__regions__in=region_list)

    if entry_regions:
        # we have to refine the queryset to only the selected entry regions
        region_list = [Region.objects.get(pk=int(o)) for o in entry_regions.split(",")]
        entry_list = entry_list.filter(regions__in=region_list)

    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [ml_models.Organization.objects.get(pk=int(o)) for o in orgs.split(",")]
        entry_list = entry_list.filter(organizations__in=org_list)

    if sectors:
        # we have to refine the queryset to only the selected sectors
        sector_list = [ml_models.Sector.objects.get(pk=int(s)) for s in sectors.split(",")]
        entry_list = entry_list.filter(sectors__in=sector_list)

    if statuses:
        # we have to refine the queryset to only the selected statuses
        status_list = [models.Status.objects.get(pk=int(o)) for o in statuses.split(",")]
        entry_list = entry_list.filter(status__in=status_list)

    if from_date or to_date:
        # keep only entries whose date range overlaps the requested window
        id_list = []
        d0_start = datetime.strptime(from_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if from_date else None
        d0_end = datetime.strptime(to_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if to_date else None
        for e in entry_list:
            d1_start = e.initial_date
            d1_end = e.anticipated_end_date
            if get_date_range_overlap(d0_start, d0_end, d1_start, d1_end) > 0:
                id_list.append(e.id)
        entry_list = entry_list.filter(id__in=id_list)

    # FIX: distinct() returns a new queryset; the original discarded the
    # result, leaving potential duplicate rows from the m2m joins above.
    entry_list = entry_list.distinct()

    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting
    title_format = workbook.add_format({'bold': True, "align": 'normal', 'font_size': 24, })
    header_format = workbook.add_format({
        'bold': True, 'border': 1, 'border_color': 'black', 'bg_color': '#a6cbf5',
        "align": 'normal', "text_wrap": True, "valign": 'top',
    })
    normal_format = workbook.add_format({
        "align": 'left', "text_wrap": True, 'num_format': 'mm/dd/yyyy', "valign": 'top',
    })
    highlighted_format = workbook.add_format({
        "align": 'left', "text_wrap": True, 'num_format': 'mm/dd/yyyy', "valign": 'top',
        "bg_color": "yellow",
    })

    # we want a sheet for every sector
    sector_ids = []
    for e in entry_list:
        for s in e.sectors.all():
            sector_ids.append(s.id)
    sectors = ml_models.Sector.objects.filter(id__in=sector_ids)

    # there is a problem: some of the sectors have duplicate names, especially when truncated..
    for s in sectors:
        sector_name = truncate(s.name, 30, False)
        if sectors.filter(name__icontains=sector_name).count() > 1:
            # disambiguate duplicate (truncated) names with the sector pk
            sector_name = truncate(s.name, 25, False) + f" ({s.id})"
        my_ws = workbook.add_worksheet(name=sector_name)
        entries = s.entries.filter(id__in=[e.id for e in entry_list])

        # define the header
        header = [
            "Title",
            "Organizations",
            "Status",
            "Persons/lead",
            "DFO programs involved",
            "letter sent",
            "Response Requested by",
            "Proponent",
            "FAA triggered (Yes/No)",
            "Comments",
        ]

        # create the col_max column to store the length of each header
        # should be a maximum column width to 100
        col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
        my_ws.write_row(0, 0, header, header_format)

        i = 1
        for e in entries.all():
            people = nz(listrify([p for p in e.people.all()], "\n\n"), "")
            notes = ""
            if e.notes.exists():
                for n in e.notes.all():
                    # honour the optional note-type / note-status filters
                    if not entry_note_types or (n.type in entry_note_types):
                        if not entry_note_statuses or (n.status_id in entry_note_statuses):
                            if len(notes):
                                notes += "\n\n*************************\n" + str(n)
                            else:
                                notes = str(n)
            data_row = [
                e.title,
                e.orgs_str,
                str(e.status),
                people,
                e.sectors_str,
                e.initial_date.strftime("%m/%d/%Y") if e.initial_date else " ---",
                e.response_requested_by.strftime("%m/%d/%Y") if e.response_requested_by else " ---",
                e.proponent,
                yesno(e.is_faa_required, "yes,no,no"),
                notes.replace("\\r\\n", "\r\n"),
            ]

            # adjust the width of the columns based on the max string length
            # in each col; widths are capped at 75
            j = 0
            for d in data_row:
                if len(str(d)) > col_max[j]:
                    if len(str(d)) < 75:
                        col_max[j] = len(str(d))
                    else:
                        col_max[j] = 75
                j += 1

            # FIX: renamed from `format`, which shadowed the builtin
            row_format = normal_format
            if e.sectors.count() > 1:
                # entries shared by several sectors get highlighted
                row_format = highlighted_format
            my_ws.write_row(i, 0, data_row, row_format)
            i += 1

        # set column widths
        for j in range(0, len(col_max)):
            my_ws.set_column(j, j, width=col_max[j] * 1.1)

    workbook.close()
    return target_url
def generate_consultation_log_spreadsheet(orgs, sectors, statuses, entry_types, report_title, from_date, to_date, entry_note_types, entry_note_statuses):
    """Build the single-sheet consultation log xlsx and return its URL.

    Filter arguments arrive as comma-delimited PK strings (or the literal
    string "None" when unused); dates arrive as "%Y-%m-%d" strings.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'ihub', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'ihub', 'temp', target_file)

    # first, filter out the "none" placeholder
    if sectors == "None":
        sectors = None
    if orgs == "None":
        orgs = None
    if statuses == "None":
        statuses = None
    if entry_types == "None":
        entry_types = None
    if from_date == "None":
        from_date = None
    if to_date == "None":
        to_date = None
    if entry_note_types == "None":
        entry_note_types = None
    else:
        entry_note_types = [int(i) for i in entry_note_types.split(",")] if entry_note_types else None
    if entry_note_statuses == "None":
        entry_note_statuses = None
    else:
        entry_note_statuses = [int(i) for i in entry_note_statuses.split(",")] if entry_note_statuses else None

    # get an entry list for the fiscal year (if any)
    entry_list = models.Entry.objects.all().order_by("status", "-initial_date")

    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [ml_models.Organization.objects.get(pk=int(o)) for o in orgs.split(",")]
        entry_list = entry_list.filter(organizations__in=org_list)

    if sectors:
        # we have to refine the queryset to only the selected sectors
        sector_list = [ml_models.Sector.objects.get(pk=int(s)) for s in sectors.split(",")]
        entry_list = entry_list.filter(sectors__in=sector_list)

    if statuses:
        # we have to refine the queryset to only the selected statuses
        status_list = [models.Status.objects.get(pk=int(o)) for o in statuses.split(",")]
        entry_list = entry_list.filter(status__in=status_list)

    if entry_types:
        # we have to refine the queryset to only the selected entry types
        entry_type_list = [models.EntryType.objects.get(pk=int(o)) for o in entry_types.split(",")]
        entry_list = entry_list.filter(entry_type__in=entry_type_list)

    if from_date or to_date:
        # keep only entries whose date range overlaps the requested window
        id_list = []
        d0_start = datetime.strptime(from_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if from_date else None
        d0_end = datetime.strptime(to_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if to_date else None
        for e in entry_list:
            d1_start = e.initial_date
            d1_end = e.anticipated_end_date
            if get_date_range_overlap(d0_start, d0_end, d1_start, d1_end) > 0:
                id_list.append(e.id)
        entry_list = entry_list.filter(id__in=id_list)

    # FIX: assign the result; distinct() does not mutate in place
    entry_list = entry_list.distinct()

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting
    title_format = workbook.add_format({'bold': True, "align": 'normal', 'font_size': 24, })
    header_format = workbook.add_format({
        'bold': True, 'border': 1, 'border_color': 'black', 'bg_color': '#a6cbf5',
        "align": 'normal', "text_wrap": True, "valign": 'top',
    })
    normal_format = workbook.add_format({
        "align": 'left', "text_wrap": True, 'num_format': 'mm/dd/yyyy', "valign": 'top',
    })

    # define the header
    header = [
        "Project title",
        "Proponent",
        "Location",
        "Type of interaction",  # FIX: header typo was "Type if interaction"
        "Indigenous Group(s)",
        "Date offer to Consult",
        "Departments Involved\n(Prov & Fed)",
        "Project Status/ Correspondence / Notes",
        "Follow-up Actions required",
    ]
    my_ws = workbook.add_worksheet(name="report")

    # create the col_max column to store the length of each header
    # should be a maximum column width to 100
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
    my_ws.write(0, 0, report_title, title_format)
    my_ws.write_row(2, 0, header, header_format)

    i = 3
    for e in entry_list.all():
        # FIX: the trailing comma inside the f-string placeholder made this
        # render a one-element tuple, e.g. "DFO SECTORS: ('...',)"
        sectors = f"\n\nDFO SECTORS: {e.sectors_str}"
        people = nz(listrify([p for p in e.people.all()], "\n\n"), "") + nz(sectors, "")
        other_notes = "Overall status: {}".format(e.status)
        if e.other_notes.count() > 0:
            for n in e.other_notes.all():
                # honour the optional note-type / note-status filters
                if not entry_note_types or (n.type in entry_note_types):
                    if not entry_note_statuses or (n.status_id in entry_note_statuses):
                        other_notes += "\n\n*************************\n" + str(n)
        followups = ""
        for n in e.followups.all():
            if len(followups) == 0:
                followups = str(n)
            else:
                followups += "\n\n*************************\n" + str(n)
        data_row = [
            e.title,
            e.proponent,
            nz(e.location, "----"),
            str(e.entry_type),
            e.orgs_str,
            e.initial_date.strftime("%m/%d/%Y") if e.initial_date else "n/a",
            people,
            other_notes.replace("\\r\\n", "\r\n"),
            followups.replace("\\r\\n", "\r\n") if followups else "",
        ]

        # adjust the width of the columns based on the max string length
        # in each col; widths are capped at 75
        j = 0
        for d in data_row:
            if len(str(d)) > col_max[j]:
                if len(str(d)) < 75:
                    col_max[j] = len(str(d))
                else:
                    col_max[j] = 75
            j += 1
        my_ws.write_row(i, 0, data_row, normal_format)
        i += 1

    # set column widths
    for j in range(0, len(col_max)):
        my_ws.set_column(j, j, width=col_max[j] * 1.1)

    workbook.close()
    return target_url
def generate_summary_spreadsheet(orgs, sectors, from_date, to_date, entry_note_types, entry_note_statuses):
    """Build the summary xlsx with one worksheet per organization.

    Filter arguments arrive as comma-delimited PK strings (or the literal
    string "None" when unused); dates arrive as "%Y-%m-%d" strings.
    Returns the URL of the generated file.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'ihub', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'ihub', 'temp', target_file)

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting
    title_format = workbook.add_format({'bold': True, "align": 'normal', 'font_size': 24, })
    header_format = workbook.add_format({
        'bold': True, 'border': 1, 'border_color': 'black', 'bg_color': '#D6D1C0',
        "align": 'normal', "text_wrap": True,
    })
    total_format = workbook.add_format({'bold': True, "align": 'left', "text_wrap": True, 'num_format': '$#,##0'})
    normal_format = workbook.add_format({"align": 'left', "text_wrap": True, 'num_format': '$#,##0'})

    # first, filter out the "none" placeholder
    if sectors == "None":
        sectors = None
    if orgs == "None":
        orgs = None
    if from_date == "None":
        from_date = None
    if to_date == "None":
        to_date = None
    if entry_note_types == "None":
        entry_note_types = None
    else:
        entry_note_types = [int(i) for i in entry_note_types.split(",")] if entry_note_types else None
    if entry_note_statuses == "None":
        entry_note_statuses = None
    else:
        entry_note_statuses = [int(i) for i in entry_note_statuses.split(",")] if entry_note_statuses else None

    # build an entry list:
    entry_list = models.Entry.objects.all()

    if sectors:
        # we have to refine the queryset to only the selected sectors
        sector_list = [ml_models.Sector.objects.get(pk=int(s)) for s in sectors.split(",")]
        entry_list = entry_list.filter(sectors__in=sector_list)

    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [ml_models.Organization.objects.get(pk=int(o)) for o in orgs.split(",")]
        entry_list = entry_list.filter(organizations__in=org_list)
    else:
        # if no orgs were passed in to the report, we need to make an org list
        # based on the orgs in the entries; this org_list will serve as basis
        # for spreadsheet tabs
        org_id_list = list(set([org.id for entry in entry_list for org in entry.organizations.all()]))
        org_list = ml_models.Organization.objects.filter(id__in=org_id_list).order_by("abbrev")

    if from_date or to_date:
        # keep only entries whose date range overlaps the requested window
        id_list = []
        d0_start = datetime.strptime(from_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if from_date else None
        d0_end = datetime.strptime(to_date, "%Y-%m-%d").replace(
            tzinfo=timezone.get_current_timezone()) if to_date else None
        for e in entry_list:
            d1_start = e.initial_date
            d1_end = e.anticipated_end_date
            if get_date_range_overlap(d0_start, d0_end, d1_start, d1_end) > 0:
                id_list.append(e.id)
        entry_list = entry_list.filter(id__in=id_list)

    # FIX: assign the result; distinct() does not mutate in place
    entry_list = entry_list.distinct()

    # define the header
    # NOTE(review): if entry_list is empty, first() is None — presumably
    # get_verbose_label tolerates that; confirm against its implementation
    header = [
        get_verbose_label(entry_list.first(), 'fiscal_year'),
        get_verbose_label(entry_list.first(), 'title'),
        get_verbose_label(entry_list.first(), 'organizations'),
        get_verbose_label(entry_list.first(), 'status'),
        get_verbose_label(entry_list.first(), 'sectors'),
        get_verbose_label(entry_list.first(), 'entry_type'),
        get_verbose_label(entry_list.first(), 'initial_date'),
        get_verbose_label(entry_list.first(), 'anticipated_end_date'),
        get_verbose_label(entry_list.first(), 'regions'),
        _("DFO Contacts"),
        _("Notes"),
        get_verbose_label(entry_list.first(), 'funding_program'),
        get_verbose_label(entry_list.first(), 'funding_needed'),
        get_verbose_label(entry_list.first(), 'funding_purpose'),
        get_verbose_label(entry_list.first(), 'amount_requested'),
        get_verbose_label(entry_list.first(), 'amount_approved'),
        get_verbose_label(entry_list.first(), 'amount_transferred'),
        get_verbose_label(entry_list.first(), 'amount_lapsed'),
        _("Amount outstanding"),
    ]

    # worksheets
    # ##############
    # each org should be represented on a separate worksheet
    org_counter = 0
    for org in org_list:
        org_abbrev = slugify(org.abbrev) if org.abbrev else f"missing_abbrev_{org_counter}"
        org_counter += 1
        my_ws = workbook.add_worksheet(name=org_abbrev)

        # create the col_max column to store the length of each header
        # should be a maximum column width to 100
        col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
        my_ws.write(0, 0, str(org), title_format)
        my_ws.write_row(2, 0, header, header_format)

        tot_requested = 0
        tot_approved = 0
        tot_transferred = 0
        tot_lapsed = 0
        tot_outstanding = 0
        i = 3
        for e in entry_list.filter(organizations=org):
            if e.organizations.count() > 0:
                orgs = str([str(obj) for obj in e.organizations.all()]).replace(
                    "[", "").replace("]", "").replace("'", "").replace('"', "").replace(', ', "\n")
            else:
                orgs = None

            if e.people.count() > 0:
                people = str([
                    "{} - {} ({})".format(obj.get_role_display(), obj, obj.organization)
                    for obj in e.people.all()
                ]).replace("[", "").replace("]", "").replace("'", "").replace('"', "").replace(', ', "\n")
            else:
                people = None

            note_qry = e.notes.all()
            if note_qry.count() > 0:
                notes = ""
                count = 0
                max_count = note_qry.count()
                for obj in note_qry:
                    # honour the optional note-type / note-status filters
                    if not entry_note_types or (obj.type in entry_note_types):
                        if not entry_note_statuses or (obj.status_id in entry_note_statuses):
                            notes += "{} - {} [STATUS: {}] (Created by {} {} on {})\n".format(
                                obj.get_type_display().upper(),
                                obj.note,
                                obj.status,
                                obj.author.first_name if obj.author else "",
                                obj.author.last_name if obj.author else "",
                                obj.creation_date.strftime("%Y-%m-%d"),
                            )
                            # FIX: count was never incremented, so the
                            # separator line was appended after every note,
                            # including the last one
                            count += 1
                            if not count == max_count:
                                notes += "\n"
            else:
                notes = None

            if e.sectors.count() > 0:
                sectors = str([str(obj) for obj in e.sectors.all()]).replace(
                    "[", "").replace("]", "").replace("'", "").replace('"', "").replace(', ', "\n")
            else:
                sectors = None

            if e.regions.count() > 0:
                regions = str([str(obj) for obj in e.regions.all()]).replace(
                    "[", "").replace("]", "").replace("'", "").replace('"', "")
            else:
                regions = None

            data_row = [
                e.fiscal_year,
                e.title,
                orgs,
                str(e.status),
                sectors,
                str(e.entry_type),
                e.initial_date.strftime("%Y-%m-%d") if e.initial_date else "n/a",
                e.anticipated_end_date.strftime("%Y-%m-%d") if e.anticipated_end_date else "",
                regions,
                people,
                notes,
                nz(str(e.funding_program), ""),
                yesno(e.funding_needed),
                nz(str(e.funding_purpose), ""),
                nz(e.amount_requested, 0),
                nz(e.amount_approved, 0),
                nz(e.amount_transferred, 0),
                nz(e.amount_lapsed, 0),
                nz(e.amount_outstanding, 0),
            ]
            tot_requested += nz(e.amount_requested, 0)
            tot_approved += nz(e.amount_approved, 0)
            tot_transferred += nz(e.amount_transferred, 0)
            tot_lapsed += nz(e.amount_lapsed, 0)
            tot_outstanding += nz(e.amount_outstanding, 0)

            # adjust the width of the columns based on the max string length
            # in each col; widths are capped at 75
            j = 0
            for d in data_row:
                if len(str(d)) > col_max[j]:
                    if len(str(d)) < 75:
                        col_max[j] = len(str(d))
                    else:
                        col_max[j] = 75
                j += 1
            my_ws.write_row(i, 0, data_row, normal_format)
            i += 1

        # set column widths
        for j in range(0, len(col_max)):
            my_ws.set_column(j, j, width=col_max[j] * 1.1)

        # sum all the currency columns
        total_row = [
            _("GRAND TOTAL:"),
            tot_requested,
            tot_approved,
            tot_transferred,
            tot_lapsed,
            tot_outstanding,
        ]
        try:
            my_ws.write_row(i + 2, header.index(_("Funding requested")) - 1, total_row, total_format)

            # set formatting for status
            for status in models.Status.objects.all():
                my_ws.conditional_format(
                    0, header.index(_("status").title()), i, header.index(_("status").title()), {
                        'type': 'cell',
                        'criteria': 'equal to',
                        'value': '"{}"'.format(status.name),
                        'format': workbook.add_format({'bg_color': status.color, }),
                    })

            # set formatting for entry type
            for entry_type in models.EntryType.objects.all():
                my_ws.conditional_format(
                    0, header.index(_("Entry Type").title()), i, header.index(_("Entry Type").title()), {
                        'type': 'cell',
                        'criteria': 'equal to',
                        'value': '"{}"'.format(entry_type.name),
                        'format': workbook.add_format({'bg_color': entry_type.color, }),
                    })
        except Exception:
            # FIX: was a bare except; the lookups above raise ValueError when
            # a label is absent from the header — keep the best-effort intent
            print("problem with summary row")
        i += 1

    workbook.close()
    return target_url
def generate_consultation_log_spreadsheet(fy, orgs, statuses, entry_types, report_title):
    """Build the fiscal-year consultation log xlsx and return its URL.

    Filter arguments arrive as comma-delimited PK strings (or the literal
    string "None" when unused); `fy` is a fiscal-year identifier.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'ihub', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'ihub', 'temp', target_file)

    # first, filter out the "none" placeholder
    if fy == "None":
        fy = None
    if orgs == "None":
        orgs = None
    if statuses == "None":
        statuses = None
    if entry_types == "None":
        entry_types = None

    # get an entry list for the fiscal year (if any)
    entry_list = models.Entry.objects.all().order_by("sectors", "status", "-initial_date")

    if fy:
        # FIX: the original rebuilt the queryset from scratch here, silently
        # discarding the order_by above; chain the filter instead
        entry_list = entry_list.filter(fiscal_year=fy)

    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [ml_models.Organization.objects.get(pk=int(o)) for o in orgs.split(",")]
        # equivalent to OR-ing Q(organizations=o) per org, consistent with
        # the sibling report functions
        entry_list = entry_list.filter(organizations__in=org_list)

    if statuses:
        # we have to refine the queryset to only the selected statuses
        status_list = [models.Status.objects.get(pk=int(o)) for o in statuses.split(",")]
        entry_list = entry_list.filter(status__in=status_list)

    if entry_types:
        # we have to refine the queryset to only the selected entry types
        entry_type_list = [models.EntryType.objects.get(pk=int(o)) for o in entry_types.split(",")]
        entry_list = entry_list.filter(entry_type__in=entry_type_list)

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting
    title_format = workbook.add_format({'bold': True, "align": 'normal', 'font_size': 24, })
    header_format = workbook.add_format({
        'bold': True, 'border': 1, 'border_color': 'black', 'bg_color': '#a6cbf5',
        "align": 'normal', "text_wrap": True, "valign": 'top',
    })
    normal_format = workbook.add_format({
        "align": 'left', "text_wrap": True, 'num_format': 'mm/dd/yyyy', "valign": 'top',
    })

    # define the header
    header = [
        "Project / Location",
        "Proponent",
        "Date offer to Consult",
        "Departments Involved\n(Prov & Fed)",
        "Project Status/ Correspondence / Notes",
        "Follow-up Actions required",
    ]
    my_ws = workbook.add_worksheet(name="report")

    # create the col_max column to store the length of each header
    # should be a maximum column width to 100
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
    my_ws.write(0, 0, report_title, title_format)
    my_ws.write_row(2, 0, header, header_format)

    i = 3
    for e in entry_list.all():
        col_1 = "TITLE: {}\n\nTYPE: {}\n\nLOCATION: {}".format(
            e.title,
            e.entry_type,
            nz(e.location, "----"),
        )
        col_2 = "ORGANIZATIONS: {}\n\nDFO SECTORS: {}".format(
            e.orgs_str,
            e.sectors_str,
        )
        people = listrify([p for p in e.people.all()], "\n")
        other_notes = "Overall status: {}".format(e.status)
        if e.other_notes.count() > 0:
            for n in e.other_notes.all():
                other_notes += "\n\n*************************\n" + str(n)
        followups = ""
        for n in e.followups.all():
            if len(followups) == 0:
                followups = str(n)
            else:
                followups += "\n\n*************************\n" + str(n)
        data_row = [
            col_1,
            col_2,
            e.initial_date.strftime("%m/%d/%Y") if e.initial_date else "n/a",
            people,
            other_notes.replace("\\r\\n", "\r\n"),
            followups.replace("\\r\\n", "\r\n") if followups else "",
        ]

        # adjust the width of the columns based on the max string length
        # in each col; widths are capped at 75
        j = 0
        for d in data_row:
            if len(str(d)) > col_max[j]:
                if len(str(d)) < 75:
                    col_max[j] = len(str(d))
                else:
                    col_max[j] = 75
            j += 1
        my_ws.write_row(i, 0, data_row, normal_format)
        i += 1

    # set column widths
    for j in range(0, len(col_max)):
        my_ws.set_column(j, j, width=col_max[j] * 1.1)

    workbook.close()
    return target_url
def generate_capacity_spreadsheet(fy, orgs, sectors):
    """Build the capacity xlsx with one worksheet per organization.

    Filter arguments arrive as comma-delimited PK strings (or the literal
    string "None" when unused); `fy` is a fiscal-year identifier.
    Returns the URL of the generated file.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'ihub', 'temp')
    target_file = "temp_data_export_{}.xlsx".format(timezone.now().strftime("%Y-%m-%d"))
    target_file_path = os.path.join(target_dir, target_file)
    target_url = os.path.join(settings.MEDIA_ROOT, 'ihub', 'temp', target_file)

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting
    title_format = workbook.add_format({'bold': True, "align": 'normal', 'font_size': 24, })
    header_format = workbook.add_format({
        'bold': True, 'border': 1, 'border_color': 'black', 'bg_color': '#D6D1C0',
        "align": 'normal', "text_wrap": True,
    })
    total_format = workbook.add_format({'bold': True, "align": 'left', "text_wrap": True, 'num_format': '$#,##0'})
    normal_format = workbook.add_format({"align": 'left', "text_wrap": True, 'num_format': '$#,##0'})

    # first, filter out the "none" placeholder
    if fy == "None":
        fy = None
    if orgs == "None":
        orgs = None
    if sectors == "None":
        sectors = None

    # build an entry list:
    entry_list = models.Entry.objects.all()

    if fy:
        # FIX: chain the filter instead of rebuilding the queryset, for
        # consistency with the other report generators
        entry_list = entry_list.filter(fiscal_year=fy)

    if sectors:
        # we have to refine the queryset to only the selected sectors
        sector_list = [ml_models.Sector.objects.get(pk=int(s)) for s in sectors.split(",")]
        entry_list = entry_list.filter(sectors__in=sector_list)

    if orgs:
        # we have to refine the queryset to only the selected orgs
        org_list = [ml_models.Organization.objects.get(pk=int(o)) for o in orgs.split(",")]
        entry_list = entry_list.filter(organizations__in=org_list)
    else:
        # if no orgs were passed in to the report, we need to make an org list
        # based on the orgs in the entries; this org_list will serve as basis
        # for spreadsheet tabs
        org_id_list = list(set([org.id for entry in entry_list for org in entry.organizations.all()]))
        org_list = ml_models.Organization.objects.filter(id__in=org_id_list).order_by("abbrev")

    # define the header
    # NOTE(review): if entry_list is empty, first() is None — presumably
    # get_verbose_label tolerates that; confirm against its implementation
    header = [
        get_verbose_label(entry_list.first(), 'fiscal_year'),
        get_verbose_label(entry_list.first(), 'title'),
        get_verbose_label(entry_list.first(), 'organizations'),
        get_verbose_label(entry_list.first(), 'status'),
        get_verbose_label(entry_list.first(), 'sectors'),
        get_verbose_label(entry_list.first(), 'entry_type'),
        get_verbose_label(entry_list.first(), 'initial_date'),
        get_verbose_label(entry_list.first(), 'anticipated_end_date'),
        get_verbose_label(entry_list.first(), 'regions'),
        get_verbose_label(entry_list.first(), 'funding_program'),
        get_verbose_label(entry_list.first(), 'funding_needed'),
        get_verbose_label(entry_list.first(), 'funding_purpose'),
        get_verbose_label(entry_list.first(), 'amount_requested'),
        get_verbose_label(entry_list.first(), 'amount_approved'),
        get_verbose_label(entry_list.first(), 'amount_transferred'),
        get_verbose_label(entry_list.first(), 'amount_lapsed'),
        _("Amount outstanding"),
    ]

    # worksheets
    # ##############
    for org in org_list:
        # NOTE(review): unlike generate_summary_spreadsheet, no fallback for a
        # missing/duplicate org.abbrev here — verify abbrev is always set
        my_ws = workbook.add_worksheet(name=org.abbrev)

        # create the col_max column to store the length of each header
        # should be a maximum column width to 100
        col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
        my_ws.write(0, 0, str(org), title_format)
        my_ws.write_row(2, 0, header, header_format)

        tot_requested = 0
        tot_approved = 0
        tot_transferred = 0
        tot_lapsed = 0
        tot_outstanding = 0
        i = 3
        for e in entry_list.filter(organizations=org):
            if e.organizations.count() > 0:
                orgs = str([str(obj) for obj in e.organizations.all()]).replace(
                    "[", "").replace("]", "").replace("'", "").replace('"', "")
            else:
                orgs = None

            if e.sectors.count() > 0:
                sectors = str([str(obj) for obj in e.sectors.all()]).replace(
                    "[", "").replace("]", "").replace("'", "").replace('"', "")
            else:
                sectors = None

            if e.regions.count() > 0:
                regions = str([str(obj) for obj in e.regions.all()]).replace(
                    "[", "").replace("]", "").replace("'", "").replace('"', "")
            else:
                regions = None

            data_row = [
                e.fiscal_year,
                e.title,
                orgs,
                str(e.status),
                sectors,
                str(e.entry_type),
                e.initial_date.strftime("%Y-%m-%d") if e.initial_date else "n/a",
                e.anticipated_end_date.strftime("%Y-%m-%d") if e.anticipated_end_date else "",
                regions,
                nz(str(e.funding_program), ""),
                # NOTE(review): str() of a null field yields "None", so nz()
                # never substitutes here — confirm intended display
                nz(str(e.funding_needed), ""),
                nz(str(e.funding_purpose), ""),
                nz(e.amount_requested, 0),
                nz(e.amount_approved, 0),
                nz(e.amount_transferred, 0),
                nz(e.amount_lapsed, 0),
                nz(e.amount_outstanding, 0),
            ]
            tot_requested += nz(e.amount_requested, 0)
            tot_approved += nz(e.amount_approved, 0)
            tot_transferred += nz(e.amount_transferred, 0)
            tot_lapsed += nz(e.amount_lapsed, 0)
            tot_outstanding += nz(e.amount_outstanding, 0)

            # adjust the width of the columns based on the max string length
            # in each col; widths are capped at 100
            j = 0
            for d in data_row:
                if len(str(d)) > col_max[j]:
                    if len(str(d)) < 100:
                        col_max[j] = len(str(d))
                    else:
                        col_max[j] = 100
                j += 1
            my_ws.write_row(i, 0, data_row, normal_format)
            i += 1

        # set column widths
        for j in range(0, len(col_max)):
            my_ws.set_column(j, j, width=col_max[j] * 1.1)

        # sum all the currency columns
        total_row = [
            _("GRAND TOTAL:"),
            tot_requested,
            tot_approved,
            tot_transferred,
            tot_lapsed,
            tot_outstanding,
        ]
        try:
            my_ws.write_row(i + 2, header.index(_("Funding requested")) - 1, total_row, total_format)

            # set formatting for status
            for status in models.Status.objects.all():
                my_ws.conditional_format(
                    0, header.index(_("status").title()), i, header.index(_("status").title()), {
                        'type': 'cell',
                        'criteria': 'equal to',
                        'value': '"{}"'.format(status.name),
                        'format': workbook.add_format({'bg_color': status.color, }),
                    })

            # set formatting for entry type
            for entry_type in models.EntryType.objects.all():
                my_ws.conditional_format(
                    0, header.index(_("Entry Type").title()), i, header.index(_("Entry Type").title()), {
                        'type': 'cell',
                        'criteria': 'equal to',
                        'value': '"{}"'.format(entry_type.name),
                        'format': workbook.add_format({'bg_color': entry_type.color, }),
                    })
        except Exception:
            # FIX: was a bare except; the header.index lookups raise
            # ValueError when a label is absent — keep the best-effort intent
            print("problem with summary row")

    workbook.close()
    return target_url
def form_valid(self, form):
    """Dispatch the report-search form to the matching report URL.

    Translates the cleaned form fields into URL kwargs (using the string
    "None" as the empty placeholder) and redirects to the selected report;
    unknown report numbers redirect back to the search page with an error.
    """
    sectors = listrify(form.cleaned_data["sectors"])
    orgs = listrify(form.cleaned_data["organizations"])
    orgs_w_consultation_instructions = listrify(form.cleaned_data["orgs_w_consultation_instructions"])
    statuses = listrify(form.cleaned_data["statuses"])
    entry_types = listrify(form.cleaned_data["entry_types"])
    # FIX: the closing paren was misplaced — int(nz(x), 0) called nz() with a
    # single argument and passed base=0 to int(); the intended default is 0
    org = int(nz(form.cleaned_data["single_org"], 0))
    fy = str(form.cleaned_data["fiscal_year"])
    report_title = str(form.cleaned_data["report_title"])
    report = int(form.cleaned_data["report"])

    if report == 1:
        return HttpResponseRedirect(reverse("ihub:capacity_xlsx", kwargs={
            "fy": nz(fy, "None"),
            "orgs": nz(orgs, "None"),
            "sectors": nz(sectors, "None"),
        }))
    elif report == 2:
        return HttpResponseRedirect(reverse("ihub:report_q", kwargs={"org": org}))
    elif report == 3:
        return HttpResponseRedirect(reverse("ihub:summary_xlsx", kwargs={
            "fy": nz(fy, "None"),
            "orgs": nz(orgs, "None"),
            "sectors": nz(sectors, "None"),
        }))
    elif report == 4:
        return HttpResponseRedirect(reverse("ihub:summary_pdf", kwargs={
            "fy": nz(fy, "None"),
            "orgs": nz(orgs, "None"),
            "sectors": nz(sectors, "None"),
        }))
    elif report == 5:
        return HttpResponseRedirect(reverse("ihub:consultation_log", kwargs={
            "fy": nz(fy, "None"),
            "orgs": nz(orgs, "None"),
            "statuses": nz(statuses, "None"),
            "entry_types": nz(entry_types, "None"),
            "report_title": nz(report_title, "None"),
        }))
    elif report == 6:
        return HttpResponseRedirect(reverse("ihub:consultation_log_xlsx", kwargs={
            "fy": nz(fy, "None"),
            "orgs": nz(orgs, "None"),
            "statuses": nz(statuses, "None"),
            "entry_types": nz(entry_types, "None"),
            "report_title": nz(report_title, "None"),
        }))
    elif report == 7:
        return HttpResponseRedirect(
            f'{reverse("ihub:consultation_instructions_pdf")}?orgs={orgs_w_consultation_instructions}')
    elif report == 8:
        return HttpResponseRedirect(
            f'{reverse("ihub:consultation_instructions_xlsx")}?orgs={orgs_w_consultation_instructions}')
    else:
        messages.error(self.request, "Report is not available. Please select another report.")
        return HttpResponseRedirect(reverse("ihub:report_search"))
def generate_open_data_ver_1_report(year=None):
    """
    This is a view designed for FGP / open maps view.

    Streams a UTF-8 (BOM-prefixed) CSV of biofouling monitoring surfaces, one row
    per non-lost surface on a non-lost line, with per-species % cover columns for
    a fixed set of AIS species of interest.

    :param year: int (season) — if falsy or the literal string "None", all years are exported
    :return: http response (CSV attachment)
    """
    # determine the filename based on whether we are looking at all years vs. a single year
    filename = "biofouling_monitoring_report_{}.csv".format(
        year) if year and year != "None" else "biofouling_monitoring_report_all_years.csv"

    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    response.write(u'\ufeff'.encode('utf8'))  # BOM (optional...Excel needs it to open UTF-8 file properly)
    writer = csv.writer(response)

    # Botrylloïdes violaceus, Botryllus shlosseri, Caprella mutica, Ciona intestinalis,
    # Codium fragile, Membranipora membranacea, Styela clava
    species_id_list = [48, 24, 47, 23, 55, 59, 25]
    species_qs = models.Species.objects.filter(id__in=species_id_list)
    # NOTE(review): 'Temperture C' typo is kept as-is — this is a published open-data
    # header that downstream consumers may already rely on.
    header_row = [
        'Sampling year',
        'Station code',
        'Station name',
        'Date in',
        'Date out',
        'Weeks',
        'Station Description',
        'Collector Latitude',
        'Collector Longitude',
        'Collector ID',
        'Surface Type',
        'Surface ID',
        'Probe Type',
        'Probe Sample Date/Time',
        'Probe Depth',
        'Temperture C',
        'Sal ppt',
        'O2 percent',
        'O2 mg-l',
        'SpCond - mS',
        'Spc - mS',
        'pH',
        'Turbidity',
        'Weather Notes',
        'Samplers',
        'Other species',
    ]
    for species in species_qs:
        # abbreviate the genus to a single letter, e.g. "B. violaceus"
        # (assumes scientific_name has at least two words — TODO confirm)
        first_name = species.scientific_name.split(" ")[0][:1].upper()
        if len(species.scientific_name.split(" ")) > 2:
            second_name = " ".join(species.scientific_name.split(" ")[1:])
        else:
            second_name = species.scientific_name.split(" ")[1]
        display_name = "{}. {}".format(
            first_name,
            second_name,
        )
        header_row.append("{} % cover".format(display_name))
        # if species id is 24 or 48, we want color morph notes as well
        if species.id in [24, 48]:
            header_row.append("{} Color Notes".format(display_name))
    writer.writerow(header_row)

    samples = models.Sample.objects.all()
    # if there is a year provided, filter by only this year
    # (FIX: removed stray debug `print(year)` left over from development)
    if year and year != "None":
        samples = samples.filter(season=year)

    # make sure to exclude the lost lines and surfaces; this is sort of redundant since
    # if a line is lost, all surfaces should also be labelled as lost.
    surfaces = models.Surface.objects.filter(
        line__sample_id__in=[obj["id"] for obj in samples.order_by("id").values("id").distinct()],
        line__is_lost=False,
        is_lost=False,
    ).order_by("line__sample__date_deployed")

    for surface in surfaces:
        # Try getting hold of the last probe sample taken
        my_probe = surface.line.sample.probe_data.order_by("time_date").last()
        if my_probe:
            probe = my_probe.probe
            time_date = my_probe.time_date.strftime("%Y-%m-%d %H:%M")
            probe_depth = my_probe.probe_depth
            temp_c = my_probe.temp_c
            sal = my_probe.sal_ppt
            o2p = my_probe.o2_percent
            o2m = my_probe.o2_mgl
            spcond = my_probe.sp_cond_ms
            spc = my_probe.spc_ms
            ph = my_probe.ph
            turb = my_probe.turbidity
            weather = my_probe.weather_notes
        else:
            probe = time_date = probe_depth = temp_c = sal = o2p = o2m = spcond = spc = ph = turb = weather = None

        # summarize all of the samplers
        samplers = listrify([
            "{} ({})".format(obj, obj.organization) for obj in surface.line.sample.samplers.all()
        ])
        # summarize all of the "other" species (not in the fixed species-of-interest set)
        other_spp = listrify([
            sp.name_plaintext for sp in surface.species.all() if sp.id not in species_id_list
        ])
        data_row = [
            surface.line.sample.season,
            surface.line.sample.station_id,
            surface.line.sample.station,
            surface.line.sample.date_deployed.strftime("%Y-%m-%d"),
            surface.line.sample.date_retrieved.strftime("%Y-%m-%d") if surface.line.sample.date_retrieved else None,
            surface.line.sample.weeks_deployed,
            surface.line.sample.station.site_desc,
            surface.line.latitude_n,
            surface.line.longitude_w,
            surface.line.collector,
            surface.get_surface_type_display(),
            surface.label,
            probe,
            time_date,
            probe_depth,
            temp_c,
            sal,
            o2p,
            o2m,
            spcond,
            spc,
            ph,
            turb,
            weather,
            samplers,
            other_spp,
        ]
        for species in species_qs:
            # percent coverage is stored as a fraction; exported as a whole percentage
            try:
                data_row.append(
                    floatformat(
                        nz(
                            models.SurfaceSpecies.objects.get(
                                species=species, surface=surface).percent_coverage, 0) * 100, 0))
            except models.SurfaceSpecies.DoesNotExist:
                data_row.append(0)
            # if species id is 24 or 48, we want color morph notes as well
            if species.id in [24, 48]:
                try:
                    data_row.append(
                        models.SurfaceSpecies.objects.get(
                            species=species, surface=surface).notes)
                except models.SurfaceSpecies.DoesNotExist:
                    data_row.append(None)
        writer.writerow(data_row)
    return response
def generate_hlog(year):
    """Export all samples for a season as a legacy fixed-width "hlog" CSV attachment.

    Each row is 28 right-justified, fixed-width columns (widths in
    ``padding_lengths``) reproducing the historical hlog layout; the file has
    no header row.

    :param year: season to export (matched against ``Sample.season``)
    :return: HttpResponse with a ``hlog<year>.csv`` attachment
    """
    # grab a list of all samples for the year
    sample_list = [
        s for s in models.Sample.objects.filter(
            season=year).order_by("sample_date")
    ]
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response[
        'Content-Disposition'] = 'attachment; filename="hlog{}.csv"'.format(
        year)
    response.write(u'\ufeff'.encode(
        'utf8'))  # BOM (optional...Excel needs it to open UTF-8 file properly)
    writer = csv.writer(response)
    # these files have no headers so we jump straight into the date
    # here is where things get tricky.. each row should consist of 10 columns of data + metadata (5 cols)
    # lets define a few custom functions:
    # we will have to turn this into a fixed width
    padding_lengths = [
        5, 2, 2, 4, 20, 15, 6, 3, 3, 3, 3, 20, 4, 4, 6, 6, 6, 2, 8, 3, 3, 10,
        15, 85, 4, 4, 7, 7
    ]
    for sample in sample_list:
        # a) sample
        col_a = str(sample.id).rjust(padding_lengths[0])
        # b) day
        col_b = str(nz(sample.sample_date.day, "")).rjust(padding_lengths[1])
        # c) month
        col_c = str(nz(sample.sample_date.month, "")).rjust(padding_lengths[2])
        # d) year
        col_d = str(nz(sample.sample_date.year, "")).rjust(padding_lengths[3])
        # e) maps to PORT_NAME but will also contain survey ID from new database
        if sample.survey_id:
            my_var = sample.survey_id
        elif sample.port_id:
            if sample.port.alias_wharf_name:
                my_var = sample.port.alias_wharf_name
            else:
                my_var = "UNKNOWN"
        else:
            my_var = ""
        col_e = str(my_var).rjust(padding_lengths[4])
        # f) sampler name (text); formatted "F. LASTNAME" when both names exist
        if sample.sampler:
            # if there is a missing first or last name
            if not sample.sampler.first_name or not sample.sampler.last_name:
                sampler = "{}{}".format(nz(sample.sampler.first_name, ""),
                                        nz(sample.sampler.last_name, ""))
            else:
                sampler = "{}. {}".format(
                    sample.sampler.first_name.upper()[:1],
                    sample.sampler.last_name.upper())
        else:
            sampler = ""
        col_f = str(sampler).rjust(padding_lengths[5])
        # g) sampler's ref number
        col_g = str(nz(sample.sampler_ref_number, "")).rjust(padding_lengths[6])
        # h) number measured??
        col_h = str(nz(sample.total_fish_measured, "")).rjust(padding_lengths[7])
        # i) number kept
        col_i = str(nz(sample.total_fish_preserved, "")).rjust(padding_lengths[8])
        # j) NAFO code
        if sample.fishing_area:
            nafo_code = sample.fishing_area.nafo_area_code
        else:
            nafo_code = ""
        col_j = str(nafo_code).rjust(padding_lengths[9])
        # k) district id; maps to PORT_CODE in oracle db ** can also be research code
        # if it is experimental, we assign a research code
        if sample.experimental_net_used:
            # if gear is OTM, rc = 901
            if sample.gear_id == 26:
                my_var = 901
            # if gear is OTB, rc = 905
            elif sample.gear_id == 25:
                my_var = 905
            # if gear is GNS, rc = 908
            elif sample.gear_id == 2:
                my_var = 908
            # otherwise default to 999 (unrecognized experimental gear)
            else:
                my_var = 999
        # if there is a port, we give a district number (i.e. province_code + district_code)
        else:
            if sample.port:
                my_var = "{}{}".format(sample.port.province_code,
                                       sample.port.district_code)
            else:
                my_var = ""
        col_k = str(nz(my_var, "")).rjust(padding_lengths[10])
        # l) cfvn
        col_l = str(nz(sample.vessel_cfvn, "")).rjust(padding_lengths[11])
        # m) gear code (str); experimental nets are flagged with a trailing "*"
        if sample.gear:
            gear_code = sample.gear.gear_code
            if sample.experimental_net_used:
                gear_code = gear_code + "*"
        else:
            gear_code = ""
        col_m = str(gear_code).rjust(padding_lengths[12])
        # n) mesh size (float)
        if sample.mesh_size:
            mesh_size = "{:.2f}".format(sample.mesh_size.size_inches_decimal)
        else:
            mesh_size = ""
        col_n = str(mesh_size).rjust(padding_lengths[13])
        # o) lat (truncated to 6 characters)
        if sample.latitude_n:
            my_var = sample.latitude_n[:6]
        else:
            my_var = ""
        col_o = str(nz(my_var, "")).rjust(padding_lengths[14])
        # p) long (truncated to 6 characters)
        if sample.longitude_w:
            my_var = sample.longitude_w[:6]
        else:
            my_var = ""
        col_p = str(nz(my_var, "")).rjust(padding_lengths[15])
        # q) landed wt.
        if sample.catch_weight_lbs:
            catch_wt = int(sample.catch_weight_lbs)
        else:
            catch_wt = ""
        col_q = str(catch_wt).rjust(padding_lengths[16])
        # r) sampling protocol: 8 = sea sample; 2 = mesh selectivity; 1 = vanilla port sampling
        if sample.type == 2:
            # sea sample
            protocol = 8
        else:
            # port sample
            if sample.experimental_net_used:
                # mesh selectivity
                protocol = 2
            else:
                # vanilla port sampling
                protocol = 1
        col_r = str(protocol).rjust(padding_lengths[17])
        # s) blank
        col_s = str("").rjust(padding_lengths[18])
        # t) length frequency bins (fixed at 0.5)
        col_t = str(0.5).rjust(padding_lengths[19])
        # u) number processed
        col_u = str(nz(sample.total_fish_preserved, "")).rjust(padding_lengths[20])
        # v) date processed (intentionally blank)
        col_v = str("").rjust(padding_lengths[21])
        # w) ager name (intentionally blank)
        col_w = str("").rjust(padding_lengths[22])
        # x) comment (intentionally blank)
        col_x = str("").rjust(padding_lengths[23])
        # y) blank
        col_y = str("").rjust(padding_lengths[24])
        # z) maps to WHARF_CODE in oracle db
        if sample.port:
            if sample.port.alias_wharf_id:
                my_var = sample.port.alias_wharf_id
            else:
                my_var = ""
        else:
            my_var = ""
        col_z = str(my_var).rjust(padding_lengths[25])
        # aa) blank
        col_aa = str("").rjust(padding_lengths[26])
        # ab) blank
        col_ab = str("").rjust(padding_lengths[27])

        writer.writerow([
            col_a,
            col_b,
            col_c,
            col_d,
            col_e,
            col_f,
            col_g,
            col_h,
            col_i,
            col_j,
            col_k,
            col_l,
            col_m,
            col_n,
            col_o,
            col_p,
            col_q,
            col_r,
            col_s,
            col_t,
            col_u,
            col_v,
            col_w,
            col_x,
            col_y,
            col_z,
            col_aa,
            col_ab,
        ])
    return response
def get_context_data(self, **kwargs):
    """Build the financial summary context for one responsibility centre in one fiscal year.

    Produces ``my_dict`` keyed by project code, each holding summed allocations,
    adjustments, obligations and expenditures (broken down per allotment code),
    plus per-allotment-code grand totals.
    """
    context = super().get_context_data(**kwargs)
    fy = shared_models.FiscalYear.objects.get(pk=self.kwargs['fiscal_year'])
    context["fiscal_year"] = fy
    rc = shared_models.ResponsibilityCenter.objects.get(pk=self.kwargs['rc'])
    context["rc"] = rc
    # NOTE(review): the comprehension's loop variable `rc` (a values() dict) shadows the
    # outer ResponsibilityCenter `rc`; `rc.id` below is evaluated in the enclosing scope
    # before iteration starts, so this works — but the shadowing is fragile.
    project_list = [shared_models.Project.objects.get(pk=rc["project"]) for rc in
                    models.Transaction.objects.filter(fiscal_year=fy).filter(
                        responsibility_center=rc.id).values(
                        "project").order_by("project").distinct() if rc["project"] is not None]
    context["project_list"] = project_list
    ac_list = [shared_models.AllotmentCode.objects.get(pk=t["allotment_code"]) for t in
               models.Transaction.objects.filter(fiscal_year=fy).filter(
                   responsibility_center=rc.id).values(
                   "allotment_code").order_by("allotment_code").distinct()]
    context["ac_list"] = ac_list

    # will have to make a custom dictionary to send in
    my_dict = {}
    total_obligations = {}
    total_expenditures = {}
    total_allocations = {}
    total_adjustments = {}
    for ac in ac_list:
        total_obligations[ac.code] = 0
        total_expenditures[ac.code] = 0
        total_allocations[ac.code] = 0
        total_adjustments[ac.code] = 0

    for p in project_list:
        my_dict[p.code] = {}
        my_dict[p.code]["allocations"] = 0
        my_dict[p.code]["adjustments"] = 0
        my_dict[p.code]["obligations"] = 0
        my_dict[p.code]["expenditures"] = 0
        # comma-separated list of RC codes touching this project (list repr, brackets/quotes stripped)
        my_dict[p.code]["rcs"] = str(
            [rc["responsibility_center__code"] for rc in
             models.Transaction.objects.filter(project=p).values(
                 "responsibility_center__code").order_by("responsibility_center__code").distinct()]).replace("'", "").replace(
            "[", "").replace("]", "")
        # comma-separated list of allotment category names touching this project
        my_dict[p.code]["ac_cats"] = str([rc["allotment_code__allotment_category__name"] for rc in
                                          models.Transaction.objects.filter(project=p).values(
                                              "allotment_code__allotment_category__name").order_by(
                                              "allotment_code").distinct()]).replace("'", "").replace("[", "").replace(
            "]", "")
        for ac in ac_list:
            # project allocation (transaction_type=1, summed over invoice_cost)
            # the except TypeError guards arithmetic on a None aggregate — TODO confirm still reachable
            try:
                project_allocations = \
                    nz(models.Transaction.objects.filter(
                        project_id=p.id,
                        exclude_from_rollup=False,
                        fiscal_year=fy,
                        transaction_type=1,
                        allotment_code=ac
                    ).values("invoice_cost").order_by("invoice_cost").aggregate(dsum=Sum("invoice_cost"))["dsum"], 0)
            except TypeError:
                project_allocations = 0
            my_dict[p.code]["allocations"] += project_allocations

            # total allocations
            ## must be done by allotment code
            total_allocations[ac.code] += project_allocations

            # project adjustments (transaction_type=2)
            try:
                project_adjustments = \
                    nz(models.Transaction.objects.filter(
                        project_id=p.id,
                        exclude_from_rollup=False,
                        fiscal_year=fy,
                        transaction_type=2,
                        allotment_code=ac
                    ).values("invoice_cost").order_by("invoice_cost").aggregate(dsum=Sum("invoice_cost"))["dsum"], 0)
            except TypeError:
                project_adjustments = 0
            my_dict[p.code]["adjustments"] += project_adjustments

            # total adjustments
            total_adjustments[ac.code] += project_adjustments

            # project obligations (transaction_type=3, summed over outstanding_obligation)
            try:
                project_obligations = \
                    nz(models.Transaction.objects.filter(
                        project_id=p.id,
                        exclude_from_rollup=False,
                        fiscal_year=fy,
                        transaction_type=3,
                        allotment_code=ac
                    ).values("outstanding_obligation").order_by("outstanding_obligation").aggregate(dsum=Sum("outstanding_obligation"))[
                           "dsum"], 0)
            except TypeError:
                project_obligations = 0
            my_dict[p.code]["obligations"] += project_obligations

            # total obligations
            total_obligations[ac.code] += project_obligations

            # project expenditures (transaction_type=3, summed over invoice_cost)
            try:
                project_expenditures = \
                    nz(models.Transaction.objects.filter(
                        project_id=p.id,
                        exclude_from_rollup=False,
                        fiscal_year=fy,
                        transaction_type=3,
                        allotment_code=ac
                    ).values("invoice_cost").order_by("invoice_cost").aggregate(dsum=Sum("invoice_cost"))["dsum"], 0)
            except TypeError:
                project_expenditures = 0
            my_dict[p.code]["expenditures"] += project_expenditures

            # total expenditures
            total_expenditures[ac.code] += project_expenditures

    my_dict["total_obligations"] = total_obligations
    my_dict["total_expenditures"] = total_expenditures
    my_dict["total_adjustments"] = total_adjustments
    my_dict["total_allocations"] = total_allocations
    context["my_dict"] = my_dict
    return context
def get_context_data(self, **kwargs):
    """Build the financial summary context for one project in one fiscal year.

    ``my_dict`` holds per-allotment-code totals for allocations (type 1),
    adjustments (type 2), obligations (type 3, outstanding_obligation) and
    expenditures (type 3, invoice_cost).
    """
    context = super().get_context_data(**kwargs)
    context["my_object"] = models.Transaction.objects.first()
    fy = shared_models.FiscalYear.objects.get(pk=self.kwargs['fiscal_year'])
    context["fiscal_year"] = fy
    project = shared_models.Project.objects.get(pk=self.kwargs['project'])
    context["project"] = project
    context["field_list"] = [
        'fiscal_year',
        'creation_date',
        'responsibility_center.code',
        'allotment_code',
        'transaction_type',
        'obligation_cost',
        'invoice_cost',
        'outstanding_obligation',
        'supplier_description',
    ]
    # will have to make a custom dictionary to send in
    my_dict = {}
    my_dict["total_allocations"] = {}
    my_dict["total_adjustments"] = {}
    my_dict["total_obligations"] = {}
    my_dict["total_expenditures"] = {}

    qs = models.Transaction.objects.filter(project_id=self.kwargs["project"]).filter(exclude_from_rollup=False).filter(fiscal_year=fy)
    ac_list = [shared_models.AllotmentCode.objects.get(pk=t["allotment_code"]) for t in
               qs.values("allotment_code").order_by("allotment_code").distinct()]
    context["ac_list"] = ac_list
    for ac in ac_list:
        # project allocation
        try:
            project_allocations = \
                nz(models.Transaction.objects.filter(project_id=self.kwargs["project"]).filter(exclude_from_rollup=False).filter(
                    fiscal_year=fy).filter(
                    transaction_type=1).filter(allotment_code=ac).values("project").order_by(
                    "project").aggregate(dsum=Sum("invoice_cost"))["dsum"], 0)
        except TypeError:
            project_allocations = 0
        my_dict["total_allocations"][ac.code] = project_allocations

        # project adjustments
        # FIX: wrap in nz(..., 0) like the other three aggregates. Sum() over an empty
        # queryset yields None, and plain assignment of None never raises TypeError,
        # so the original could store None in the context and break template math.
        try:
            project_adjustments = \
                nz(models.Transaction.objects.filter(project_id=self.kwargs["project"]).filter(exclude_from_rollup=False).filter(
                    fiscal_year=fy).filter(
                    transaction_type=2).filter(allotment_code=ac).values(
                    "project").order_by("project").aggregate(dsum=Sum("invoice_cost"))["dsum"], 0)
        except TypeError:
            project_adjustments = 0
        my_dict["total_adjustments"][ac.code] = project_adjustments

        # project obligations
        # FIX: same nz(..., 0) guard as above for the outstanding_obligation sum.
        try:
            project_obligations = \
                nz(models.Transaction.objects.filter(project_id=self.kwargs["project"]).filter(exclude_from_rollup=False).filter(
                    fiscal_year=fy).filter(transaction_type=3).filter(allotment_code=ac).values(
                    "project").order_by("project").aggregate(
                    dsum=Sum("outstanding_obligation"))["dsum"], 0)
        except TypeError:
            project_obligations = 0
        my_dict["total_obligations"][ac.code] = project_obligations

        # project expenditures
        try:
            project_expenditures = \
                nz(models.Transaction.objects.filter(project_id=self.kwargs["project"]).filter(exclude_from_rollup=False).filter(
                    fiscal_year=fy).filter(
                    transaction_type=3).filter(allotment_code=ac).values(
                    "project").order_by("project").aggregate(dsum=Sum("invoice_cost"))[
                       "dsum"], 0)
        except TypeError:
            project_expenditures = 0
        my_dict["total_expenditures"][ac.code] = project_expenditures

    context["my_dict"] = my_dict
    return context
def form_valid(self, form):
    """Dispatch to the selected iHub report, passing the chosen filters as a query string.

    Unlike the older kwarg-style dispatcher, this version encodes filters as
    ``?key=value`` pairs and supports a pdf/xlsx format toggle.
    """
    sectors = listrify(form.cleaned_data["sectors"])
    orgs = listrify(form.cleaned_data["organizations"])
    orgs_w_consultation_instructions = listrify(form.cleaned_data["orgs_w_consultation_instructions"])
    statuses = listrify(form.cleaned_data["statuses"])
    entry_types = listrify(form.cleaned_data["entry_types"])
    # FIX: the default 0 belongs to nz(), not int(); the original `int(nz(x), 0)`
    # passed 0 as int()'s *base* argument (TypeError on non-strings).
    org = int(nz(form.cleaned_data["single_org"], 0))
    # fy = str(form.cleaned_data["fiscal_year"])
    report_title = str(form.cleaned_data["report_title"])
    report = int(form.cleaned_data["report"])
    # renamed from `format` — avoids shadowing the builtin
    report_format = str(form.cleaned_data["format"])
    from_date = nz(form.cleaned_data["from_date"], "None")
    to_date = nz(form.cleaned_data["to_date"], "None")
    entry_note_types = listrify(form.cleaned_data["entry_note_types"])
    entry_note_statuses = listrify(form.cleaned_data["entry_note_statuses"])
    if report == 1:  # capacity report
        qry = f'?sectors={nz(sectors, "None")}&' \
              f'from_date={nz(from_date, "None")}&' \
              f'to_date={nz(to_date, "None")}&' \
              f'orgs={nz(orgs, "None")}'
        return HttpResponseRedirect(reverse("ihub:capacity_xlsx") + qry)
    elif report == 2:
        return HttpResponseRedirect(reverse("ihub:report_q", kwargs={"org": org}))
    elif report == 3:
        qry = f'?sectors={nz(sectors, "None")}&' \
              f'from_date={nz(from_date, "None")}&' \
              f'to_date={nz(to_date, "None")}&' \
              f'orgs={nz(orgs, "None")}&' \
              f'entry_note_types={nz(entry_note_types, "None")}&' \
              f'entry_note_statuses={nz(entry_note_statuses, "None")}'
        if report_format == 'pdf':
            return HttpResponseRedirect(reverse("ihub:summary_pdf") + qry)
        else:
            return HttpResponseRedirect(reverse("ihub:summary_xlsx") + qry)
    elif report == 6:  # Engagement Update Log
        qry = f'?sectors={nz(sectors, "None")}&' \
              f'from_date={nz(from_date, "None")}&' \
              f'to_date={nz(to_date, "None")}&' \
              f'orgs={nz(orgs, "None")}&' \
              f'statuses={nz(statuses, "None")}&' \
              f'entry_types={nz(entry_types, "None")}&' \
              f'report_title={nz(report_title, "None")}&' \
              f'entry_note_types={nz(entry_note_types, "None")}&' \
              f'entry_note_statuses={nz(entry_note_statuses, "None")}'
        if report_format == 'pdf':
            return HttpResponseRedirect(reverse("ihub:consultation_log_pdf") + qry)
        else:
            return HttpResponseRedirect(reverse("ihub:consultation_log_xlsx") + qry)
    elif report == 7:
        return HttpResponseRedirect(
            f'{reverse("ihub:consultation_instructions_pdf")}?orgs={orgs_w_consultation_instructions}'
        )
    elif report == 8:
        return HttpResponseRedirect(
            f'{reverse("ihub:consultation_instructions_xlsx")}?orgs={orgs_w_consultation_instructions}'
        )
    else:
        messages.error(self.request, "Report is not available. Please select another report.")
        return HttpResponseRedirect(reverse("ihub:report_search"))
def generate_trip_list(fiscal_year, region, adm, from_date, to_date, site_url):
    """Write an xlsx listing of DFO Science trips, filtered by the given criteria.

    All filter args may be the literal string "None" (meaning "no filter").
    If a date range is given, the fiscal-year filter is dropped. Returns the
    path/URL of the generated temp file.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'temp')
    target_file = "temp.xlsx"
    target_file_path = os.path.join(target_dir, target_file)
    # NOTE(review): built with os.path.join against MEDIA_ROOT — presumably consumed
    # as a relative media URL downstream; confirm against the caller.
    target_url = os.path.join(settings.MEDIA_ROOT, 'temp', target_file)

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)

    # create formatting variables
    title_format = workbook.add_format({'bold': True, "align": 'normal', 'font_size': 24, })
    header_format = workbook.add_format(
        {'bold': True, 'border': 1, 'border_color': 'black', 'bg_color': '#D6D1C0', "align": 'normal',
         "text_wrap": True})
    total_format = workbook.add_format({'bold': True, "align": 'left', "text_wrap": True, 'num_format': '$#,##0'})
    normal_format = workbook.add_format({"align": 'left', "text_wrap": True, })
    currency_format = workbook.add_format({'num_format': '#,##0.00'})

    # decode the "None" placeholder strings
    if fiscal_year == "None":
        fiscal_year = None
    if adm == "None":
        adm = None
    if region == "None":
        region = None
    if from_date == "None":
        from_date = None
    else:
        fiscal_year = None  # should not filter on both criteria
    if to_date == "None":
        to_date = None
    else:
        fiscal_year = None  # should not filter on both criteria

    # get the trip list
    trip_list = models.Trip.objects.all()
    if fiscal_year:
        trip_list = trip_list.filter(fiscal_year=fiscal_year)
    # optional filter on trips for adm_approval_required
    if adm:
        adm = bool(int(adm))
        trip_list = trip_list.filter(is_adm_approval_required=adm)
    # optional filter on trips for regional lead
    if region:
        # too dangerous to only filter by the lead field... we should look at each request / traveller and determine
        # if they are the correct region
        request_list = list()
        # for each trip
        for trip in trip_list:
            # look at a list of the requests...
            for request in trip.requests.all():
                # if the traveller is in the region of interest, add the request to the list
                if request.region.id == int(region):
                    # add the request
                    request_list.append(request)
                    break
        trip_list = trip_list.filter(requests__in=request_list)
    if from_date:
        my_date = datetime.strptime(from_date, "%Y-%m-%d").replace(tzinfo=timezone.get_current_timezone())
        trip_list = trip_list.filter(start_date__gte=my_date, )
    if to_date:
        # NOTE(review): exclusive upper bound (__lt) — confirm this is intended vs __lte
        my_date = datetime.strptime(to_date, "%Y-%m-%d").replace(tzinfo=timezone.get_current_timezone())
        trip_list = trip_list.filter(start_date__lt=my_date, )

    field_list = [
        "fiscal_year",
        "name",
        "status",
        "trip_subcategory",
        "is_adm_approval_required",
        "location",
        "start_date",
        "end_date",
        "number_of_days|Number of days",
        "travellers|Travellers (region)",
        "total_cost|Total trip cost",
        "total_non_dfo_cost|Total non-DFO funding",
        "total_non_dfo_funding_sources|Non-DFO funding sources",
        "total_dfo_cost|Total DFO cost",
        "non_res_total_cost|Total DFO cost (non RES)",
    ]  # get_cost_comparison_dict

    # define the header
    header = [get_verbose_label(trip_list.first(), field) for field in field_list]
    # header.append('Number of projects tagged')

    title = "DFO Science Trips"
    if fiscal_year:
        title += f" for {shared_models.FiscalYear.objects.get(pk=fiscal_year)}"
    elif from_date and not to_date:
        title += f" from {from_date} Onwards"
    elif to_date and not from_date:
        title += f" up until {to_date}"
    elif to_date and from_date:
        title += f" ranging from {from_date} to {to_date}"
    if region:
        title += f" ({shared_models.Region.objects.get(pk=region)})"
    if adm is not None:
        if adm:
            title += " (Trip requiring ADM approval only)"
        else:
            title += " (Only trips NOT requiring ADM approval)"

    # define a worksheet
    my_ws = workbook.add_worksheet(name="trip list")
    my_ws.write(0, 0, title, title_format)
    my_ws.write_row(2, 0, header, header_format)

    # create the col_max column to store the length of each header
    # should be a maximum column width to 100
    # FIX: hoisted out of the per-trip loop. The original re-initialized col_max on
    # every trip, so the final column widths only reflected the last row — and an
    # empty trip list raised NameError at the set_column loop below.
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]
    i = 3
    for trip in trip_list.order_by("start_date"):
        j = 0
        for field in field_list:
            if "travellers" in field:
                my_list = list()
                for t in trip.travellers.all():
                    my_list.append(
                        f'{t.smart_name} ({t.request.region}) - {currency(t.total_dfo_funding)}'
                    )
                my_val = listrify(my_list, "\n")
                my_ws.write(i, j, my_val, normal_format)
            elif "fiscal_year" in field or "subcategory" in field or "status" in field:
                my_val = str(get_field_value(trip, field))
                my_ws.write(i, j, my_val, normal_format)
            elif field == "name":
                # hyperlink the trip name back to its detail page
                my_val = str(get_field_value(trip, field))
                my_ws.write_url(
                    i, j,
                    url=f'{site_url}/{reverse("travel:trip_detail", args=[trip.id])}',
                    string=my_val)
            elif "cost" in field:
                my_val = nz(get_field_value(trip, field), 0)
                my_ws.write(i, j, my_val, currency_format)
            else:
                my_val = get_field_value(trip, field)
                my_ws.write(i, j, my_val, normal_format)

            # adjust the width of the columns based on the max string length in each col
            ## replace col_max[j] if str length j is bigger than stored value
            # if new value > stored value... replace stored value
            if len(str(my_val)) > col_max[j]:
                if len(str(my_val)) < 75:
                    col_max[j] = len(str(my_val))
                else:
                    col_max[j] = 75
            j += 1
        i += 1

    # set column widths
    for j in range(0, len(col_max)):
        my_ws.set_column(j, j, width=col_max[j] * 1.1)

    workbook.close()
    if settings.AZURE_STORAGE_ACCOUNT_NAME:
        utils.upload_to_azure_blob(target_file_path, f'temp/{target_file}')
    return target_url
def generate_cfts_spreadsheet(fiscal_year=None, region=None, trip_request=None, trip=None, user=None, from_date=None,
                              to_date=None):
    """Write a CFTS-styled xlsx report of trip-request travellers to a temp file.

    Filters may arrive as the literal string "None" (meaning "no filter").
    When any of the report-page filters (fiscal_year/region/user/date range) are
    given, all travellers are filtered; otherwise a single trip request or trip
    is exported. Returns the path/URL of the generated temp file.
    """
    # figure out the filename
    target_dir = os.path.join(settings.BASE_DIR, 'media', 'temp')
    target_file = "temp.xlsx"
    target_file_path = os.path.join(target_dir, target_file)
    # NOTE(review): built against MEDIA_ROOT with os.path.join — presumably used
    # as a media URL by the caller; confirm.
    target_url = os.path.join(settings.MEDIA_ROOT, 'temp', target_file)

    # create workbook and worksheets
    workbook = xlsxwriter.Workbook(target_file_path)
    ws = workbook.add_worksheet(name="CFTS report")

    # create formatting
    title_format = workbook.add_format({'bold': True, "align": 'normal', 'font_size': 24, })
    header_format = workbook.add_format(
        {'bold': True, 'border': 1, 'border_color': 'black', 'bg_color': '#8C96A0', "align": 'normal',
         "text_wrap": True})
    normal_format = workbook.add_format(
        {"align": 'left', "valign": 'top', "text_wrap": True, 'num_format': '[$$-409]#,##0.00'})

    # decode the "None" placeholder strings
    if fiscal_year == "None":
        fiscal_year = None
    if region == "None":
        region = None
    if trip == "None":
        trip = None
    if user == "None":
        user = None
    if from_date == "None":
        from_date = None
    else:
        fiscal_year = None  # should not filter on both criteria
    if to_date == "None":
        to_date = None
    else:
        fiscal_year = None  # should not filter on both criteria

    include_trip_request_status = False
    # get a request list
    if fiscal_year or region or user or from_date or to_date:
        include_trip_request_status = True
        # if this report is being called from the reports page...
        travellers = models.Traveller.objects.all()
        if fiscal_year:
            travellers = travellers.filter(request__fiscal_year_id=fiscal_year)
        if region:
            travellers = travellers.filter(request__section__division__branch__region_id=region)
        if user:
            travellers = travellers.filter(user_id=user)
        if trip:
            travellers = travellers.filter(request__trip_id=trip)
        if from_date:
            my_date = datetime.strptime(from_date, "%Y-%m-%d").replace(tzinfo=timezone.get_current_timezone())
            travellers = travellers.filter(start_date__gte=my_date)
        if to_date:
            # exclusive upper bound (__lt) — TODO confirm vs __lte
            my_date = datetime.strptime(to_date, "%Y-%m-%d").replace(tzinfo=timezone.get_current_timezone())
            travellers = travellers.filter(start_date__lt=my_date)
    elif trip_request:
        travellers = get_object_or_404(models.TripRequest, pk=trip_request).travellers.all()
    elif trip:
        travellers = get_object_or_404(models.Trip, pk=trip).travellers.all()
    else:
        travellers = None

    # we need a list of ADM unapproved but recommended
    # group travellers need to be on one row
    header = [
        "Name",
        "Region",
        "Primary Role of Traveller",
        "Primary Reason for Travel",
        "Event",
        "Location",
        "Start Date",
        "End Date",
        "Est. DFO Cost",
        "Est. Non-DFO Cost",
        "Purpose",
        "Part of Learning Plan",
        "Notes",
    ]
    if include_trip_request_status:
        header.insert(0, "Request Status")

    # create the col_max column to store the length of each header
    # should be a maximum column width to 100
    col_max = [len(str(d)) if len(str(d)) <= 100 else 100 for d in header]

    title = "CFTS-styled Report on DFO Science Trip Requests"
    if fiscal_year:
        title += f" for {shared_models.FiscalYear.objects.get(pk=fiscal_year)}"
    elif from_date and not to_date:
        title += f" from {from_date} Onwards"
    elif to_date and not from_date:
        title += f" up until {to_date}"
    elif to_date and from_date:
        title += f" ranging from {from_date} to {to_date}"
    if region:
        title += f" ({shared_models.Region.objects.get(pk=region)})"
    if user:
        title += f" ({User.objects.get(pk=user)})"
    if trip:
        title += f" ({models.Trip.objects.get(pk=trip).tname})"

    ws.write(0, 0, title, title_format)
    ws.write_row(2, 0, header, header_format)

    if travellers:
        i = 3
        for t in travellers.order_by("request__trip__start_date"):
            # Build the Notes field
            notes = "TRAVELLER COST BREAKDOWN: " + t.cost_breakdown
            if t.non_dfo_org:
                notes += "\n\nORGANIZATIONS PAYING NON-DFO COSTS: " + t.non_dfo_org
            if t.request.late_justification:
                notes += "\n\nJUSTIFICATION FOR LATE SUBMISSION: " + t.request.late_justification
            if t.request.funding_source:
                notes += "\n\nFUNDING SOURCE: {}".format(t.request.funding_source)

            # Request status
            my_status = str(t.request.get_status_display())
            # DESTINATION
            my_dest = t.request.trip.location
            # START DATE OF TRAVEL
            my_start = t.start_date.strftime("%d/%m/%Y") if t.start_date else "n/a"
            # END DATE OF TRAVEL
            my_end = t.end_date.strftime("%d/%m/%Y") if t.end_date else "n/a"
            # PURPOSE
            my_purpose = t.purpose_long_text

            my_role = "{}".format(nz(t.role, "MISSING"), )
            my_name = "{}, {}".format(t.last_name, t.first_name)
            if t.is_research_scientist:
                my_name += " (RES)"

            data_row = [
                my_name,
                str(t.request.region),
                my_role,
                str(t.request.trip.trip_subcategory),
                str(t.request.trip),
                my_dest,
                my_start,
                my_end,
                t.total_dfo_funding,
                t.total_non_dfo_funding,
                my_purpose,
                yesno(t.learning_plan),
                notes,
            ]
            if include_trip_request_status:
                data_row.insert(0, my_status)

            # adjust the width of the columns based on the max string length in each col
            ## replace col_max[j] if str length j is bigger than stored value
            j = 0
            for d in data_row:
                # if new value > stored value... replace stored value
                if len(str(d)) > col_max[j]:
                    if len(str(d)) < 100:
                        col_max[j] = len(str(d))
                    else:
                        col_max[j] = 100
                j += 1

            ws.write_row(i, 0, data_row, normal_format)
            i += 1

    for j in range(0, len(col_max)):
        ws.set_column(j, j, width=col_max[j] * 1.1)

    workbook.close()
    if settings.AZURE_STORAGE_ACCOUNT_NAME:
        utils.upload_to_azure_blob(target_file_path, f'temp/{target_file}')
    return target_url