def default_detail_value_format(self, val):
    """Return `val` rendered as a display string appropriate to its type."""

    if val is None:
        # No filter applied; emphasize in HTML-capable formats only
        no_filter = "No Filter"
        if self.report_format in (XLS, CSV):
            return no_filter
        return "<em>%s</em>" % no_filter

    if isinstance(val, str):
        if val.lower() not in relative_dates.ALL_DATE_RANGES:
            return val
        # named relative range e.g. "last 30 days (1 Jan - 30 Jan)"
        range_start, range_end = relative_dates(val).range()
        return "%s (%s - %s)" % (val, format_as_date(range_start), format_as_date(range_end))

    if isinstance(val, timezone.datetime):
        return format_as_date(val)

    # sequence of datetimes: "a - b" for a pair, comma separated otherwise
    try:
        if len(val) > 0 and isinstance(val[0], timezone.datetime):
            sep = " - " if len(val) == 2 else ", "
            return sep.join(format_as_date(d) for d in val)
    except:  # noqa: E722  # pragma: no cover
        pass

    # generic iterable fallback
    try:
        return ', '.join(str(item) for item in val)
    except:  # noqa: E722  # pragma: no cover
        pass

    return str(val)
def test_handle_unit_down_time(self):
    """Generate the unit down time report and verify its CSV payload."""

    date_range = '%s - %s' % (
        format_as_date(timezone.now() - timezone.timedelta(days=30)),
        format_as_date(timezone.now()),
    )
    query = {
        'problem_description': 'problem on unit 3 or 2',
        'service_area': [self.usa3.service_area.name, self.usa2.service_area.name],
        'unit': [self.usa3.unit.name, self.usa2.unit.name],
        'unit__type': [self.u3.type.name, self.u2.type.name],
        'daterange': date_range,
    }

    response = self.client.get(reverse('handle_unit_down_time'), data=query)
    content = str(response.content)

    self.assertTrue('problem on unit 3 or 2' in content)
    self.assertTrue(self.usa3.service_area.name in content)
    self.assertTrue(self.usa2.service_area.name in content)

    expected_u2 = '%s,%s,0,1,1.00,0.00,1.00,0.00,1' % (self.usa2.unit.name, self.usa2.unit.type.name)
    self.assertTrue(expected_u2 in content)
    expected_u3 = '%s,%s,0,2,2.00,0.00,2.00,0.00,2' % (self.usa3.unit.name, self.usa3.unit.type.name)
    self.assertTrue(expected_u3 in content)

    self.assertTrue('Totals:,0.0,3,3.00,0.00,3.00,0.00,3' in content)
def history_display(history, unit, test_list, test, frequency=None):
    """Render the qa/history.html template for the given history entries.

    The displayed date range spans one year, or the span of the history
    elements, whichever is larger.
    """
    one_year = timezone.timedelta(days=365)

    if history:
        # history is ordered newest first; widen to at least one year
        end_date = history[0][0].work_completed
        start_date = history[-1][0].work_completed
        if end_date - start_date < one_year:
            start_date = end_date - one_year
    else:
        end_date = end_of_day(timezone.now())
        start_date = start_of_day(end_date - one_year)

    context = {
        "history": history,
        "date_range": "%s - %s" % (format_as_date(start_date), format_as_date(end_date)),
        "unit": unit,
        "test_list": test_list,
        "test": test,
        "show_icons": settings.ICON_SETTINGS['SHOW_STATUS_ICONS_HISTORY'],
        'frequency': frequency,
    }
    return get_template("qa/history.html").render(context)
def get_context(self):
    """Add per-site lists of scheduled test collections with due dates."""
    context = super().get_context()
    qs = self.filter_set.qs

    sites = self.filter_set.qs.order_by("unit__site__name").values_list(
        "unit__site", flat=True).distinct()

    sites_data = []
    for site in sites:
        # site can be None here since not all units may have a site
        if site:
            site = umodels.Site.objects.get(pk=site)

        site_rows = []
        sites_data.append((site.name if site else "", site_rows))

        utcs = qs.filter(
            unit__site_id=(site if site else None),
            due_date__isnull=False,
        ).order_by("due_date", "unit__%s" % settings.ORDER_UNITS_BY)

        for utc in utcs:
            window = utc.window()
            if window:
                window = "%s - %s" % (format_as_date(window[0]), format_as_date(window[1]))

            site_rows.append({
                'utc': utc,
                'unit_name': utc.unit.name,
                'name': utc.name,
                'window': window,
                'frequency': utc.frequency.name if utc.frequency else _("Ad Hoc"),
                'due_date': format_as_date(utc.due_date),
                'assigned_to': utc.assigned_to.name,
                'link': self.make_url(utc.get_absolute_url(), plain=True),
            })

    context['sites_data'] = sites_data
    return context
def to_xlsx(self):
    """Serialize the report table to an in-memory xlsx file-like object."""
    context = self.get_context()

    buffer = BytesIO()
    workbook = xlsxwriter.Workbook(buffer, {'in_memory': True})
    sheet = workbook.add_worksheet(name="Report")

    for row_idx, data_row in enumerate(self.to_table(context)):
        for col_idx, data in enumerate(data_row):
            # excel doesn't like urls longer than 255 chars, so write as string instead
            if isinstance(data, str) and "http" in data and len(data) > 255:
                sheet.write_string(row_idx, col_idx, data)
            elif isinstance(data, timezone.datetime):
                sheet.write_string(row_idx, col_idx, format_datetime(data))
            elif isinstance(data, datetime.date):
                sheet.write_string(row_idx, col_idx, format_as_date(data))
            else:
                try:
                    sheet.write(row_idx, col_idx, data)
                except TypeError:
                    # fall back to a plain string for unwritable types
                    sheet.write(row_idx, col_idx, str(data))

    workbook.close()
    buffer.seek(0)
    return buffer
def to_table(self, context):
    """Extend the parent table with one section of upcoming QC per site.

    Each section is a site heading, a header row, then one row per
    scheduled test collection from ``context['sites_data']``.
    """
    rows = super().to_table(context)

    rows.append([])

    for site, site_rows in context['sites_data']:
        rows.extend([
            [],
            [],
            [site if site else _("Other")],
            [
                _("Unit"),
                _("Name"),
                _("Frequency"),
                _("Due Date"),
                _("Window"),
                _("Assigned To"),
                _("Perform"),
            ],
        ])

        for row in site_rows:
            rows.append([
                row['unit_name'],
                row['name'],
                row['frequency'],
                format_as_date(row['utc'].due_date),
                row['window'],
                row['assigned_to'],
                # bug fix: the header declares a "Perform" column but the
                # perform link was never emitted, leaving every data row
                # one column short of the header
                row['link'],
            ])

    return rows
def as_qc_window(unit_test_collection):
    """Format the QC window of `unit_test_collection` as "start - end".

    Falls back to the collection's due date as the start when the
    scheduler provides no window start; returns "" when neither exists.
    """
    window_start, window_end = scheduling.qc_window(
        unit_test_collection.due_date,
        unit_test_collection.frequency,
    )

    formatted_end = format_as_date(window_end) if window_end else window_end

    if window_start:
        return "%s - %s" % (format_as_date(window_start), formatted_end)

    if unit_test_collection.due_date:
        return "%s - %s" % (format_as_date(unit_test_collection.due_date), formatted_end)

    return ""
def get_work_completed(self, tli):
    """Format work completed as link to instance if html report otherwise
    just return formatted date"""
    completed = format_as_date(tli.work_completed)
    if not self.html:
        return completed
    return self.make_url(tli.get_absolute_url(), completed, _("Click to view on site"))
def value_to_serializable(val, val_type=None):
    """Convert input report form value to something serializable

    TODO:: handle other input types (single date or datetime)
    """
    # bug fix: guard against the default val_type=None, which previously
    # raised AttributeError on None.lower()
    if val_type and 'daterange' in val_type.lower() and not isinstance(val, str):
        # (start, end) pair of parseable date strings -> "d1 - d2"
        d1 = format_as_date(parser.parse(val[0]))
        d2 = format_as_date(parser.parse(val[1]))
        val = "%s - %s" % (d1, d2)
    elif val_type == "recurrencefield":
        val = str(val)

    # model instances serialize as their primary key
    if isinstance(val, Model):  # pragma: no cover
        val = val.pk

    # iterables of model instances serialize as lists of primary keys;
    # anything non-iterable / without .pk passes through unchanged
    try:
        val = [x.pk for x in val]
    except (TypeError, AttributeError):
        pass

    return val
def date_to_datestrings(apps, schema):
    """Data migration: populate string_value from date/datetime values."""
    TestInstance = apps.get_model("qa", "TestInstance")

    date_instances = TestInstance.objects.filter(unit_test_info__test__type="date")
    for ti in date_instances:
        ti.string_value = format_as_date(ti.date_value)
        ti.save()

    datetime_instances = TestInstance.objects.filter(unit_test_info__test__type="datetime")
    for ti in datetime_instances:
        ti.string_value = format_datetime(ti.datetime_value)
        ti.save()
def get_context(self):
    """Group service events by site and add them to the report context."""
    context = super().get_context()

    # since we're grouping by site, we need to handle sites separately
    sites = self.filter_set.qs.order_by(
        "unit_service_area__unit__site__name",
    ).values_list(
        "unit_service_area__unit__site",
        flat=True,
    ).distinct()

    sites_data = []
    for site in sites:
        # site can be None here since not all units may have a site
        if site:
            site = umodels.Site.objects.get(pk=site)

        site_rows = []
        sites_data.append((site.name if site else "", site_rows))

        for se in self.get_ses_for_site(self.filter_set.qs, site):
            site_rows.append({
                'id': se.id,
                'problem': se.problem_description,
                'work': se.work_description,
                'unit_name': se.unit_service_area.unit.name,
                'service_area': se.unit_service_area.service_area.name,
                'service_type': se.service_type.name,
                'status': se.service_status.name,
                'service_date': format_as_date(se.datetime_service),
                'service_time': se.duration_service_time,
                'lost_time': se.duration_lost_time,
                'link': self.make_url(se.get_absolute_url(), plain=True),
            })

    context['sites_data'] = sites_data
    context['include_description'] = self.filter_set.form.cleaned_data.get("include_description")
    return context
def saved_reports_datatable(request):
    """Return the JSON row data backing the saved reports DataTable."""
    reports = visible_user_reports(request.user)
    vals = []
    template = get_template("reports/_saved_reports_table_link.html")
    sch_template = get_template("reports/_saved_reports_table_schedule.html")
    for r in reports:
        # bug fix: the format string here had no placeholders ('******'),
        # so applying "%" with a two-tuple raised TypeError at runtime.
        # NOTE(review): original wording unknown — "date by user" assumed;
        # confirm against the template/UI that consumes this column.
        user = '%s by %s' % (format_as_date(r.created), r.created_by.username)
        context = {'report': r, 'editable': r.created_by == request.user}
        try:
            schedule = r.schedule
            recipients = ' '.join(schedule.recipients())
        except models.ReportSchedule.DoesNotExist:
            # report has never been scheduled
            recipients = ""
        vals.append([
            template.render(context),
            user,
            sch_template.render(context),
            recipients,
        ])

    return JsonResponse({'data': vals})
def get_context(self):
    """Group test list instances by site and add them to the context."""
    context = super().get_context()

    # since we're grouping by site, we need to handle sites separately
    sites = self.filter_set.qs.order_by(
        "unit_test_collection__unit__site__name").values_list(
            "unit_test_collection__unit__site", flat=True).distinct()

    sites_data = []
    for site in sites:
        # site can be None here since not all units may have a site
        if site:
            site = umodels.Site.objects.get(pk=site)

        tli_rows = []
        sites_data.append((site.name if site else "", tli_rows))

        for tli in self.get_tlis_for_site(self.filter_set.qs, site):
            tli_rows.append({
                'unit_name': tli.unit_test_collection.unit.name,
                'test_list_name': tli.test_list.name,
                'due_date': format_as_date(tli.due_date),
                'work_completed': self.get_work_completed(tli),
                'pass_fail': self.get_pass_fail_status(tli),
                'link': self.make_url(tli.get_absolute_url(), plain=True),
            })

    context['sites_data'] = sites_data
    return context
def to_table(self, context):
    """Build the tabular (csv/xlsx) representation of the TLI report.

    Emits a report header, the active filters, then one section per test
    list instance: metadata, review info, comments, attachments, and a
    table of individual test results.
    """
    rows = [
        [_("Report Title:"), context['report_title']],
        [_("View On Site:"), self.get_report_url()],
        [_("Report Type:"), context['report_name']],
        [_("Report Description:"), context['report_description']],
        [_("Generated:"), format_datetime(timezone.now())],
        [],
        ["Filters:"],
    ]

    for label, criteria in context['report_details']:
        rows.append([label + ":", criteria])

    for tli in context['queryset']:
        rows.extend([
            [],
            [],
            ["Test List Instance:", self.make_url(tli.get_absolute_url())],
            [_("Created By") + ":", format_user(tli.created_by)],
            [_("Work Started") + ":", format_as_date(tli.work_started)],
            [_("Work Completed") + ":", format_as_date(tli.work_completed)],
            [
                _("Duration") + ":",
                _("In Progress") if tli.in_progress else as_time_delta(tli.duration())
            ],
            [_("Modified") + ":", format_as_date(tli.modified)],
            # typo fix: label read "Mofified By"
            [_("Modified By") + ":", format_user(tli.modified_by)],
        ])

        if tli.all_reviewed and not tli.reviewed_by:
            # auto-reviewed instances have no explicit reviewer/review date
            rows.extend([
                [_("Reviewed") + ":", format_as_date(tli.modified)],
                [_("Reviewed By") + ":", _("Auto Reviewed")],
            ])
        else:
            rows.extend([
                [_("Reviewed") + ":", format_as_date(tli.reviewed)],
                [_("Reviewed By") + ":", format_user(tli.reviewed_by)],
            ])

        for c in context['comments'].get(tli.pk, []):
            rows.append([_("Comment") + ":", format_datetime(c[0]), c[1], c[2]])

        for a in tli.attachment_set.all():
            rows.append([
                _("Attachment") + ":",
                a.label,
                self.make_url(a.attachment.url, plain=True),
            ])

        rows.append([])
        rows.append([
            _("Test"),
            _("Value"),
            _("Reference"),
            _("Tolerance"),
            _("Pass/Fail"),
            _("Review Status"),
            _("Comment"),
            _("Attachments"),
        ])

        for ti, history in tli.history()[0]:
            row = [
                ti.unit_test_info.test.name,
                ti.value_display(coerce_numerical=False),
                ti.reference.value_display() if ti.reference else "",
                ti.tolerance.name if ti.tolerance else "",
                ti.get_pass_fail_display(),
                ti.status.name,
                ti.comment,
            ]
            # one extra column per attachment
            for a in ti.attachment_set.all():
                row.append(self.make_url(a.attachment.url, plain=True))
            rows.append(row)

    return rows
def test_instance_to_point(self, ti, relative=False):
    """Grab relevant plot data from a :model:`qa.TestInstance`

    When `relative` is truthy and a reference exists, the plotted value is
    the difference from the reference: as a percentage when a percent
    tolerance applies (or no tolerance and a non-zero reference), otherwise
    as an absolute difference. Wraparound tests plot the signed shortest
    distance to the reference.
    """
    if relative and ti.reference and ti.value is not None:
        ref_is_not_zero = ti.reference.value != 0.
        has_percent_tol = (ti.tolerance and ti.tolerance.type == models.PERCENT)
        has_no_tol = ti.tolerance is None
        # percent display when a percent tolerance exists, or when there is
        # no tolerance at all and dividing by the reference is safe
        use_percent = has_percent_tol or (has_no_tol and ref_is_not_zero)

        if ti.unit_test_info.test.type == models.WRAPAROUND:
            # wraparound tests: choose the shorter of the direct distance or
            # the distance going "around" through wrap_high/wrap_low, with
            # sign indicating direction relative to the reference
            t = ti.unit_test_info.test
            ref = ti.reference.value
            if ti.value > ref:
                wrap_distance = (t.wrap_high - ti.value) + (ref - t.wrap_low)
                direct_distance = ti.value - ref
                direct_closer = direct_distance <= wrap_distance
                value = direct_distance if direct_closer else -wrap_distance
            elif ti.value < ref:
                wrap_distance = (ti.value - t.wrap_low) + (t.wrap_high - ref)
                direct_distance = ref - ti.value
                direct_closer = direct_distance <= wrap_distance
                value = -direct_distance if direct_closer else wrap_distance
            else:
                # exactly on the reference
                value = ti.value
            ref_value = 0
        elif use_percent:
            value = 100 * (ti.value - ti.reference.value) / ti.reference.value
            ref_value = 0.
        else:
            value = ti.value - ti.reference.value
            ref_value = 0
    else:
        # absolute plot: raw value, reference plotted as-is (or None)
        value = ti.value
        ref_value = ti.reference.value if ti.reference is not None else None

    # build a combined HTML comment string from the test instance comment
    # plus any comments attached to the parent test list instance,
    # ordered by submission date
    comment = ""
    tli_comments = list(ti.test_list_instance.comments.all())
    if ti.comment or tli_comments:
        comments = []
        if ti.comment:
            comments.append(
                "<strong>%s - %s:</strong> %s" % (format_as_date(
                    ti.created), ti.created_by.username, ti.comment))
        for c in sorted(tli_comments, key=lambda c: c.submit_date):
            user = c.user or ti.created_by
            comments.append(
                "<strong>%s - %s:</strong> %s" %
                (format_as_date(c.submit_date), user.username, c.comment))
        comment = '<br/>'.join(comments)

    point = {
        # tolerance/action levels filled in below when available
        "act_high": None,
        "act_low": None,
        "tol_low": None,
        "tol_high": None,
        "date": self.convert_date(timezone.make_naive(ti.work_completed, local_tz)),
        "display_date": ti.work_completed,
        "value": value,
        "display": ti.value_display() if not ti.skipped else "",
        "reference": ref_value,
        "orig_reference": ti.reference.value if ti.reference else None,
        'test_instance_id': ti.id,
        'test_instance_comment': comment,
        'test_list_instance': {
            'date': ti.test_list_instance.created,
            'id': ti.test_list_instance.id,
            'flagged': ti.test_list_instance.flagged
        }
    }

    if ti.tolerance is not None and ref_value is not None:
        if relative and ti.reference and ti.reference.value != 0. and not ti.tolerance.type == models.ABSOLUTE:
            # relative plot: express tolerance bounds as deltas around 100%
            tols = ti.tolerance.tolerances_for_value(100)
            for k in tols:
                if tols[k] is not None:
                    tols[k] -= 100.
        else:
            tols = ti.tolerance.tolerances_for_value(ref_value)
        point.update(tols)
    return point
def get_context(self):
    """Build the report context: full service event details grouped by site."""
    context = super().get_context()

    # since we're grouping by site, we need to handle sites separately
    sites = self.filter_set.qs.order_by(
        "unit_service_area__unit__site__name",
    ).values_list(
        "unit_service_area__unit__site",
        flat=True,
    ).distinct()

    sites_data = []

    for site in sites:
        if site:  # site can be None here since not all units may have a site
            site = umodels.Site.objects.get(pk=site)

        sites_data.append((site.name if site else "", []))

        for se in self.get_ses_for_site(self.filter_set.qs, site):
            # link to the test list instance that initiated this service event
            initiated_by_link = (self.make_url(
                se.test_list_instance_initiated_by.get_absolute_url(),
                plain=True) if se.test_list_instance_initiated_by else None)

            # (id, service date, link) for each related service event
            related_ses = []
            for se_rel in se.service_event_related.all():
                related_ses.append(
                    (se_rel.id, se_rel.datetime_service,
                     self.make_url(se_rel.get_absolute_url(), plain=True)))

            # map group linker name -> list of involved usernames
            group_linkers = defaultdict(list)
            for gli in se.grouplinkerinstance_set.all():
                group_linkers[gli.group_linker.name].append(
                    gli.user.username)

            # (who, time) pairs; third parties rendered as "name (vendor)"
            hours = []
            for h in se.hours_set.all():
                u = h.user.username if h.user else "%s (%s)" % (str(
                    h.third_party), h.third_party.vendor.name)
                hours.append((u, h.time))

            # return-to-service QC: (collection name, work completed, link);
            # blank when no test list instance has been performed yet
            rts_qc = []
            for rts in se.returntoserviceqa_set.all():
                tli = rts.test_list_instance
                wc = format_datetime(tli.work_completed) if tli else ""
                link = self.make_url(tli.get_absolute_url(), plain=True) if tli else ""
                rts_qc.append((rts.unit_test_collection.name, wc, link))

            # (username, formatted date, text) for comments on this event
            rts_comments = []
            comment_qs = Comment.objects.for_model(se).values_list(
                "user__username",
                "submit_date",
                "comment",
            )
            for user, dt, comment in comment_qs:
                rts_comments.append((user, format_datetime(dt), comment))

            # (part name, storage location, quantity) for parts used
            parts = []
            for part_used in se.partused_set.all():
                parts.append(
                    (part_used.part.name, str(part_used.from_storage or ""),
                     part_used.quantity))

            attachments = []
            for a in se.attachment_set.all():
                attachments.append(
                    (a.label, self.make_url(a.attachment.url, plain=True)))

            sites_data[-1][-1].append({
                'id': se.id,
                'service_date': format_as_date(se.datetime_service),
                'site': site.name if site else "",
                'unit_name': se.unit_service_area.unit.name,
                'service_area': se.unit_service_area.service_area.name,
                'service_type': se.service_type.name,
                'service_time': se.duration_service_time,
                'lost_time': se.duration_lost_time,
                'status': se.service_status.name,
                'created_by': format_user(se.user_created_by),
                'created_date': format_datetime(se.datetime_created),
                'modified_by': format_user(se.user_modified_by),
                'modified_date': format_datetime(se.datetime_modified),
                'problem': se.problem_description,
                'work': se.work_description,
                'safety': se.safety_precautions,
                'initiated_by': se.test_list_instance_initiated_by,
                'initiated_by_link': initiated_by_link,
                'related_ses': related_ses,
                'group_linkers': sorted(group_linkers.items()),
                'hours': hours,
                'rts_qc': rts_qc,
                'rts_comments': rts_comments,
                'parts': parts,
                'attachments': attachments,
                'link': self.make_url(se.get_absolute_url(), plain=True),
            })

    context['sites_data'] = sites_data
    return context
} ], 'TestList': { 'name': 'TestTestList' }, 'Modality': { 'name': 'TestModality' }, 'UnitType': { 'name': 'TestModality', 'vendor': 'TestVendor' }, 'Unit': { 'name': 'TestUnit', 'number': '1', 'date_acceptance': format_as_date(timezone.now()) }, 'Frequency': { 'name': 'TestFrequency', 'nominal_interval': '2', 'due_interval': '3', 'window_end': '4' }, 'UnitTestCollection': {}, 'absoluteTolerance': { 'act_low': '-2', 'tol_low': '-1', 'tol_high': '1', 'act_high': '2' }, 'percentTolerance': {