def water_temp(self):
    """Return an HTML snippet of the general (and optional trap) water temperature."""
    parts = [_("<u>general:</u> {shore}").format(shore=nz(self.water_temp_c, "---"))]
    if self.water_temp_trap_c:
        parts.append(_("<br><u>@trap:</u> {trap}").format(trap=nz(self.water_temp_trap_c, "---")))
    return "".join(parts)
def get_context_data(self, **kwargs):
    """Add yearly sample/fish totals (overall, lab, otolith) to the template context.

    Fixes vs. previous version:
    - ``.count`` was being stored as a bound method rather than called (it only
      rendered correctly because Django templates auto-call callables); it is
      now called explicitly so the context holds plain integers.
    - the "lab complete" fish sum now guards a null ``total_fish_preserved``
      with ``nz(..., 0)`` like every other sum (previously a None value raised
      TypeError).
    """
    context = super().get_context_data(**kwargs)
    qs = models.Sample.objects.filter(season=self.kwargs["year"])

    def fish_sum(sample_qs):
        # sum of preserved fish, treating missing values as zero
        return sum(nz(sample.total_fish_preserved, 0) for sample in sample_qs)

    # overall totals
    context["sample_sum"] = qs.count()
    context["fish_sum"] = fish_sum(qs)

    # LAB PROCESSING
    lab_done = qs.filter(lab_processing_complete=True)
    lab_todo = qs.filter(lab_processing_complete=False)
    context["sample_sum_lab_complete"] = lab_done.count()
    context["fish_sum_lab_complete"] = fish_sum(lab_done)
    context["sample_sum_lab_remaining"] = lab_todo.count()
    context["fish_sum_lab_remaining"] = fish_sum(lab_todo)

    # OTOLITH PROCESSING
    oto_done = qs.filter(otolith_processing_complete=True)
    oto_todo = qs.filter(otolith_processing_complete=False)
    context["sample_sum_oto_complete"] = oto_done.count()
    context["fish_sum_oto_complete"] = fish_sum(oto_done)
    context["sample_sum_oto_remaining"] = oto_todo.count()
    context["fish_sum_oto_remaining"] = fish_sum(oto_todo)
    return context
def air_temp(self):
    """Arrival/max/min air temperatures as a translated HTML string ("---" when missing)."""
    arrival = nz(self.air_temp_arrival, "---")
    max_temp = nz(self.max_air_temp, "---")
    min_temp = nz(self.min_air_temp, "---")
    return _("<u>arrival:</u> {arrival}; <u>max:</u> {max} <u>min:</u> {min}").format(
        arrival=arrival, max=max_temp, min=min_temp)
def crew_display(self):
    """HTML summary of crew assignments (probe, seine, dipnet, extras)."""
    template = _("<u>probe:</u> {probe}<br> <u>seine:</u> {seine}<br> <u>dipnet:</u> {dipnet}<br> <u>extras:</u> {extras}")
    html = template.format(
        probe=nz(self.crew_probe, "---"),
        seine=nz(self.crew_seine, "---"),
        dipnet=nz(self.crew_dipnet, "---"),
        extras=nz(self.crew_extras, "---"),
    )
    return mark_safe(html)
def get_context_data(self, **kwargs):
    """Expose sample/sweep ids to the template ("null" when absent).

    When only a sweep id is supplied, the sample id is resolved from the
    sweep itself (404 if the sweep does not exist).
    """
    context = super().get_context_data(**kwargs)
    sweep_id = self.kwargs.get("sweep")
    sample_id = self.kwargs.get("sample")
    if not sample_id:
        sample_id = get_object_or_404(models.Sweep, pk=sweep_id).sample_id
    context["sample_id"] = nz(sample_id, "null")
    context["sweep_id"] = nz(sweep_id, "null")
    return context
def multiple_projects_financial_summary(project_list):
    """Summarize salary / O&M / capital / total per funding source across *project_list*.

    Returns a dict keyed by FundingSource — with a "total" key added last so it
    renders after the sources — whose values are dicts holding "salary", "om",
    "capital" and "total" amounts.

    Fix: funding sources are now ordered with a ``key=`` function. The previous
    ``sorted(zip(order_strings, funding_sources))`` raised TypeError whenever two
    order strings tied, because the tuple tie-break then compared FundingSource
    objects, which are not orderable.
    """
    my_dict = {}
    # first, get the unique funding sources across all projects
    funding_sources = set()
    for project in project_list:
        funding_sources.update(project.get_funding_sources())
    # stable, safe ordering by "<type> <translated name>"
    funding_sources = sorted(
        funding_sources,
        key=lambda fs: "{} {}".format(fs.funding_source_type, fs.tname),
    )
    for fs in funding_sources:
        my_dict[fs] = {"salary": 0, "om": 0, "capital": 0, "total": 0}
        for project in project_list.all():
            # staff costs, split into salary vs O&M by cost type
            for staff in project.staff_members.filter(funding_source=fs):
                # exclude any employees that should be excluded. This is a fail
                # safe since the form should prevent data entry
                if not staff.employee_type.exclude_from_rollup:
                    if staff.employee_type.cost_type == 1:
                        my_dict[fs]["salary"] += nz(staff.cost, 0)
                    elif staff.employee_type.cost_type == 2:
                        my_dict[fs]["om"] += nz(staff.cost, 0)
            # O&M costs
            for cost in project.om_costs.filter(funding_source=fs):
                my_dict[fs]["om"] += nz(cost.budget_requested, 0)
            # Capital costs
            for cost in project.capital_costs.filter(funding_source=fs):
                my_dict[fs]["capital"] += nz(cost.budget_requested, 0)
    # grand totals go in last so the "total" entry comes after all funding sources
    my_dict["total"] = {"salary": 0, "om": 0, "capital": 0, "total": 0}
    for fs in funding_sources:
        my_dict[fs]["total"] = float(my_dict[fs]["capital"]) + float(
            my_dict[fs]["salary"]) + float(my_dict[fs]["om"])
        my_dict["total"]["salary"] += my_dict[fs]["salary"]
        my_dict["total"]["om"] += my_dict[fs]["om"]
        my_dict["total"]["capital"] += my_dict[fs]["capital"]
        my_dict["total"]["total"] += my_dict[fs]["total"]
    return my_dict
def generate_dive_csv(year):
    """Yield CSV rows (header first) for Dive records, optionally filtered by sample year.

    Returns a generator meant to feed an HTTP streaming response.
    """
    filters = {}
    if year != "":
        filters["sample__datetime__year"] = year
    dives = models.Dive.objects.filter(**filters).iterator()

    # columns: the model's concrete field names, then any FK attnames
    # (e.g. "sample_id") not already present
    meta_fields = models.Dive.objects.first()._meta.fields
    field_names = [f.name for f in meta_fields]
    field_names += [f.attname for f in meta_fields if f.attname not in field_names]

    writer = csv.writer(Echo())
    yield writer.writerow(field_names + ["transect", "transect_id"])
    for dive in dives:
        row = [unidecode.unidecode(str(nz(getattr(dive, name), ""))) for name in field_names]
        row += [dive.sample.transect, dive.sample.transect_id]
        yield writer.writerow(row)
def form_valid(self, form):
    """Redirect to the chosen report, passing the year as a querystring argument."""
    report = int(form.cleaned_data["report"])
    year = nz(form.cleaned_data["year"], "None")
    if report != 1:
        # unknown report choice: bounce back to the reports page with an error
        messages.error(self.request, "Report is not available. Please select another report.")
        return HttpResponseRedirect(reverse("scuba:reports"))
    return HttpResponseRedirect(reverse("scuba:dive_log_report") + f"?year={year}")
def crash_if_none(var_name, value):
    """Template helper: raise if *value* is missing (per nz), otherwise render nothing."""
    if nz(value, None) is not None:
        return ""
    raise Exception(
        f'the expected template variable: "{var_name}" is missing in the context'
    )
def multiple_financial_project_year_summary_data(project_years):
    """Return one summary row (dict) per funding source used across *project_years*.

    Each row carries the funding source's type and name plus summed salary,
    O&M, capital and total amounts.
    """
    # first get funding source list
    fs_ids = []
    for py in project_years:
        fs_ids += [fs.id for fs in py.get_funding_sources()]
    rows = []
    for fs in models.FundingSource.objects.filter(id__in=fs_ids):
        row = {
            "type": fs.get_funding_source_type_display(),
            "name": str(fs),
            "salary": 0,
            "om": 0,
            "capital": 0,
        }
        for py in project_years:
            # staff costs, split into salary vs O&M by cost type
            for staff in models.Staff.objects.filter(funding_source=fs, project_year=py):
                # fail-safe: the form should already prevent excluded employees
                if staff.employee_type.exclude_from_rollup:
                    continue
                if staff.employee_type.cost_type == 1:
                    row["salary"] += nz(staff.amount, 0)
                elif staff.employee_type.cost_type == 2:
                    row["om"] += nz(staff.amount, 0)
            # O&M costs
            for cost in models.OMCost.objects.filter(funding_source=fs, project_year=py):
                row["om"] += nz(cost.amount, 0)
            # Capital costs
            for cost in models.CapitalCost.objects.filter(funding_source=fs, project_year=py):
                row["capital"] += nz(cost.amount, 0)
        row["total"] = row["salary"] + row["om"] + row["capital"]
        rows.append(row)
    return rows
def financial_summary_data(project):
    """Summarize salary / O&M / capital / total per funding source for *project*.

    Returns an OrderedDict keyed by funding source, with a final "total" key
    holding the grand totals (added last so it renders after the sources).
    """
    my_dict = OrderedDict()
    for fs in project.get_funding_sources():
        totals = {"salary": 0, "om": 0, "capital": 0, "total": 0}
        # staff costs, split into salary vs O&M by cost type
        for staff in project.staff_members.filter(funding_source=fs):
            # fail-safe: the form should already prevent excluded employees
            if staff.employee_type.exclude_from_rollup:
                continue
            if staff.employee_type.cost_type == 1:
                totals["salary"] += nz(staff.cost, 0)
            elif staff.employee_type.cost_type == 2:
                totals["om"] += nz(staff.cost, 0)
        # O&M costs
        for cost in project.om_costs.filter(funding_source=fs):
            totals["om"] += nz(cost.budget_requested, 0)
        # Capital costs
        for cost in project.capital_costs.filter(funding_source=fs):
            totals["capital"] += nz(cost.budget_requested, 0)
        my_dict[fs] = totals
    # totals are computed in a second pass so that the "total" entry comes at
    # the end of all the funding sources
    grand = {"salary": 0, "om": 0, "capital": 0, "total": 0}
    my_dict["total"] = grand
    for fs in project.get_funding_sources():
        my_dict[fs]["total"] = float(my_dict[fs]["capital"]) + float(
            my_dict[fs]["salary"]) + float(my_dict[fs]["om"])
        grand["salary"] += my_dict[fs]["salary"]
        grand["om"] += my_dict[fs]["om"]
        grand["capital"] += my_dict[fs]["capital"]
        grand["total"] += my_dict[fs]["total"]
    return my_dict
def get_user_fte_breakdown(user, fiscal_year_id):
    """Break down a user's staffed weeks in one fiscal year by project-year status."""
    staff_qs = models.Staff.objects.filter(
        user=user, project_year__fiscal_year_id=fiscal_year_id)

    def weeks(**status_filter):
        # summed duration_weeks for the matching project-year statuses (0 if none)
        total = staff_qs.filter(**status_filter).aggregate(dsum=Sum("duration_weeks"))["dsum"]
        return nz(total, 0)

    return {
        'name': f"{user.last_name}, {user.first_name}",
        'fiscal_year': str(shared_models.FiscalYear.objects.get(pk=fiscal_year_id)),
        'draft': weeks(project_year__status=1),
        'submitted_unapproved': weeks(project_year__status__in=[2, 3]),
        'approved': weeks(project_year__status=4),
    }
def financial_project_summary_data(project):
    """Return one summary row (dict) per funding source of *project*.

    Each row carries the funding source's type and name plus summed salary,
    O&M, capital and total amounts. Empty list when the project has no
    funding sources.
    """
    rows = []
    if project.get_funding_sources():
        for fs in project.get_funding_sources():
            row = {
                "type": fs.get_funding_source_type_display(),
                "name": str(fs),
                "salary": 0,
                "om": 0,
                "capital": 0,
            }
            # staff costs, split into salary vs O&M by cost type
            for staff in models.Staff.objects.filter(
                    funding_source=fs, project_year__project=project):
                # fail-safe: the form should already prevent excluded employees
                if staff.employee_type.exclude_from_rollup:
                    continue
                if staff.employee_type.cost_type == 1:
                    row["salary"] += nz(staff.amount, 0)
                elif staff.employee_type.cost_type == 2:
                    row["om"] += nz(staff.amount, 0)
            # O&M costs
            for cost in models.OMCost.objects.filter(
                    funding_source=fs, project_year__project=project):
                row["om"] += nz(cost.amount, 0)
            # Capital costs
            for cost in models.CapitalCost.objects.filter(
                    funding_source=fs, project_year__project=project):
                row["capital"] += nz(cost.amount, 0)
            row["total"] = row["salary"] + row["om"] + row["capital"]
            rows.append(row)
    return rows
def financial_project_year_summary_data(project_year):
    """Return a list of rows, one per funding source of *project_year*.

    Each row is a dict with the funding source's type and name plus the summed
    salary, O&M, capital and total amounts.
    """
    rows = []
    for fs in project_year.get_funding_sources():
        row = {
            "type": fs.get_funding_source_type_display(),
            "name": str(fs),
            "salary": 0,
            "om": 0,
            "capital": 0,
        }
        # staff costs, split into salary vs O&M by cost type
        for staff in project_year.staff_set.filter(funding_source=fs):
            # fail-safe: the form should already prevent excluded employees
            if staff.employee_type.exclude_from_rollup:
                continue
            if staff.employee_type.cost_type == 1:
                row["salary"] += nz(staff.amount, 0)
            elif staff.employee_type.cost_type == 2:
                row["om"] += nz(staff.amount, 0)
        # O&M costs
        for cost in project_year.omcost_set.filter(funding_source=fs):
            row["om"] += nz(cost.amount, 0)
        # Capital costs
        for cost in project_year.capitalcost_set.filter(funding_source=fs):
            row["capital"] += nz(cost.amount, 0)
        row["total"] = row["salary"] + row["om"] + row["capital"]
        rows.append(row)
    return rows
def generate_obs_csv(year):
    """Yield CSV rows (header first) for Observation records, optionally filtered by sample year.

    Returns a generator meant to feed an HTTP streaming response.
    """
    filters = {}
    if year != "":
        filters["section__dive__sample__datetime__year"] = year
    observations = models.Observation.objects.filter(**filters).iterator()

    # columns: the model's concrete field names, then any FK attnames not already present
    meta_fields = models.Observation.objects.first()._meta.fields
    field_names = [f.name for f in meta_fields]
    field_names += [f.attname for f in meta_fields if f.attname not in field_names]
    extra_cols = [
        "sample", "sample_id", "date", "region", "transect", "side_display",
        "interval", "interval_display"
    ]

    writer = csv.writer(Echo())
    yield writer.writerow(field_names + extra_cols)
    for obs in observations:
        row = [unidecode.unidecode(str(nz(getattr(obs, name), "NA"))) for name in field_names]
        sample = obs.section.dive.sample
        row += [
            sample,
            obs.section.dive.sample_id,
            sample.datetime.strftime("%Y-%m-%d"),
            sample.transect.region if sample.transect else "NA",
            sample.transect.name if sample.transect else "NA",
            obs.section.dive.get_side_display(),
            obs.section.interval,
            obs.section.get_interval_display(),
        ]
        yield writer.writerow(row)
def water_depth_display(self):
    """HTML summary of water depth; format depends on the sample type.

    Type 1 shows depth ± delta plus discharge; type 2 shows the average
    lower/middle/upper depths. Other types render nothing (implicit None).
    """
    if self.sample_type == 1:
        # NOTE: "{dischard}" placeholder name is a typo but is kept as-is —
        # changing it would change the translation msgid
        text = _("<u>depth (m):</u> {depth}±{delta}; <u>discharge (m<sup>3</sup>/s):</u> {dischard}").format(
            depth=nz(self.water_depth_m, "---"),
            delta=nz(self.water_level_delta_m, "---"),
            dischard=nz(self.discharge_m3_sec, "---"),
        )
        return mark_safe(text)
    if self.sample_type == 2:
        text = _("<u>avg. lower depth (cm):</u> {d1}<br> <u>avg. middle depth (cm):</u> {d2}<br> <u>avg. upper depth (cm):</u> {d3}").format(
            d1=nz(self.get_avg_depth("lower"), "---"),
            d2=nz(self.get_avg_depth("middle"), "---"),
            d3=nz(self.get_avg_depth("upper"), "---"),
        )
        return mark_safe(text)
def generate_obs_csv(year, fishing_areas, rivers, sites):
    """Yield CSV rows (header first) for Observation records matching the filters.

    Args:
        year: season to filter on, or "" for all seasons.
        fishing_areas, rivers, sites: comma-separated id strings, or "" to skip.

    Returns a generator meant to feed an HTTP streaming response.

    Fix: dropped the ``.encode("utf-8").decode('utf-8')`` round trip on each
    cell — encoding a str to UTF-8 and decoding it straight back is an identity
    operation, so it was pure wasted work.
    """
    filter_kwargs = {}
    if year != "":
        filter_kwargs["sample__season"] = year
    if fishing_areas != "":
        filter_kwargs["sample__site__river__fishing_area_id__in"] = fishing_areas.split(",")
    if rivers != "":
        filter_kwargs["sample__site__river_id__in"] = rivers.split(",")
    if sites != "":
        filter_kwargs["sample__site_id__in"] = sites.split(",")
    qs = models.Observation.objects.filter(**filter_kwargs).iterator()

    # columns: the model's concrete field names plus any FK attnames not already present
    fields = models.Observation.objects.first()._meta.fields
    field_names = [field.name for field in fields]
    for field in fields:
        if field.attname not in field_names:
            field_names.append(field.attname)

    pseudo_buffer = Echo()
    writer = csv.writer(pseudo_buffer)
    yield writer.writerow(field_names + ["site", "site_id"])
    for obj in qs:
        data_row = [str(nz(getattr(obj, field), "")) for field in field_names]
        data_row.extend([obj.sample.site, obj.sample.site_id])
        yield writer.writerow(data_row)
def total_cost(self):
    """Grand total: staff costs + O&M budget + capital budget (missing sums count as 0)."""
    staff = nz(self.staff_members.all().aggregate(dsum=Sum("cost"))['dsum'], 0)
    om = nz(self.om_costs.aggregate(dsum=Sum("budget_requested"))['dsum'], 0)
    capital = nz(self.capital_costs.aggregate(dsum=Sum("budget_requested"))['dsum'], 0)
    return staff + om + capital
def total_capital(self):
    """Total requested capital budget (0 when there are no capital costs)."""
    total = self.capital_costs.aggregate(dsum=Sum("budget_requested"))['dsum']
    return nz(total, 0)
def total_om(self):
    """Total O&M: staff costs typed as O&M (cost_type=2) plus the requested O&M budget."""
    staff_om = nz(
        self.staff_members.filter(employee_type__cost_type=2).aggregate(dsum=Sum("cost"))['dsum'], 0)
    om_budget = nz(self.om_costs.aggregate(dsum=Sum("budget_requested"))['dsum'], 0)
    return staff_om + om_budget
def total_salary(self):
    """Total salary: summed cost of staff whose employee type is salary (cost_type=1)."""
    total = self.staff_members.filter(employee_type__cost_type=1).aggregate(
        dsum=Sum("cost"))['dsum']
    return nz(total, 0)
def total_ot(self):
    """Total overtime hours across all staff members (None counted as 0)."""
    return sum(nz(member.overtime_hours, 0) for member in self.staff_members.all())
def total_fte(self):
    """Total FTE weeks across all staff members (None counted as 0)."""
    return sum(nz(member.duration_weeks, 0) for member in self.staff_members.all())
def digest_qc_data():
    """One-off import: read 'qc_data_april_23_2021.csv' and create/update ihub Entry records.

    For each CSV row this gets-or-creates an Entry by title and then fills in
    regions, organizations (up to 9 columns), sector, entry type, status, dates,
    proponent, contacts and notes. Diagnostics go to stdout via print().
    """
    # open the csv we want to read
    my_target_data_file = os.path.join(settings.BASE_DIR, 'ihub',
                                       'qc_data_april_23_2021.csv')
    with open(my_target_data_file, 'r') as csv_read_file:
        my_csv = csv.DictReader(csv_read_file)
        # stuff that has to happen before running the loop
        qc_region = shared_models.Region.objects.get(name="Quebec")
        models.Status.objects.get_or_create(
            name="cancelled")  # make sure the cancelled status exists
        activate("fr")  # source data is French; activate French for translated lookups
        for row in my_csv:
            # title
            entry, created = models.Entry.objects.get_or_create(
                title=row["title"].strip(),
            )
            if created:
                # remember the originally-assigned pk as old_id
                entry.old_id = entry.id
                entry.save()
            entry.regions.add(qc_region)
            # Org...
            # there can be up to 9 organizations
            org_cols = [
                "org1",
                "org2",
                "org3",
                "org4",
                "org5",
                "org6",
                "org7",
                "org8",
                "org9",
            ]
            for org_col in org_cols:
                org_txt = nz(row[org_col].strip(), None)
                if org_txt and org_txt != "":
                    org = None
                    # fuzzy icontains match on English name or abbreviation;
                    # NOTE(review): a short org_txt can over-match several orgs
                    qs = ml_models.Organization.objects.filter(
                        Q(name_eng__icontains=org_txt) | Q(abbrev__icontains=org_txt)
                    )
                    if not qs.exists():
                        # no result: then we just create a new org
                        org = ml_models.Organization.objects.create(
                            name_eng=org_txt)
                        print(
                            f"Creating new organization: {org.name_eng} ({org.id})"
                        )  # --> http://dmapps{reverse('ihub:org_detail', args=[org.id])}")
                    elif qs.count() == 1:
                        # means we have a direct hit
                        org = qs.first()
                    else:
                        # ambiguous: leave org as None and just report the candidates
                        print(
                            f"Found multiple organizations for {org_txt}: {listrify([str(o) for o in qs])}"
                        )
                    if org:
                        org.regions.add(qc_region)
                        org.grouping.add(7)  # NOTE(review): hard-coded grouping pk — confirm it is the intended grouping
                        entry.organizations.add(org)
            # Sector
            sector, created = ml_models.Sector.objects.get_or_create(
                name=row["sector"].strip(),
                region=qc_region,
            )
            entry.sectors.add(sector)
            # type
            try:
                type = models.EntryType.objects.get(
                    name__iexact=row["type"].strip())
            except:  # NOTE(review): bare except — any lookup failure falls through to the fallback below
                type = None
                if row["type"] == 'Mobilisation':
                    # French "Mobilisation" maps to the English "engagement" type
                    type = models.EntryType.objects.get(
                        name__iexact="engagement")
                else:
                    print("can't find type: ", row["type"].strip())
            entry.entry_type = type
            # status
            status_txt = nz(row["status"].strip(), None)
            if status_txt:
                try:
                    status = models.Status.objects.get(
                        name__icontains=status_txt)
                except:  # NOTE(review): bare except — also hides MultipleObjectsReturned
                    if 'active' in status_txt.lower():
                        status = models.Status.objects.get(pk=1)  # pk=1 presumed to be the "active" status — confirm
                    else:
                        status = None
                        print("can't find status: ", row["status"].strip())
                entry.status = status
            # date1 (initial date), expected as m/d/Y
            dt = None
            date1 = nz(row["date1"], None)
            if date1:
                if len(date1.split("/")) == 3:
                    dt = datetime.datetime.strptime(date1, "%m/%d/%Y")
                    dt = timezone.make_aware(dt, timezone.get_current_timezone())
                    entry.initial_date = dt
                else:
                    print(
                        f'Cannot parse start date for Entry #{entry.id}: {date1}'
                    )
            # date2 (anticipated end date), expected as m/d/Y
            dt = None
            date2 = nz(row["date2"], None)
            if date2:
                if len(date2.split("/")) == 3:
                    dt = datetime.datetime.strptime(date2, "%m/%d/%Y")
                    dt = timezone.make_aware(dt, timezone.get_current_timezone())
                    entry.anticipated_end_date = dt
                else:
                    # NOTE(review): message says "start date" but this is the end-date column
                    print(
                        f'Cannot parse start date for Entry #{entry.id}: {date2}'
                    )
            proponent = nz(row['promoteur'], None)
            if proponent:
                entry.proponent = proponent
            entry.save()
            # contact columns 1-4 become EntryPerson rows (role=2)
            i_list = [1, 2, 3, 4]
            for i in i_list:
                name = nz(row['contact' + str(i)], None)
                if name:
                    person, created = models.EntryPerson.objects.get_or_create(
                        entry=entry,
                        name=name,
                        organization=nz(row['contact_org' + str(i)], "DFO-MPO"),
                        role=2)
            # comment columns 1-6 become notes (type=3)
            i_list = [1, 2, 3, 4, 5, 6]
            for i in i_list:
                comment = nz(row['comment' + str(i)], None)
                if comment:
                    models.EntryNote.objects.get_or_create(
                        entry=entry,
                        type=3,
                        note=comment,
                    )
            # the "suivi" (follow-up) column becomes a type=4 note
            suivi = nz(row["suivi"], None)
            if suivi:
                models.EntryNote.objects.get_or_create(
                    entry=entry,
                    type=4,
                    note=suivi,
                )
def import_org_list():
    """One-off import: read 'orgs.csv' and create/update ml Organization records.

    Every org is forced into the Pacific region, assigned a single grouping,
    and has its scalar attributes overwritten from the CSV. Save failures are
    printed rather than raised.
    """
    # open the csv we want to read
    my_target_data_file = os.path.join(settings.BASE_DIR, 'ihub', 'orgs.csv')
    with open(my_target_data_file, 'r') as csv_read_file:
        my_csv = csv.DictReader(csv_read_file)
        # stuff that has to happen before running the loop
        for row in my_csv:
            # first step: make sure there are no duplicate names
            org, created = ml_models.Organization.objects.get_or_create(
                name_eng=row["name_eng"])
            # add the region
            # check the province in order to determine the region
            # NOTE(review): despite the comment above, the region is hard-coded to Pacific
            region = shared_models.Region.objects.get(
                name__icontains="pacific")
            for r in org.regions.all():
                # wipe existing regions so Pacific ends up as the only one
                org.regions.remove(r)
            org.regions.add(region)
            # add the grouping
            if "first nation" in row["Grouping"].lower():
                grouping = ml_models.Grouping.objects.filter(
                    name__icontains="First Nation / Community").first()
            else:
                grouping, created = ml_models.Grouping.objects.get_or_create(
                    name=row["Grouping"])
                if not grouping.is_indigenous:
                    # presumably every grouping in this import is indigenous — TODO confirm
                    grouping.is_indigenous = True
                    grouping.save()
            for g in org.grouping.all():
                # wipe existing groupings so the CSV one ends up as the only one
                org.grouping.remove(g)
            org.grouping.add(grouping)
            # add the normal attrs (blank CSV cells become None)
            org.name_ind = nz(row["name_ind"], None)
            org.abbrev = nz(row["abbrev"], None)
            org.address = nz(row["address"], None)
            org.mailing_address = nz(row["mailing_address"], None)
            org.city = nz(row["city"], None)
            org.postal_code = nz(row["postal_code"], None)
            org.province_id = row["province"]
            org.phone = nz(row["phone"], None)
            org.fax = nz(row["fax"], None)
            org.dfo_contact_instructions = nz(row["dfo_contact_instructions"],
                                              None)
            org.notes = nz(row["notes"], None)
            org.key_species = nz(row["key_species"], None)
            org.former_name = nz(row["former_name"], None)
            org.website = nz(row["website"], None)
            org.council_quorum = nz(row["council_quorum"], None)
            org.election_term = nz(row["election_term"], None)
            # election dates arrive as m/d/Y; stored as noon Canada/Central
            date = None
            if row["next_election"]:
                date = make_aware(
                    datetime.datetime.strptime(row["next_election"] + " 12:00",
                                               "%m/%d/%Y %H:%M"),
                    timezone("Canada/Central"))
            org.next_election = date
            date = None
            # NOTE(review): "coucil" spelling matches the CSV column and the model field
            if row["new_coucil_effective_date"]:
                date = make_aware(
                    datetime.datetime.strptime(
                        row["new_coucil_effective_date"] + " 12:00",
                        "%m/%d/%Y %H:%M"), timezone("Canada/Central"))
            org.new_coucil_effective_date = date
            org.population_on_reserve = nz(row["population_on_reserve"], None)
            org.population_off_reserve = nz(row["population_off_reserve"],
                                            None)
            org.population_other_reserve = nz(row["population_other_reserve"],
                                              None)
            org.fin = nz(row["fin"], None)
            org.processing_plant = nz(row["processing_plant"], 0)
            try:
                org.save()
            except Exception as e:
                # best-effort import: report the failing org and keep going
                print(org, e)
def form_valid(self, form):
    """Import a Sedna CSV export (samples, length frequencies or fish details).

    The uploaded file is saved, fetched back over HTTP, parsed as a CSV, and
    each row is imported according to ``self.kwargs["type"]`` ("sample", "lf"
    or "detail" — the latter is not implemented). The upload object is deleted
    at the end and the user is redirected to the herring index.
    """
    my_object = form.save()
    # now we need to do some magic with the file...
    # load the file
    url = self.request.META.get("HTTP_ORIGIN") + my_object.file.url
    r = requests.get(url)
    csv_reader = csv.DictReader(r.text.splitlines())
    i = 0
    # loop through each row of the csv file
    for row in csv_reader:
        # what to do if we are importing a sample data export..
        if self.kwargs.get("type") == "sample":
            # each row will represent a sample
            # we only want herring.. so if there is a species field, it should be clupea ...
            species_name = row.get("species")
            if not species_name or species_name.lower().startswith("clupea"):
                sample_qs = models.Sample.objects.filter(old_id=row.get("uuid"))
                # let's get or create a sample based on the uuid
                if sample_qs.exists():
                    my_sample = get_object_or_404(models.Sample, old_id=row.get("uuid"))
                else:
                    my_sample = models.Sample.objects.create(
                        old_id=row.get("uuid"),
                        sample_date=datetime.strptime(row.get("sample_date"), "%Y-%m-%d %H:%M:%S%z"),
                    )
                # let's do this easy stuff in one shot:
                my_sample.type = row.get("type")
                my_sample.survey_id = nz(row.get("survey_id"), None)
                my_sample.sampler_ref_number = nz(row.get("sampler_ref_number"), None)
                my_sample.latitude_n = nz(row.get("latitude_n"), None)
                my_sample.longitude_w = nz(row.get("longitude_w"), None)
                my_sample.experimental_net_used = row.get("experimental_net_used")
                my_sample.sample_weight_lbs = nz(row.get("sample_weight_lbs"), None)
                my_sample.catch_weight_lbs = nz(row.get("catch_weight_lbs"), None)
                my_sample.total_fish_measured = nz(row.get("total_fish_measured"), None)
                my_sample.total_fish_preserved = nz(row.get("total_fish_preserved"), None)
                my_sample.remarks = nz(row.get("remarks"), None)
                my_sample.creation_date = datetime.strptime(row.get("creation_date"), "%Y-%m-%d %H:%M:%S%z")
                my_sample.last_modified_date = datetime.strptime(row.get("last_modified_date"), "%Y-%m-%d %H:%M:%S%z") if row.get(
                    "last_modified_date") else None
                my_sample.created_by = self.request.user
                my_sample.last_modified_by = self.request.user
                my_sample.vessel_cfvn = nz(row.get("vessel_cfvn"), None)
                # now the trickier stuff:
                # SAMPLER
                if row.get("sampler"):
                    # this will be in the format [last_name, first_name]
                    sedna_sampler = row.get("sampler").lower().split(", ")
                    # look for something similar in the hermorrhage db
                    herm_sampler = models.Sampler.objects.filter(
                        first_name__istartswith=sedna_sampler[1],
                        last_name__iexact=sedna_sampler[0],
                    )
                    if herm_sampler.count() == 1:
                        # bingo, we found our man
                        print("bingo, we found our man")
                        my_sample.sampler = herm_sampler.first()
                    elif herm_sampler.count() == 0:
                        print("no hits for sampler")
                        # this user appears to be absent from hermorrhage db
                        new_sampler = models.Sampler.objects.create(first_name=sedna_sampler[1], last_name=sedna_sampler[0])
                        my_sample.sampler = new_sampler
                    else:
                        print("more than one hit for sampler")
                        # we are in a position where there are more than one hits.. try using the whole first name.
                        # if there are still more than one hits we can just choose the first sampler arbitrarily...
                        # means there is a duplicate
                        # If no hits probably safer just to create a new sampler
                        herm_sampler = models.Sampler.objects.filter(
                            first_name__iexact=sedna_sampler[1],
                            last_name__iexact=sedna_sampler[0],
                        )
                        if herm_sampler.count() > 0:
                            # bingo, we found our man (after a few adjustments)
                            print("bingo, we found our man (after a few adjustments)")
                            my_sample.sampler = herm_sampler.first()
                        else:
                            print("no hits for sampler, when using full first name")
                            # this user appears to be absent from hermorrhage db
                            new_sampler = models.Sampler.objects.create(first_name=sedna_sampler[1], last_name=sedna_sampler[0])
                            my_sample.sampler = new_sampler
                else:
                    # NOTE(review): this looks up the UNKNOWN sampler but never assigns
                    # it to my_sample.sampler — presumably a bug, confirm intent
                    herm_sampler = models.Sampler.objects.get(pk=29)  # sampler = UNKNOWN
                # FISHING AREA
                # since this is more fundamental, let's crush the script is not found
                # look for something exactly the same in the hermorrhage db
                if row.get("fishing_area"):
                    my_sample.fishing_area = models.FishingArea.objects.get(nafo_area_code__iexact=row.get("fishing_area"))
                # GEAR
                # same for gear. not finding something here is unacceptable
                if row.get("gear"):
                    my_sample.gear = models.Gear.objects.get(gear_code__iexact=row.get("gear"))
                # MESH SIZE
                if row.get("mesh_size"):
                    # get-or-create by size (no unique constraint assumed)
                    try:
                        my_mesh = models.MeshSize.objects.get(size_mm=row.get("mesh_size"))
                    except models.MeshSize.DoesNotExist:
                        my_mesh = models.MeshSize.objects.create(
                            size_mm=row.get("mesh_size")
                        )
                    my_sample.mesh_size = my_mesh
                # PORT
                if row.get("port_code"):
                    # not finding something here is unacceptable
                    # linear scan because full_code is a computed property, not a db column
                    for port in shared_models.Port.objects.all():
                        if row.get("port_code") == port.full_code:
                            my_sample.port = port
                            break
                my_sample.save()
            else:
                messages.warning(self.request,
                                 "Skipping sample with uuid {} because it is not a herring sample.".format(row.get("uuid")))
        elif self.kwargs.get("type") == "lf":
            # each row will represent a length frequency object
            # let's get the sample based on the uuid; if not found we should crash because something went wrong
            try:
                my_sample = models.Sample.objects.get(old_id=row.get("sample_uuid"))
            except models.Sample.DoesNotExist:
                messages.warning(self.request,
                                 "Sample with uuid {} was not found in the hermorrhage db. This length frequecy will be skipped".format(
                                     row.get("sample_uuid")))
            else:
                # on the first row, wipe the sample's existing length frequencies
                if i == 0:
                    my_sample.length_frequency_objects.all().delete()
                my_lf, created = models.LengthFrequency.objects.get_or_create(
                    sample=my_sample,
                    length_bin_id=row.get("length_bin"),
                )
                my_lf.count = row.get("count")
                my_lf.save()
                my_sample.save()  # run the save method to do a few updates
        elif self.kwargs.get("type") == "detail":
            # each row will represent a fish detail
            # let's get the sample based on the uuid; if not found we should crash because something went wrong
            my_sample = models.Sample.objects.get(old_id=row.get("sample_uuid"))
            # DJF: I DON'T HAVE THE TIME TO COMPLETE THIS RIGHT NOW. THIS YEAR (2019) THERE WERE NO FISH DETAIL RECORDS
            # PROCCESSED IN SEDNA SO I WILL KICK THE CAN DOWN UNTIL A LATER DATE
            # Note: this import scirpt is a combination of the sample import and the lf import above.
            messages.info(self.request,
                          "Due to limited time resources, this import script was not developed. Once fish details are process on "
                          "boats, this function will be built")
        i += 1
    # clear the file in my object
    my_object.delete()
    return HttpResponseRedirect(reverse_lazy('herring:index'))
def wind(self):
    """Translated HTML snippet showing wind speed and direction ("---" when missing)."""
    speed = nz(self.get_wind_speed_display(), "---")
    direction = nz(self.get_wind_direction_display(), "---")
    return _("<u>speed:</u> {speed}; <u>direction:</u> {dir}").format(speed=speed, dir=direction)
def import_old_data():
    """One-off import: read 'diet_import.csv' from a local dump folder and load predator/prey records.

    Only rows with N_orderUhl >= 43160 are processed. Failures are printed and
    abort the loop via ``break`` (best-effort, restartable import).
    """
    # open the csv we want to read
    rootdir = "C:\\Users\\fishmand\\Desktop\\dump"
    with open(os.path.join(rootdir, "diet_import.csv"), 'r') as csv_read_file:
        my_csv = csv.DictReader(csv_read_file)
        for row in my_csv:
            if int(row["N_orderUhl"]) >= 43160:
                # first get the predator
                my_pred, created = models.Predator.objects.get_or_create(
                    old_seq_num=row["SeqNoUhl"].strip(),
                    species_id=row["Predator"].strip(),
                )
                # if the predator is being created, populate it with stuff
                # if created or not my_pred.processing_date:
                my_pred.cruise_id = nz(row["cruise_id"].strip(), None)
                my_pred.processing_date = timezone.datetime(
                    int(row["year_pro"]), int(row["month_pro"]),
                    int(row["day_pro"]),
                    tzinfo=timezone.now().tzinfo)
                my_pred.set = nz(row["Set"].strip(), None)
                my_pred.fish_number = nz(row["FishNo"].strip(), None)
                my_pred.somatic_length_cm = nz(row["PredLen"].strip(), None)
                my_pred.stomach_wt_g = nz(row["StomWt"].strip(), None)
                my_pred.comments = nz(row["survey_comment"].strip(), None)
                my_pred.stratum = nz(row["Strat"].strip(), None)
                my_pred.date_last_modified = timezone.now()
                # NOTE(review): Prey code 9900 appears to mean "empty stomach" (see the
                # prey-creation guard below), so this condition looks inverted — it tags
                # "empty stomach" on rows that DO have prey. Confirm against the source data.
                if int(row["Prey"]) != 9900:
                    my_pred.comments = "empty stomach" if not my_pred.comments else "{}; empty stomach".format(
                        my_pred.comments)
                try:
                    my_pred.save()
                except Exception as e:
                    print("cannot save predator.")
                    print(e)
                    print(row["N_orderUhl"])
                    break
                # add in the sampler, if exists
                if nz(row["Sampler"].strip(), None):
                    my_pred.samplers.add(
                        models.Sampler.objects.get(
                            first_name=nz(row["Sampler"].strip(), None)))
                # next get the prey item, if not empty
                if int(row["Prey"]) != 9900:
                    try:
                        my_prey, created = models.Prey.objects.get_or_create(
                            old_id=row["N_orderUhl"].strip(),
                            species_id=row["Prey"].strip(),
                            predator=my_pred,
                        )
                    except Exception as e:
                        print("cannot create prey")
                        print(e)
                        print(row["N_orderUhl"])
                        break
                    else:
                        my_prey.digestion_level_id = nz(
                            row["PreyDigestionState"].strip(), None)
                        # NOTE(review): the column-to-field mapping below looks swapped —
                        # "PreyWt" feeds a length field and "PreyLen" feeds a weight field,
                        # and somatic_wt_g is then overwritten by "PreyStomWt" two lines
                        # later. Confirm against the source CSV before reusing this script.
                        my_prey.somatic_length_mm = nz(row["PreyWt"].strip(), None)
                        my_prey.somatic_wt_g = nz(row["PreyLen"].strip(), None)
                        my_prey.censored_length = nz(
                            row["CensoredLen"].strip(), None)
                        my_prey.somatic_wt_g = nz(row["PreyStomWt"].strip(), None)
                        my_prey.comments = nz(row["Comments"].strip(), None)
                        try:
                            my_prey.save()
                        except Exception as e:
                            print("cannot save prey")
                            print(e)
                            print(row["N_orderUhl"])
                            break
def max_overhanging_veg_display(self):
    """HTML snippet of max overhanging vegetation on the left/right banks."""
    text = _("<u>left:</u> {left}; <u>right:</u> {right}").format(
        left=nz(self.max_overhanging_veg_left, "---"),
        right=nz(self.max_overhanging_veg_right, "---"),
    )
    return mark_safe(text)
def rpms(self):
    """HTML snippet of RPMs at arrival vs departure ("---" when missing)."""
    text = _("<u>@start (m):</u> {start}; <u>@end:</u> {end}").format(
        start=nz(self.rpm_arrival, "---"),
        end=nz(self.rpm_departure, "---"),
    )
    return mark_safe(text)