def row_parser(self, row):
    super().row_parser(row)
    row_datetime = utils.get_row_date(row)
    row_date = row_datetime.date()
    if utils.nan_to_none(row.get(self.box_key)):
        self.row_entered += utils.enter_indvd(self.anix_indv.pk, self.cleaned_data, row_date,
                                              row[self.box_key], self.box_anidc_id.pk, None)
    if utils.nan_to_none(row.get(self.location_key)):
        self.row_entered += utils.enter_indvd(self.anix_indv.pk, self.cleaned_data, row_date,
                                              row[self.location_key], self.boxl_anidc_id.pk, None)
    if utils.nan_to_none(row.get(self.indt_key)) and utils.nan_to_none(row.get(self.indt_amt_key)):
        indvtc_id = models.IndTreatCode.objects.filter(name__icontains=row[self.indt_key]).get()
        # shortest name containing "gram" sorts first, so plain "gram" wins over "kilogram" etc.
        unit_id = models.UnitCode.objects.filter(name__icontains="gram").order_by(Length('name').asc()).first()
        self.row_entered += utils.enter_indvt(self.anix_indv.pk, self.cleaned_data, row_datetime,
                                              row[self.indt_amt_key], indvtc_id.pk, unit_id=unit_id)
def row_parser(self, row):
    super().row_parser(row)
    row_datetime = utils.get_row_date(row)
    row_date = row_datetime.date()
    utils.enter_bulk_indvd(self.anix_indv.pk, self.cleaned_data, row_date,
                           gender=row.get(self.sex_key),
                           tissue_yn=row.get(self.tissue_key),
                           )
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    row_datetime = utils.get_row_date(row, get_time=True)
    trof_list = utils.parse_trof_str(row.get(self.trof_key), cleaned_data["facic_id"])
    for trof_id in trof_list:
        row_contx, contx_entered = utils.enter_contx(trof_id, cleaned_data, final_flag=None,
                                                     return_contx=True)
        self.row_entered += contx_entered
        self.row_entered += utils.enter_env(row[self.temp_key], row_datetime.date(), cleaned_data,
                                            self.envc_id, env_time=row_datetime.time(),
                                            contx=row_contx, save=True, qual_id=self.qual_id)
def row_parser(self, row):
    # need to: find the pair's group, link it to its pairing, create a tray, and add the count.
    cleaned_data = self.cleaned_data
    row_date = utils.get_row_date(row)
    pair_list = utils.get_pair(row[self.cross_key], row["stok_id"], row[self.year_key],
                               prog_grp=utils.nan_to_none(row.get(self.prog_key)),
                               fail_on_not_found=True)
    if len(pair_list) == 1:
        pair_id = pair_list[0]
    else:
        raise Exception("Too many pairs found for row \n{}".format(row))
    anix_id = models.AniDetailXref.objects.filter(pair_id=pair_id, grp_id__isnull=False) \
        .select_related('grp_id').first()
    grp_id = anix_id.grp_id
    self.row_entered += utils.enter_anix(cleaned_data, grp_pk=grp_id.pk, return_sucess=True)
    tray_id = utils.create_tray(row["trof_id"], row[self.tray_key], row_date, cleaned_data)
    contx, contx_entered = utils.enter_contx(tray_id, cleaned_data, True, grp_pk=grp_id.pk,
                                             return_contx=True)
    self.row_entered += contx_entered
    if utils.nan_to_none(row.get(self.fecu_key)):
        cnt, cnt_entered = utils.enter_cnt(cleaned_data, row[self.fecu_key], contx_pk=contx.pk,
                                           cnt_code="Photo Count")
        self.row_entered += cnt_entered
    self.row_entered += utils.enter_bulk_grpd(anix_id.pk, cleaned_data, row_date,
                                              comments=row.get(self.comment_key))
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    # get tray, group, and row date
    row_date = utils.get_row_date(row)
    tray_qs = models.Tray.objects.filter(trof_id=row["trof_id"], name=row[self.tray_key])
    tray_id = tray_qs.filter(Q(start_date__lte=row_date, end_date__gte=row_date) |
                             Q(end_date__isnull=True)).get()
    pair_id = models.Pairing.objects.filter(cross=row[self.cross_key], end_date__isnull=True,
                                            indv_id__stok_id=row["stok_id"],
                                            start_date__year=row[self.year_key]).first()
    grp_id = utils.get_tray_group(pair_id, tray_id, row_date)

    # want to shift the hu move event, so that the counting math always works out.
    hu_move_date = row_date + timedelta(minutes=1)
    hu_cleaned_data = utils.create_new_evnt(cleaned_data, "Allocation", hu_move_date)
    hu_anix, data_entered = utils.enter_anix(hu_cleaned_data, grp_pk=grp_id.pk)
    self.row_entered += data_entered
    hu_contx, data_entered = utils.enter_contx(tray_id, hu_cleaned_data, None, grp_pk=grp_id.pk,
                                               return_contx=True)
    self.row_entered += data_entered

    # record development
    dev_at_hu_transfer = grp_id.get_development(hu_move_date)
    utils.enter_grpd(hu_anix.pk, hu_cleaned_data, hu_move_date, dev_at_hu_transfer, None,
                     anidc_str="Development")
    self.row_entered += utils.enter_contx(row["trof_id"], cleaned_data)

    # HU Picks:
    self.row_entered += utils.enter_cnt(cleaned_data, row[self.loss_key], hu_contx.pk,
                                        cnt_code="HU Transfer Loss")[1]

    # generate new group, cup, and movement event:
    cont = None
    if utils.nan_to_none(row[self.end_tray_key]):
        trof_id = models.Trough.objects.filter(facic_id=cleaned_data["facic_id"],
                                               name=row[self.end_trof_key]).get()
        tray_qs = models.Tray.objects.filter(trof_id=trof_id, name=row[self.tray_key])
        cont = tray_qs.filter(Q(start_date__lte=row_date, end_date__gte=row_date) |
                              Q(end_date__isnull=True)).get()
    elif utils.nan_to_none(row[self.end_trof_key]):
        cont = models.Trough.objects.filter(facic_id=cleaned_data["facic_id"],
                                            name=row[self.end_trof_key]).get()
    elif utils.nan_to_none(row[self.heatl_key]):
        cont = utils.get_cont_from_dot(row[self.cont_key], cleaned_data, row_date)
    elif utils.nan_to_none(row[self.tank_key]):
        # .get() added: the other branches all resolve to a single container, not a queryset.
        cont = models.Tank.objects.filter(facic_id=cleaned_data["facic_id"],
                                          name=row[self.tank_key]).get()
    self.row_entered += utils.enter_contx(cont, cleaned_data)

    if not utils.y_n_to_bool(row[self.final_key]):
        # NEW GROUPS TAKEN FROM INITIAL
        out_cnt = utils.enter_cnt(cleaned_data, 0, hu_contx.pk, cnt_code="Eggs Removed")[0]
        utils.enter_cnt_det(cleaned_data, out_cnt, row[self.cnt_key], "Program Group Split",
                            row[self.prog_key])
        indv, final_grp = cont.fish_in_cont(row_date)
        if not final_grp:
            final_grp = models.Group(spec_id=grp_id.spec_id,
                                     coll_id=grp_id.coll_id,
                                     grp_year=grp_id.grp_year,
                                     stok_id=grp_id.stok_id,
                                     grp_valid=True,
                                     created_by=cleaned_data["created_by"],
                                     created_date=cleaned_data["created_date"],
                                     )
            try:
                final_grp.clean()
                final_grp.save()
            except (ValidationError, IntegrityError):
                return None
        else:
            # MAIN GROUP GETTING MOVED
            final_grp = final_grp[0]
        final_grp_anix = utils.enter_anix(cleaned_data, grp_pk=final_grp.pk, return_anix=True)
        self.row_entered += utils.enter_anix(hu_cleaned_data, grp_pk=final_grp.pk, return_sucess=True)
        self.row_entered += utils.enter_bulk_grpd(final_grp_anix, cleaned_data, row_date,
                                                  prnt_grp=grp_id,
                                                  prog_grp=row.get(self.prog_key),
                                                  comments=row.get(self.comment_key))
        self.row_entered += utils.enter_grpd(final_grp_anix.pk, cleaned_data, row_date,
                                             dev_at_hu_transfer, None, anidc_str="Development")

        # create movement for the new group, create 2 contx's and 3 anix's
        # cup contx is contx used to link the positive counts
        cont_contx = utils.create_egg_movement_evnt(tray_id, cont, cleaned_data, row_date,
                                                    final_grp.pk, return_cup_contx=True)
        move_cleaned_data = cleaned_data.copy()
        move_cleaned_data["evnt_id"] = cont_contx.evnt_id
        cnt_contx = cont_contx
        # clearing the pk makes save() insert a new cross reference for the tray:
        cnt_contx.pk = None
        cnt_contx.tray_id = tray_id
        try:
            cnt_contx.save()
        except IntegrityError:
            cnt_contx = models.ContainerXRef.objects.filter(pk=cont_contx.pk).get()
        self.row_entered += utils.enter_anix(move_cleaned_data, grp_pk=final_grp.pk,
                                             contx_pk=cnt_contx.pk, return_sucess=True)

        # add the positive counts
        cnt = utils.enter_cnt(move_cleaned_data, row[self.cnt_key], cnt_contx.pk,
                              cnt_code="Eggs Added")[0]
        # guard on the row value, not the key name, so the weight detail is only entered
        # when the cell is filled in:
        if utils.nan_to_none(row.get(self.weight_key)):
            utils.enter_cnt_det(move_cleaned_data, cnt, row[self.weight_key], "Weight")
        utils.enter_cnt_det(move_cleaned_data, cnt, row[self.cnt_key], "Program Group Split",
                            row[self.prog_key])
    else:
        # Move main group to drawer, and add end date to tray:
        if cont:
            end_contx = utils.create_movement_evnt(tray_id, cont, cleaned_data, row_date,
                                                   grp_pk=grp_id.pk, return_end_contx=True)
            tray_id.end_date = row_date
            tray_id.save()
            end_cnt = utils.enter_cnt(cleaned_data, row[self.cnt_key], end_contx.pk,
                                      cnt_code="Egg Count")[0]
            utils.enter_cnt_det(cleaned_data, end_cnt, row[self.weight_key], "Weight")
        else:
            self.log_data += "\n Draw {} from {} not found".format(cont, row[self.cont_key])

    # link cup to egg development event
    utils.enter_contx(cont, cleaned_data, None)
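# Note on the one-minute offset in the parser above (an inference from its "counting math"
# comment, not confirmed by the source): recording the transfer loss at row_date and the
# allocation/egg counts at hu_move_date keeps the two sets of counts at distinct timestamps,
# so anything summing counts per container up to a given moment sees the tray's loss before
# the move. A minimal sketch of the ordering:
#
#   row_date = utils.get_row_date(row)              # "HU Transfer Loss" entered here
#   hu_move_date = row_date + timedelta(minutes=1)  # movement + "Eggs Removed"/"Eggs Added"
#   assert row_date < hu_move_date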
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    row_date = utils.get_row_date(row)
    self.row_entered += utils.enter_contx(row["trof_id"], cleaned_data)
    # find group from either cross or tray:
    if utils.nan_to_none(row.get(self.hu_key)):
        cont_id = utils.get_cont_from_dot(row[self.hu_key], cleaned_data, row_date)
    elif utils.nan_to_none(row.get(self.tray_key)):
        cont_id = models.Tray.objects.filter(trof_id=row["trof_id"], end_date__isnull=True,
                                             name=row[self.tray_key]).get()
    else:
        cont_id = row["trof_id"]
    if utils.nan_to_none(row.get(self.cross_key)):
        pair_id = models.Pairing.objects.filter(cross=row[self.cross_key], end_date__isnull=True,
                                                indv_id__stok_id=row["stok_id"],
                                                start_date__year=row[self.year_key]).first()
        grp_id = utils.get_tray_group(pair_id, cont_id, row_date)
    else:
        grp_id = cont_id.fish_in_cont(row_date, get_grp=True)

    grp_anix = None
    shock = False
    for pickc_id in cleaned_data["pickc_id"]:
        if utils.nan_to_none(row[pickc_id.name]):
            shock = utils.y_n_to_bool(row.get(self.shocking_key))
            grp_anix, evnt_entered = utils.create_picks_evnt(cleaned_data, cont_id, grp_id.pk,
                                                             row[pickc_id.name], row_date,
                                                             pickc_id.name,
                                                             cleaned_data["evnt_id"].perc_id,
                                                             shocking=shock, return_anix=True,
                                                             pick_comments=row.get(self.comment_key))
            self.row_entered += evnt_entered
    for col_name in row.keys():
        col_date = utils.get_col_date(col_name)
        if col_date:
            col_date_str = datetime.strftime(col_date, "%Y-%b-%d")
            self.date_dict[col_date_str] = True
            if utils.nan_to_none(row.get(col_name)):
                self.row_entered += utils.create_picks_evnt(cleaned_data, cont_id, grp_id.pk,
                                                            row[col_name], col_date,
                                                            self.default_pickc_id,
                                                            cleaned_data["evnt_id"].perc_id,
                                                            pick_comments=row.get(self.comment_key))
    # record development
    if grp_anix and shock:
        pick_evnt_cleaned_data = cleaned_data.copy()
        pick_evnt_cleaned_data["evnt_id"] = grp_anix.evnt_id
        dev_at_pick = grp_id.get_development(row_date)
        utils.enter_grpd(grp_anix.pk, pick_evnt_cleaned_data, row_date, dev_at_pick, None,
                         anidc_str="Development")
    self.row_entered += utils.enter_contx(row["trof_id"], cleaned_data)
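# Descriptive note on the column scan above (interpretation inferred from the code, not
# confirmed elsewhere): any column whose header parses as a date via utils.get_col_date is
# treated as a per-day pick count; the date is keyed into self.date_dict as "%Y-%b-%d", and
# non-empty values are entered as picks under self.default_pickc_id. So a value of 3 in a
# date-headed column records three picks dated to that column's date.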
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    year, coll = utils.year_coll_splitter(row[self.coll_key])
    row_datetime = utils.get_row_date(row)
    row_date = row_datetime.date()
    indv_ufid = utils.nan_to_none(row.get(self.ufid_key))
    indv = models.Individual(grp_id=self.grp_id,
                             spec_id=self.salmon_id,
                             stok_id=self.stok_id,
                             coll_id=self.coll_id,
                             indv_year=year,
                             pit_tag=row[self.pit_key],
                             ufid=indv_ufid,
                             indv_valid=True,
                             comments=utils.nan_to_none(row.get(self.comment_key)),
                             created_by=cleaned_data["created_by"],
                             created_date=cleaned_data["created_date"],
                             )
    try:
        indv.clean()
        indv.save()
        self.row_entered = True
    except (ValidationError, IntegrityError):
        indv = models.Individual.objects.filter(pit_tag=indv.pit_tag).get()

    if utils.nan_to_none(row[self.from_tank_id_key]) or utils.nan_to_none(row[self.to_tank_id_key]):
        in_tank = row[self.from_tank_id_key]
        out_tank = row[self.to_tank_id_key]
        self.row_entered += utils.create_movement_evnt(in_tank, out_tank, cleaned_data,
                                                       row_datetime, indv_pk=indv.pk)
        # if tagged fish goes back into same tank, still link fish to tank:
        if in_tank == out_tank:
            utils.enter_contx(in_tank, cleaned_data, True, indv_pk=indv.pk)

    anix_indv, anix_entered = utils.enter_anix(cleaned_data, indv_pk=indv.pk)
    self.row_entered += anix_entered
    self.anix_indv = anix_indv
    utils.enter_bulk_indvd(anix_indv.pk, self.cleaned_data, row_date,
                           len_mm=row.get(self.len_key_mm),
                           len_val=row.get(self.len_key),
                           weight=row.get(self.weight_key),
                           weight_kg=row.get(self.weight_key_kg),
                           vial=row.get(self.vial_key),
                           mark=row.get(self.mark_key),
                           prog_grp=row.get(self.group_key),
                           lifestage=row.get(self.lifestage_key),
                           comments=row.get(self.comment_key),
                           )
    if utils.nan_to_none(row.get(self.precocity_key)):
        self.row_entered += utils.enter_indvd(anix_indv.pk, cleaned_data, row_date, None,
                                              self.ani_health_anidc_id.pk, "Precocity")
    if utils.nan_to_none(row.get(self.crew_key)):
        perc_list, inits_not_found = utils.team_list_splitter(row[self.crew_key])
        for perc_id in perc_list:
            team_id, team_entered = utils.add_team_member(perc_id, cleaned_data["evnt_id"],
                                                          role_id=self.tagger_code,
                                                          return_team=True)
            self.row_entered += team_entered
            if team_id:
                self.row_entered += utils.enter_anix(cleaned_data, indv_pk=indv.pk,
                                                     team_pk=team_id.pk, return_sucess=True)
        for inits in inits_not_found:
            self.log_data += "No valid personnel with initials ({}) for row with pit tag" \
                             " {}\n".format(inits, row[self.pit_key])
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    indv_female, samp_female, new_log = utils.get_indv_or_samp(row, self.pit_key_f,
                                                               self.samp_key_f,
                                                               cleaned_data["evnt_id"])
    if new_log:
        self.log_data += new_log
        return self.log_data, False
    indv_male, samp_male, new_log = utils.get_indv_or_samp(row, self.pit_key_m, self.samp_key_m,
                                                           cleaned_data["evnt_id"])
    if new_log:
        self.log_data += new_log
        return self.log_data, False
    if not (indv_female or samp_female) or not (indv_male or samp_male):
        raise Exception("No Individual or Fish found for row {}".format(row))
    if not utils.nan_to_none(row[self.choice_key]):
        raise Exception("Choice column cannot be empty. Set Fecundity column to zero to indicate Duds.")
    row_date = utils.get_row_date(row)

    if indv_female:
        anix_female, anix_entered = utils.enter_anix(cleaned_data, indv_pk=indv_female.pk)
        self.row_entered += anix_entered
        self.row_entered += utils.enter_bulk_indvd(anix_female.pk, cleaned_data, row_date,
                                                   gender="F",
                                                   len_mm=row.get(self.len_key_f_mm),
                                                   len_val=row.get(self.len_key_f),
                                                   weight=row.get(self.weight_key_f),
                                                   weight_kg=row.get(self.weight_key_f_kg),
                                                   status=row.get(self.status_key_f),
                                                   comments=row.get(self.comment_key_f))
    if indv_male:
        anix_male, anix_entered = utils.enter_anix(cleaned_data, indv_pk=indv_male.pk)
        self.row_entered += anix_entered
        self.row_entered += utils.enter_bulk_indvd(anix_male.pk, cleaned_data, row_date,
                                                   gender="M",
                                                   len_mm=row.get(self.len_key_m_mm),
                                                   len_val=row.get(self.len_key_m),
                                                   weight=row.get(self.weight_key_m),
                                                   weight_kg=row.get(self.weight_key_m_kg),
                                                   status=row.get(self.status_key_m),
                                                   comments=row.get(self.comment_key_m))
    if samp_female:
        self.row_entered += utils.enter_bulk_sampd(samp_female.pk, cleaned_data, row_date,
                                                   gender="F",
                                                   len_mm=row.get(self.len_key_f_mm),
                                                   len_val=row.get(self.len_key_f),
                                                   weight=row.get(self.weight_key_f),
                                                   weight_kg=row.get(self.weight_key_f_kg),
                                                   status=row.get(self.status_key_f),
                                                   comments=row.get(self.comment_key_f))
    if samp_male:
        self.row_entered += utils.enter_bulk_sampd(samp_male.pk, cleaned_data, row_date,
                                                   gender="M",
                                                   len_mm=row.get(self.len_key_m_mm),
                                                   len_val=row.get(self.len_key_m),
                                                   weight=row.get(self.weight_key_m),
                                                   weight_kg=row.get(self.weight_key_m_kg),
                                                   status=row.get(self.status_key_m),
                                                   comments=row.get(self.comment_key_m))

    if utils.nan_to_none(row.get(self.dest_key_f)) and indv_female:
        end_tank_id_f = models.Tank.objects.filter(name=row[self.dest_key_f],
                                                   facic_id=cleaned_data["facic_id"]).get()
        self.row_entered += utils.create_movement_evnt(None, end_tank_id_f, cleaned_data,
                                                       row_date, indv_female.pk)
    if utils.nan_to_none(row.get(self.dest_key_m)) and indv_male:
        end_tank_id_m = models.Tank.objects.filter(name=row[self.dest_key_m],
                                                   facic_id=cleaned_data["facic_id"]).get()
        self.row_entered += utils.create_movement_evnt(None, end_tank_id_m, cleaned_data,
                                                       row_date, indv_male.pk)

    # pair
    pair = models.Pairing(start_date=row_date,
                          prio_id=models.PriorityCode.objects.filter(
                              name__iexact=prio_dict[row[self.prio_key_f]]).get(),
                          pair_prio_id=models.PriorityCode.objects.filter(
                              name__iexact=prio_dict[row[self.prio_key_pair]]).get(),
                          cross=row[self.cross_key],
                          valid=True,
                          indv_id=indv_female,
                          samp_id=samp_female,
                          comments=utils.nan_to_none(row[self.comment_key_pair]),
                          created_by=cleaned_data["created_by"],
                          created_date=cleaned_data["created_date"],
                          )
    try:
        pair.clean()
        pair.save()
        self.row_entered = True
    except (ValidationError, IntegrityError):
        pair = models.Pairing.objects.filter(start_date=row_date, indv_id=indv_female,
                                             samp_id=samp_female).get()

    # sire
    sire = models.Sire(prio_id=models.PriorityCode.objects.filter(
                           name__iexact=prio_dict[row[self.prio_key_m]]).get(),
                       pair_id=pair,
                       indv_id=indv_male,
                       samp_id=samp_male,
                       choice=row[self.choice_key],
                       comments=utils.nan_to_none(row[self.comment_key_m]),
                       created_by=cleaned_data["created_by"],
                       created_date=cleaned_data["created_date"],
                       )
    try:
        sire.clean()
        sire.save()
        self.row_entered = True
    except (ValidationError, IntegrityError):
        # don't use sire again anywhere
        pass

    self.row_entered += utils.enter_anix(cleaned_data, pair_pk=pair.pk, return_sucess=True)

    # pairing program:
    if utils.nan_to_none(row.get(self.prog_key)):
        self.row_entered += utils.enter_spwnd(pair.pk, cleaned_data, row[self.prog_key],
                                              self.prog_spwndc_id.pk,
                                              spwnsc_str=row[self.prog_key])

    # fecu/dud/extra male
    if row[self.egg_est_key] > 0:
        self.row_entered += utils.enter_spwnd(pair.pk, cleaned_data, int(row[self.egg_est_key]),
                                              self.fecu_spwndc_id.pk, None, "Calculated")
    else:
        self.row_entered += utils.enter_spwnd(pair.pk, cleaned_data, row[self.choice_key],
                                              self.dud_spwndc_id.pk, None, "Good")

    # grp
    anix_grp_qs = models.AniDetailXref.objects.filter(evnt_id=cleaned_data["evnt_id"],
                                                      grp_id__isnull=False,
                                                      pair_id=pair,
                                                      indv_id__isnull=True,
                                                      contx_id__isnull=True,
                                                      loc_id__isnull=True,
                                                      )
    anix_grp = False
    if anix_grp_qs.count() == 0:
        if indv_female:
            stok_id = indv_female.stok_id
            spec_id = indv_female.spec_id
        else:
            stok_id = samp_female.stok_id
            spec_id = samp_female.spec_id
        grp = models.Group(spec_id=spec_id,
                           stok_id=stok_id,
                           coll_id=models.Collection.objects.filter(name="Egg (F1)").get(),
                           grp_year=row_date.year,
                           grp_valid=False,
                           created_by=cleaned_data["created_by"],
                           created_date=cleaned_data["created_date"],
                           )
        try:
            grp.clean()
            grp.save()
            self.row_entered = True
            anix_grp, anix_entered = utils.enter_anix(cleaned_data, grp_pk=grp.pk)
            self.row_entered += anix_entered
            self.row_entered += utils.enter_anix(cleaned_data, grp_pk=grp.pk, pair_pk=pair.pk,
                                                 return_sucess=True)
            grp.grp_valid = True
            grp.save()
        except ValidationError:
            # recovering the group is only doable through the anix with both grp and pair.
            # no way to find it here, so only make the group valid after anix's created.
            pass
    elif anix_grp_qs.count() == 1:
        anix_grp = anix_grp_qs.get()
        grp = anix_grp.grp_id
    if anix_grp:
        utils.enter_bulk_grpd(anix_grp, cleaned_data, row_date, prog_grp=row.get(self.prog_key))
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    row_datetime = utils.get_row_date(row)
    relc_id = self.site_dict[row[self.site_key]]
    year, coll = utils.year_coll_splitter(row[self.coll_key])
    coll_id = utils.coll_getter(coll)
    stok_id = models.StockCode.objects.filter(name__iexact=relc_id.rive_id.name).get()
    indv_id = None
    if utils.nan_to_none(row[self.pit_key]):
        indv_id = models.Individual.objects.filter(pit_tag=row[self.pit_key]).first()
        if not indv_id:
            indv_id = models.Individual(spec_id=self.salmon_id,
                                        stok_id=stok_id,
                                        coll_id=coll_id,
                                        indv_year=year,
                                        pit_tag=row[self.pit_key],
                                        indv_valid=True,
                                        comments=utils.nan_to_none(row.get(self.comment_key)),
                                        created_by=cleaned_data["created_by"],
                                        created_date=cleaned_data["created_date"],
                                        )
            try:
                indv_id.clean()
                indv_id.save()
                self.row_entered = True
            except (ValidationError, IntegrityError):
                indv_id = models.Individual.objects.filter(pit_tag=indv_id.pit_tag).get()
        indv_anix, data_entered = utils.enter_anix(cleaned_data, indv_pk=indv_id.pk)
        self.row_entered += data_entered

    # add program group to individual if needed:
    loc = models.Location(evnt_id_id=cleaned_data["evnt_id"].pk,
                          locc_id=self.locc_id,
                          rive_id=relc_id.rive_id,
                          relc_id=relc_id,
                          loc_date=row_datetime,
                          created_by=cleaned_data["created_by"],
                          created_date=cleaned_data["created_date"],
                          )
    try:
        loc.clean()
        loc.save()
        self.row_entered = True
    except ValidationError:
        loc = models.Location.objects.filter(evnt_id=loc.evnt_id, locc_id=loc.locc_id,
                                             rive_id=loc.rive_id, subr_id=loc.subr_id,
                                             relc_id=loc.relc_id, loc_lat=loc.loc_lat,
                                             loc_lon=loc.loc_lon, loc_date=loc.loc_date).get()
    self.loc = loc
    self.team_parser(row[self.crew_key], row, loc_id=loc)

    if indv_id:
        anix_loc_indv, anix_entered = utils.enter_anix(cleaned_data, loc_pk=loc.pk,
                                                       indv_pk=indv_id.pk)
        self.row_entered += anix_entered
        self.row_entered += utils.enter_bulk_indvd(anix_loc_indv.pk, self.cleaned_data,
                                                   row_datetime,
                                                   gender=row.get(self.sex_key),
                                                   len_mm=row.get(self.len_key_mm),
                                                   len_val=row.get(self.len_key),
                                                   weight=row.get(self.weight_key),
                                                   weight_kg=row.get(self.weight_key_kg),
                                                   vial=row.get(self.vial_key),
                                                   scale_envelope=row.get(self.scale_key),
                                                   prog_grp=row.get(self.grp_key),
                                                   comments=row.get(self.comment_key)
                                                   )
        if utils.nan_to_none(row.get(self.mort_key)):
            if utils.y_n_to_bool(row[self.mort_key]):
                mort_anix, mort_entered = utils.enter_mortality(indv_id, self.cleaned_data,
                                                                row_datetime)
                self.row_entered += mort_entered
        if utils.nan_to_none(row.get(self.wr_key)):
            if utils.y_n_to_bool(row[self.wr_key]):
                self.row_entered += utils.enter_indvd(anix_loc_indv.pk, cleaned_data,
                                                      row_datetime, None,
                                                      self.ani_health_anidc_id.pk,
                                                      adsc_str=self.wr_adsc_id.name)
        if utils.nan_to_none(row.get(self.aquaculture_key)):
            if utils.y_n_to_bool(row[self.aquaculture_key]):
                self.row_entered += utils.enter_indvd(anix_loc_indv.pk, cleaned_data,
                                                      row_datetime, None,
                                                      self.ani_health_anidc_id.pk,
                                                      adsc_str="Aquaculture")
        if utils.nan_to_none(row[self.tank_key]):
            self.row_entered += utils.enter_contx(self.tank_dict[row[self.tank_key]],
                                                  cleaned_data, True, indv_id.pk)
            if self.loc.pk not in self.loc_caught_dict:
                self.loc_caught_dict[self.loc.pk] = 1
            else:
                self.loc_caught_dict[self.loc.pk] += 1
        else:
            if self.loc.pk not in self.loc_obs_dict:
                self.loc_obs_dict[self.loc.pk] = 1
            else:
                self.loc_obs_dict[self.loc.pk] += 1
    elif utils.nan_to_none(row.get(self.samp_key)):
        # use .first() so a missing sample comes back as None instead of raising:
        samp = models.Sample.objects.filter(anix_id__evnt_id=cleaned_data["evnt_id"],
                                            loc_id=loc,
                                            spec_id=self.salmon_id,
                                            samp_num=row[self.samp_key],
                                            sampc_id=self.sampc_id,
                                            ).first()
        if not samp:
            # create group for sample:
            grp_id = models.Group(spec_id=self.salmon_id,
                                  stok_id=stok_id,
                                  coll_id=coll_id,
                                  grp_year=year,
                                  grp_valid=False,
                                  created_by=cleaned_data["created_by"],
                                  created_date=cleaned_data["created_date"],
                                  )
            grp_id.clean()
            grp_id.save()
            self.row_entered = True
            grp_anix, data_entered = utils.enter_anix(cleaned_data, grp_pk=grp_id.pk)
            self.row_entered += data_entered
            samp, samp_entered = utils.enter_samp(cleaned_data, row[self.samp_key],
                                                  self.salmon_id.pk, self.sampc_id.pk,
                                                  anix_pk=grp_anix.pk, loc_pk=loc.pk,
                                                  comments=utils.nan_to_none(row.get(self.comment_key)))
            self.row_entered += samp_entered
        self.row_entered += utils.enter_bulk_sampd(samp.pk, self.cleaned_data, row_datetime,
                                                   gender=row.get(self.sex_key),
                                                   len_mm=row.get(self.len_key_mm),
                                                   len_val=row.get(self.len_key),
                                                   weight=row.get(self.weight_key),
                                                   weight_kg=row.get(self.weight_key_kg),
                                                   vial=row.get(self.vial_key),
                                                   scale_envelope=row.get(self.scale_key),
                                                   prog_grp=row.get(self.grp_key),
                                                   comments=row.get(self.comment_key)
                                                   )
        if utils.nan_to_none(row.get(self.mort_key)):
            if utils.y_n_to_bool(row[self.mort_key]):
                self.row_entered += utils.enter_samp_mortality(samp, self.cleaned_data,
                                                               row_datetime)
        if utils.nan_to_none(row.get(self.wr_key)):
            if utils.y_n_to_bool(row[self.wr_key]):
                self.row_entered += utils.enter_sampd(samp.pk, cleaned_data, row_datetime, None,
                                                      self.ani_health_anidc_id.pk,
                                                      adsc_str=self.wr_adsc_id.name)
        if utils.nan_to_none(row.get(self.aquaculture_key)):
            if utils.y_n_to_bool(row[self.aquaculture_key]):
                self.row_entered += utils.enter_sampd(samp.pk, cleaned_data, row_datetime, None,
                                                      self.ani_health_anidc_id.pk,
                                                      adsc_str="Aquaculture")
    else:
        raise Exception("Fish must either be assigned a sample number or a pit tag.")
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    row_datetime = utils.get_row_date(row)
    relc_id = None
    rive_id = self.river_dict[row[self.rive_key]]
    if not utils.nan_to_none(row.get(self.tank_key)) and utils.nan_to_none(row.get(self.fish_caught_key)):
        # make sure if fish are caught they are assigned a tank:
        raise Exception("All caught fish must be assigned a tank")
    if utils.nan_to_none(row.get(self.site_key)):
        relc_qs = models.ReleaseSiteCode.objects.filter(name__iexact=row[self.site_key])
        if len(relc_qs) == 1:
            relc_id = relc_qs.get()
    start_lat = utils.round_no_nan(row.get(self.lat_key), 6)
    start_lon = utils.round_no_nan(row.get(self.lon_key), 6)
    if not relc_id and not (start_lat and start_lon):
        raise Exception("Site code not found and lat-long not given for site on row")
    loc = models.Location(evnt_id_id=cleaned_data["evnt_id"].pk,
                          locc_id=self.locc_id,
                          rive_id=rive_id,
                          relc_id=relc_id,
                          loc_lat=start_lat,
                          loc_lon=start_lon,
                          end_lat=utils.round_no_nan(row.get(self.end_lat), 6),
                          end_lon=utils.round_no_nan(row.get(self.end_lon), 6),
                          loc_date=row_datetime,
                          comments=utils.nan_to_none(row.get(self.comment_key)),
                          created_by=cleaned_data["created_by"],
                          created_date=cleaned_data["created_date"],
                          )
    try:
        loc.clean()
        loc.save()
        self.row_entered = True
    except ValidationError:
        loc = models.Location.objects.filter(evnt_id=loc.evnt_id, locc_id=loc.locc_id,
                                             rive_id=loc.rive_id, subr_id=loc.subr_id,
                                             relc_id=loc.relc_id, loc_lat=loc.loc_lat,
                                             loc_lon=loc.loc_lon, loc_date=loc.loc_date).get()
    self.loc = loc

    if row["grp_id"]:
        self.row_entered += utils.enter_anix(cleaned_data, loc_pk=loc.pk, grp_pk=row["grp_id"].pk,
                                             return_sucess=True)
    if self.loc.loc_lon and self.loc.loc_lat and not self.loc.relc_id:
        self.log_data += "\nNo site found in db for Lat-Long ({}, {}) given on row: \n{}\n\n" \
            .format(self.loc.loc_lat, self.loc.loc_lon, row)
    if utils.nan_to_none(row["contx_id"]):
        self.row_entered += utils.enter_anix(cleaned_data, loc_pk=loc.pk,
                                             contx_pk=row["contx_id"].pk, return_sucess=True)
    self.team_parser(row[self.crew_key], row, loc_id=loc)

    if utils.nan_to_none(row.get(self.temp_key)):
        self.row_entered += utils.enter_env(row[self.temp_key], row_datetime, cleaned_data,
                                            self.temp_envc_id, loc_id=loc)

    cnt_caught, cnt_entered = utils.enter_cnt(cleaned_data, cnt_value=row[self.fish_caught_key],
                                              loc_pk=loc.pk, cnt_code="Fish Caught")
    self.row_entered += cnt_entered
    cnt_obs, cnt_entered = utils.enter_cnt(cleaned_data, cnt_value=row[self.fish_obs_key],
                                           loc_pk=loc.pk, cnt_code="Fish Observed")
    self.row_entered += cnt_entered

    if utils.nan_to_none(row.get(self.settings_key)):
        self.row_entered += utils.enter_locd(loc.pk, cleaned_data, row_datetime,
                                             row[self.settings_key], self.settings_locdc_id.pk)
    if utils.nan_to_none(row.get(self.fishing_time_key)):
        self.row_entered += utils.enter_locd(loc.pk, cleaned_data, row_datetime,
                                             row[self.fishing_time_key],
                                             self.fishing_time_locdc_id.pk)
    if utils.nan_to_none(row.get(self.voltage_key)):
        self.row_entered += utils.enter_locd(loc.pk, cleaned_data, row_datetime,
                                             row[self.voltage_key], self.voltage_locdc_id.pk)
def row_parser(self, row):
    row_datetime = utils.get_row_date(row)
    row_date = row_datetime.date()
    indv_qs = models.Individual.objects.filter(pit_tag=row[self.pit_key])
    if len(indv_qs) == 1:
        indv = indv_qs.get()
    else:
        self.log_data += "Error parsing row: \n"
        self.log_data += str(row)
        self.log_data += "\nFish with PIT {} not found in db\n".format(row[self.pit_key])
        self.success = False
        return
    anix, anix_entered = utils.enter_anix(self.cleaned_data, indv_pk=indv.pk)
    self.row_entered += anix_entered
    self.row_entered += utils.enter_bulk_indvd(anix.pk, self.cleaned_data, row_date,
                                               gender=row.get(self.sex_key),
                                               len_mm=row.get(self.len_key_mm),
                                               len_val=row.get(self.len_key),
                                               weight=row.get(self.weight_key),
                                               weight_kg=row.get(self.weight_key_kg),
                                               vial=row.get(self.vial_key),
                                               scale_envelope=row.get(self.envelope_key),
                                               tissue_yn=row.get(self.tissue_key),
                                               mark=row.get(self.mark_key),
                                               vaccinated=row.get(self.vax_key),
                                               status=row.get(self.status_key),
                                               lifestage=row.get(self.lifestage_key),
                                               comments=row.get(self.comment_key))
    if utils.nan_to_none(row.get(self.precocity_key)):
        if utils.y_n_to_bool(row[self.precocity_key]):
            self.row_entered += utils.enter_indvd(anix.pk, self.cleaned_data, row_date, None,
                                                  self.ani_health_anidc_id.pk, "Precocity")
    if utils.nan_to_none(row.get(self.mort_key)):
        if utils.y_n_to_bool(row[self.mort_key]):
            mort_anix, mort_entered = utils.enter_mortality(indv, self.cleaned_data, row_datetime)
            self.row_entered += mort_entered
    in_tank = None
    out_tank = None
    if utils.nan_to_none(row[self.start_tank_key]):
        in_tank = models.Tank.objects.filter(name=row[self.start_tank_key]).get()
    if utils.nan_to_none(row[self.end_tank_key]):
        out_tank = models.Tank.objects.filter(name=row[self.end_tank_key]).get()
    if in_tank or out_tank:
        self.row_entered += utils.create_movement_evnt(in_tank, out_tank, self.cleaned_data,
                                                       row_datetime, indv_pk=indv.pk)
    self.row_entered += utils.parse_extra_cols(row, self.cleaned_data, anix, indv=True)
def row_parser(self, row):
    cleaned_data = self.cleaned_data
    contx, data_entered = utils.enter_tank_contx(row[self.tank_key], cleaned_data, None,
                                                 return_contx=True)
    self.row_entered += data_entered
    row_date = utils.get_row_date(row)
    if utils.nan_to_none(row[self.time_key]):
        row_time = row[self.time_key].replace(tzinfo=pytz.UTC)
    else:
        row_time = None
    if utils.nan_to_none(row.get(self.temp_key)):
        self.row_entered += utils.enter_env(row[self.temp_key], row_date, cleaned_data,
                                            self.temp_envc_id, contx=contx, env_time=row_time)
    if utils.nan_to_none(row.get(self.dox_key)):
        self.row_entered += utils.enter_env(row[self.dox_key], row_date, cleaned_data,
                                            self.oxlvl_envc_id, contx=contx, env_time=row_time)
    if utils.nan_to_none(row.get(self.ph_key)):
        self.row_entered += utils.enter_env(row[self.ph_key], row_date, cleaned_data,
                                            self.ph_envc_id, contx=contx, env_time=row_time)
    if utils.nan_to_none(row.get(self.dn_key)):
        self.row_entered += utils.enter_env(row[self.dn_key], row_date, cleaned_data,
                                            self.disn_envc_id, contx=contx, env_time=row_time)
    if utils.nan_to_none(row.get(self.source_key)):
        source_envsc_id = models.EnvSubjCode.objects.filter(name__icontains=row[self.source_key]).get()
        self.row_entered += utils.enter_env(row[self.source_key], row_date, cleaned_data,
                                            self.ws_envc_id, envsc_id=source_envsc_id,
                                            contx=contx, env_time=row_time)
    if utils.nan_to_none(row.get(self.crew_key)):
        perc_list, inits_not_found = utils.team_list_splitter(row[self.crew_key])
        for perc_id in perc_list:
            team_id, team_entered = utils.add_team_member(perc_id, cleaned_data["evnt_id"],
                                                          return_team=True)
            self.row_entered += team_entered
            if team_id:
                self.row_entered += utils.enter_tank_contx(row[self.tank_key], cleaned_data,
                                                           team_pk=team_id.pk)
        for inits in inits_not_found:
            self.log_data += "No valid personnel with initials ({}) for row {} \n".format(inits, row)
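# The crew-entry block above duplicates the one in the tagging parser; a possible shared
# helper is sketched below. The name _enter_crew is hypothetical, and it assumes the
# utils.team_list_splitter / utils.add_team_member signatures already used in this file;
# callers would still do their own per-parser linking (anix entries, tank contx, etc.).
def _enter_crew(self, row, cleaned_data, role_id=None):
    # split the crew cell into known personnel and unmatched initials:
    perc_list, inits_not_found = utils.team_list_splitter(row[self.crew_key])
    team_ids = []
    for perc_id in perc_list:
        team_id, team_entered = utils.add_team_member(perc_id, cleaned_data["evnt_id"],
                                                      role_id=role_id, return_team=True)
        self.row_entered += team_entered
        if team_id:
            team_ids.append(team_id)
    # log any initials that could not be matched to personnel:
    for inits in inits_not_found:
        self.log_data += "No valid personnel with initials ({}) for row {} \n".format(inits, row)
    return team_ids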