コード例 #1
0
ファイル: tagging.py プロジェクト: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Record box number, box location, and any treatment for the row's individual."""
        super().row_parser(row)
        row_datetime = utils.get_row_date(row)
        row_date = row_datetime.date()

        # Box number and box location are both simple individual details; enter
        # each one only when the row carries a value for it.
        for det_key, det_anidc in ((self.box_key, self.box_anidc_id),
                                   (self.location_key, self.boxl_anidc_id)):
            if utils.nan_to_none(row.get(det_key)):
                self.row_entered += utils.enter_indvd(
                    self.anix_indv.pk, self.cleaned_data, row_date,
                    row[det_key], det_anidc.pk, None)

        # A treatment needs both the treatment name and an amount.
        if utils.nan_to_none(row.get(self.indt_key)) and utils.nan_to_none(
                row.get(self.indt_amt_key)):
            indvtc_id = models.IndTreatCode.objects.filter(
                name__icontains=row[self.indt_key]).get()
            # Shortest unit name containing "gram", so "gram" wins over "kilogram".
            unit_id = models.UnitCode.objects.filter(
                name__icontains="gram").order_by(Length('name').asc()).first()
            self.row_entered += utils.enter_indvt(
                self.anix_indv.pk, self.cleaned_data, row_datetime,
                row[self.indt_amt_key], indvtc_id.pk, unit_id=unit_id)
コード例 #2
0
    def row_parser(self, row):
        """Find the pair's group, link it to its pairing, create a tray, and add the count."""
        cleaned_data = self.cleaned_data
        row_date = utils.get_row_date(row)
        pair_list = utils.get_pair(row[self.cross_key],
                                   row["stok_id"],
                                   row[self.year_key],
                                   prog_grp=utils.nan_to_none(
                                       row.get(self.prog_key)),
                                   fail_on_not_found=True)
        # Exactly one pairing must match; report zero and multiple matches as
        # distinct errors (previously both raised "Too many pairs found").
        if len(pair_list) == 1:
            pair_id = pair_list[0]
        elif not pair_list:
            raise Exception("No pairs found for row \n{}".format(row))
        else:
            raise Exception("Too many pairs found for row \n{}".format(row))

        # The anix cross-reference links the pairing to its group; .first() can
        # return None, so fail with a clear message instead of AttributeError.
        anix_id = models.AniDetailXref.objects.filter(
            pair_id=pair_id,
            grp_id__isnull=False).select_related('grp_id').first()
        if anix_id is None:
            raise Exception("No group linked to pair for row \n{}".format(row))
        grp_id = anix_id.grp_id
        self.row_entered += utils.enter_anix(cleaned_data,
                                             grp_pk=grp_id.pk,
                                             return_sucess=True)

        # Create the tray and link the group to it.
        tray_id = utils.create_tray(row["trof_id"], row[self.tray_key],
                                    row_date, cleaned_data)
        contx, contx_entered = utils.enter_contx(tray_id,
                                                 cleaned_data,
                                                 True,
                                                 grp_pk=grp_id.pk,
                                                 return_contx=True)
        self.row_entered += contx_entered

        # Fecundity, when present, is recorded as a photo count on the tray contx.
        if utils.nan_to_none(row.get(self.fecu_key)):
            cnt, cnt_entered = utils.enter_cnt(cleaned_data,
                                               row[self.fecu_key],
                                               contx_pk=contx.pk,
                                               cnt_code="Photo Count")
            self.row_entered += cnt_entered

        self.row_entered += utils.enter_bulk_grpd(anix_id.pk,
                                                  cleaned_data,
                                                  row_date,
                                                  comments=row.get(
                                                      self.comment_key))
コード例 #3
0
    def data_preper(self):
        """Pre-fetch lookup codes and build the site/tank dictionaries used while parsing rows."""
        cleaned_data = self.cleaned_data

        # Animal detail codes resolved once up front, avoiding per-row queries.
        for attr_name, code_name in (
                ("prog_grp_anidc_id", "Program Group"),
                ("sex_anidc_id", "Gender"),
                ("len_anidc_id", "Length"),
                ("weight_anidc_id", "Weight"),
                ("vial_anidc_id", "Vial"),
                ("ani_health_anidc_id", "Animal Health"),
                ("envelope_anidc_id", "Scale Envelope")):
            setattr(self, attr_name,
                    models.AnimalDetCode.objects.get(name=code_name))

        self.wr_adsc_id = models.AniDetSubjCode.objects.get(name="Wild Return")
        self.locc_id = models.LocCode.objects.get(name="Adult Collection Site")
        self.salmon_id = models.SpeciesCode.objects.get(name="Salmon")
        self.sampc_id = models.SampleCode.objects.get(name="Individual Sample")

        # Map every distinct site name in the data to its ReleaseSiteCode
        # (with the river pre-joined).
        for site_name in self.data[self.site_key].unique():
            if utils.nan_to_none(site_name):
                self.site_dict[site_name] = models.ReleaseSiteCode.objects.filter(
                    name__icontains=site_name).select_related("rive_id").get()

        # Map every distinct tank name to its Tank and link it to the event.
        for tank_name in self.data[self.tank_key].unique():
            if utils.nan_to_none(tank_name):
                tank = models.Tank.objects.get(name__iexact=tank_name,
                                               facic_id=cleaned_data["facic_id"])
                self.tank_dict[tank_name] = tank
                utils.enter_contx(tank, cleaned_data)
コード例 #4
0
ファイル: generic.py プロジェクト: dfo-mar-odis/dm_apps
 def clean_data(self):
     """Aggregate removal counts per start container, record them, then run the parent cleanup."""
     if self.success:
         contx_df = DataFrame(self.data_dict)
         # Sum the per-row fish counts for each starting container contx.
         cnt_df = contx_df.groupby("start_contx_pk", as_index=False).sum()
         for rec in cnt_df.to_dict('records'):
             if utils.nan_to_none(rec["start_contx_pk"]):
                 cnt, cnt_entered = utils.enter_cnt(
                     self.cleaned_data, 0, int(rec["start_contx_pk"]),
                     cnt_code="Fish Removed from Container")
                 # Replace the placeholder zero with the aggregated count.
                 cnt.cnt = rec[self.nfish_key]
                 cnt.save()
     super(GenericGrpParser, self).clean_data()
コード例 #5
0
ファイル: sites.py プロジェクト: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Create a release site from the row, resolving river, subriver, and
        tributary codes first; missing codes are logged and the row is skipped.
        """
        cleaned_data = self.cleaned_data
        # Use .first() so a missing code yields None and hits the log-and-skip
        # branch below; the previous .filter(...).get() raised DoesNotExist,
        # which made every "not found" branch unreachable dead code.
        rive_id = models.RiverCode.objects.filter(
            name__iexact=row.get(self.rive_key)).first()
        if not rive_id:
            self.log_data += "River Code not found on row: {}".format(row)
            self.success = False
            return
        subr_id = None
        if utils.nan_to_none(row.get(self.subr_key)):
            subr_id = models.SubRiverCode.objects.filter(
                name__iexact=row.get(self.subr_key), rive_id=rive_id).first()
            if not subr_id:
                self.log_data += "Subriver Code not found on row: {}".format(
                    row)
                self.success = False
                return

        trib_id = None
        if utils.nan_to_none(row.get(self.trib_key)):
            trib_id = models.Tributary.objects.filter(name__iexact=row.get(
                self.trib_key),
                                                      rive_id=rive_id).first()
            if not trib_id:
                self.log_data += "Tributary not found on row: {}".format(row)
                self.success = False
                return

        # Build the site; duplicate/invalid sites are logged, not fatal.
        site_id = models.ReleaseSiteCode(
            name=row.get(self.name_key),
            description_en=row.get(self.desc_key),
            rive_id=rive_id,
            trib_id=trib_id,
            subr_id=subr_id,
            min_lat=utils.nan_to_none(row.get(self.lat_key)),
            max_lat=utils.nan_to_none(row.get(self.max_lat_key)),
            min_lon=utils.nan_to_none(row.get(self.lon_key)),
            max_lon=utils.nan_to_none(row.get(self.max_lon_key)),
            created_by=cleaned_data["created_by"],
            created_date=cleaned_data["created_date"])
        try:
            site_id.clean()
            site_id.save()
            self.row_entered = True
        except (IntegrityError, ValidationError):
            self.log_data += "Row {} not entered. \n".format(self.row_count)
        self.row_count += 1
コード例 #6
0
    def row_parser(self, row):
        """Process a heath-unit (HU) allocation row: record the tray group's
        transfer losses, then either split a new group into the destination
        container, or (on the final transfer) move the main group out and
        close the tray.
        """
        cleaned_data = self.cleaned_data
        # get tray, group, and row date
        row_date = utils.get_row_date(row)

        tray_qs = models.Tray.objects.filter(trof_id=row["trof_id"],
                                             name=row[self.tray_key])
        # The tray must either span the row date or still be open (no end date).
        tray_id = tray_qs.filter(
            Q(start_date__lte=row_date, end_date__gte=row_date)
            | Q(end_date__isnull=True)).get()
        pair_id = models.Pairing.objects.filter(
            cross=row[self.cross_key],
            end_date__isnull=True,
            indv_id__stok_id=row["stok_id"],
            start_date__year=row[self.year_key]).first()

        grp_id = utils.get_tray_group(pair_id, tray_id, row_date)

        # want to shift the hu move event, so that the counting math always works out.
        hu_move_date = row_date + timedelta(minutes=1)
        hu_cleaned_data = utils.create_new_evnt(cleaned_data, "Allocation",
                                                hu_move_date)
        hu_anix, data_entered = utils.enter_anix(hu_cleaned_data,
                                                 grp_pk=grp_id.pk)
        self.row_entered += data_entered
        hu_contx, data_entered = utils.enter_contx(tray_id,
                                                   hu_cleaned_data,
                                                   None,
                                                   grp_pk=grp_id.pk,
                                                   return_contx=True)
        self.row_entered += data_entered
        # record development
        dev_at_hu_transfer = grp_id.get_development(hu_move_date)
        utils.enter_grpd(hu_anix.pk,
                         hu_cleaned_data,
                         hu_move_date,
                         dev_at_hu_transfer,
                         None,
                         anidc_str="Development")
        self.row_entered += utils.enter_contx(row["trof_id"], cleaned_data)

        # HU Picks:
        self.row_entered += utils.enter_cnt(cleaned_data,
                                            row[self.loss_key],
                                            hu_contx.pk,
                                            cnt_code="HU Transfer Loss")[1]

        # generate new group, cup, and movement event:
        # Destination container, by precedence: end tray > end trough > heath
        # unit (dot notation) > tank.
        cont = None
        if utils.nan_to_none(row[self.end_tray_key]):
            trof_id = models.Trough.objects.filter(
                facic_id=cleaned_data["facic_id"],
                name=row[self.end_trof_key]).get()
            tray_qs = models.Tray.objects.filter(trof_id=trof_id,
                                                 name=row[self.tray_key])
            cont = tray_qs.filter(
                Q(start_date__lte=row_date, end_date__gte=row_date)
                | Q(end_date__isnull=True)).get()
        elif utils.nan_to_none(row[self.end_trof_key]):
            cont = models.Trough.objects.filter(
                facic_id=cleaned_data["facic_id"],
                name=row[self.end_trof_key]).get()
        elif utils.nan_to_none(row[self.heatl_key]):
            cont = utils.get_cont_from_dot(row[self.cont_key], cleaned_data,
                                           row_date)
        elif utils.nan_to_none(row[self.tank_key]):
            # BUGFIX: .get() was missing here, leaving cont as a QuerySet
            # rather than a Tank instance (fish_in_cont/enter_contx below
            # need a model instance, as in the other branches).
            cont = models.Tank.objects.filter(
                facic_id=cleaned_data["facic_id"],
                name=row[self.tank_key]).get()

        self.row_entered += utils.enter_contx(cont, cleaned_data)
        if not utils.y_n_to_bool(row[self.final_key]):
            # NEW GROUPS TAKEN FROM INITIAL
            out_cnt = utils.enter_cnt(cleaned_data,
                                      0,
                                      hu_contx.pk,
                                      cnt_code="Eggs Removed")[0]
            utils.enter_cnt_det(cleaned_data, out_cnt, row[self.cnt_key],
                                "Program Group Split", row[self.prog_key])

            indv, final_grp = cont.fish_in_cont(row_date)
            if not final_grp:
                # No group in the destination yet: clone the parent group's identity.
                final_grp = models.Group(
                    spec_id=grp_id.spec_id,
                    coll_id=grp_id.coll_id,
                    grp_year=grp_id.grp_year,
                    stok_id=grp_id.stok_id,
                    grp_valid=True,
                    created_by=cleaned_data["created_by"],
                    created_date=cleaned_data["created_date"],
                )
                try:
                    final_grp.clean()
                    final_grp.save()
                except (ValidationError, IntegrityError):
                    return None
            else:
                # MAIN GROUP GETTING MOVED
                final_grp = final_grp[0]
            final_grp_anix = utils.enter_anix(cleaned_data,
                                              grp_pk=final_grp.pk,
                                              return_anix=True)
            self.row_entered += utils.enter_anix(hu_cleaned_data,
                                                 grp_pk=final_grp.pk,
                                                 return_sucess=True)
            self.row_entered += utils.enter_bulk_grpd(
                final_grp_anix,
                cleaned_data,
                row_date,
                prnt_grp=grp_id,
                prog_grp=row.get(self.prog_key),
                comments=row.get(self.comment_key))
            self.row_entered += utils.enter_grpd(final_grp_anix.pk,
                                                 cleaned_data,
                                                 row_date,
                                                 dev_at_hu_transfer,
                                                 None,
                                                 anidc_str="Development")

            # create movement for the new group, create 2 contx's and 3 anix's
            # cup contx is contx used to link the positive counts
            cont_contx = utils.create_egg_movement_evnt(tray_id,
                                                        cont,
                                                        cleaned_data,
                                                        row_date,
                                                        final_grp.pk,
                                                        return_cup_contx=True)

            move_cleaned_data = cleaned_data.copy()
            move_cleaned_data["evnt_id"] = cont_contx.evnt_id
            # NOTE(review): cnt_contx aliases cont_contx (same object), so
            # clearing pk also clears cont_contx.pk; the except branch below
            # then filters on pk=None. Looks like a copy was intended —
            # TODO confirm intent before changing.
            cnt_contx = cont_contx
            cnt_contx.pk = None
            cnt_contx.tray_id = tray_id
            try:
                cnt_contx.save()
            except IntegrityError:
                cnt_contx = models.ContainerXRef.objects.filter(
                    pk=cont_contx.pk).get()
            self.row_entered += utils.enter_anix(move_cleaned_data,
                                                 grp_pk=final_grp.pk,
                                                 contx_pk=cnt_contx.pk,
                                                 return_sucess=True)
            # add the positive counts
            cnt = utils.enter_cnt(
                move_cleaned_data,
                row[self.cnt_key],
                cnt_contx.pk,
                cnt_code="Eggs Added",
            )[0]
            # BUGFIX: previously tested nan_to_none(self.weight_key) — the
            # column *name*, which is always truthy — instead of the row's
            # actual weight value.
            if utils.nan_to_none(row.get(self.weight_key)):
                utils.enter_cnt_det(move_cleaned_data, cnt,
                                    row[self.weight_key], "Weight")
            utils.enter_cnt_det(move_cleaned_data, cnt, row[self.cnt_key],
                                "Program Group Split", row[self.prog_key])
        else:
            # Move main group to drawer, and add end date to tray:
            if cont:
                end_contx = utils.create_movement_evnt(tray_id,
                                                       cont,
                                                       cleaned_data,
                                                       row_date,
                                                       grp_pk=grp_id.pk,
                                                       return_end_contx=True)
                tray_id.end_date = row_date
                tray_id.save()
                end_cnt = utils.enter_cnt(cleaned_data,
                                          row[self.cnt_key],
                                          end_contx.pk,
                                          cnt_code="Egg Count")[0]
                utils.enter_cnt_det(cleaned_data, end_cnt,
                                    row[self.weight_key], "Weight")
            else:
                self.log_data += "\n Draw {} from {} not found".format(
                    cont, row[self.cont_key])

            # link cup to egg development event
            utils.enter_contx(cont, cleaned_data, None)
コード例 #7
0
    def row_parser(self, row):
        """Record egg picks (losses) for a container: one pick event per
        configured pick-code column with a value, plus picks from any
        date-named columns; if a shocking pick occurred, also record the
        group's development stage under that pick event.
        """
        cleaned_data = self.cleaned_data
        row_date = utils.get_row_date(row)
        self.row_entered += utils.enter_contx(row["trof_id"], cleaned_data)
        # find group from either cross or tray:

        # Container precedence: heath unit (dot notation), then the trough's
        # open tray, otherwise the trough itself.
        if utils.nan_to_none(row.get(self.hu_key)):
            cont_id = utils.get_cont_from_dot(row[self.hu_key], cleaned_data,
                                              row_date)
        elif utils.nan_to_none(row.get(self.tray_key)):
            cont_id = models.Tray.objects.filter(
                trof_id=row["trof_id"],
                end_date__isnull=True,
                name=row[self.tray_key]).get()
        else:
            cont_id = row["trof_id"]

        # Group lookup: via the open pairing's cross when given, otherwise from
        # whatever group is in the container on the row date.
        if utils.nan_to_none(row.get(self.cross_key)):
            pair_id = models.Pairing.objects.filter(
                cross=row[self.cross_key],
                end_date__isnull=True,
                indv_id__stok_id=row["stok_id"],
                start_date__year=row[self.year_key]).first()
            grp_id = utils.get_tray_group(pair_id, cont_id, row_date)
        else:
            grp_id = cont_id.fish_in_cont(row_date, get_grp=True)

        # One pick event per pick-code column carrying a value on this row.
        # grp_anix/shock keep the last created event's state for the
        # development record below.
        grp_anix = None
        shock = False
        for pickc_id in cleaned_data["pickc_id"]:
            if utils.nan_to_none(row[pickc_id.name]):
                shock = utils.y_n_to_bool(row.get(self.shocking_key))
                grp_anix, evnt_entered = utils.create_picks_evnt(
                    cleaned_data,
                    cont_id,
                    grp_id.pk,
                    row[pickc_id.name],
                    row_date,
                    pickc_id.name,
                    cleaned_data["evnt_id"].perc_id,
                    shocking=shock,
                    return_anix=True,
                    pick_comments=row.get(self.comment_key))
                self.row_entered += evnt_entered

        # Date-named columns hold additional picks, entered under the default
        # pick code on the column's date; date_dict tracks which dates appeared.
        for col_name in row.keys():
            col_date = utils.get_col_date(col_name)

            if col_date:
                col_date_str = datetime.strftime(col_date, "%Y-%b-%d")
                self.date_dict[col_date_str] = True
                if utils.nan_to_none(row.get(col_name)):
                    self.row_entered += utils.create_picks_evnt(
                        cleaned_data,
                        cont_id,
                        grp_id.pk,
                        row[col_name],
                        col_date,
                        self.default_pickc_id,
                        cleaned_data["evnt_id"].perc_id,
                        pick_comments=row.get(self.comment_key))

        # record development
        # Only after a shocking pick, attributed to that pick's event.
        if grp_anix and shock:
            pick_evnt_cleaned_data = cleaned_data.copy()
            pick_evnt_cleaned_data["evnt_id"] = grp_anix.evnt_id
            dev_at_pick = grp_id.get_development(row_date)
            utils.enter_grpd(grp_anix.pk,
                             pick_evnt_cleaned_data,
                             row_date,
                             dev_at_pick,
                             None,
                             anidc_str="Development")
            self.row_entered += utils.enter_contx(row["trof_id"], cleaned_data)
コード例 #8
0
ファイル: generic.py プロジェクト: dfo-mar-odis/dm_apps
    def data_preper(self):
        """Pre-fetch lookup codes, enrich the dataframe with derived columns
        (dates, year/collection, start/end tanks, group keys), and create the
        end groups plus their movement events and counts before row parsing.
        """
        cleaned_data = self.cleaned_data
        # Code-table rows looked up once here so row_parser avoids per-row queries.
        self.sampc_id = models.SampleCode.objects.filter(
            name="Individual Sample").get()
        self.prnt_grp_anidc_id = models.AnimalDetCode.objects.filter(
            name="Parent Group").get()
        self.prog_grp_anidc_id = models.AnimalDetCode.objects.filter(
            name="Program Group").get()
        self.sex_anidc_id = models.AnimalDetCode.objects.filter(
            name="Gender").get()
        self.len_anidc_id = models.AnimalDetCode.objects.filter(
            name="Length").get()
        self.weight_anidc_id = models.AnimalDetCode.objects.filter(
            name="Weight").get()
        self.vial_anidc_id = models.AnimalDetCode.objects.filter(
            name="Vial").get()
        self.envelope_anidc_id = models.AnimalDetCode.objects.filter(
            name="Scale Envelope").get()
        self.ani_health_anidc_id = models.AnimalDetCode.objects.filter(
            name="Animal Health").get()
        self.anidc_ufid_id = models.AnimalDetCode.objects.filter(
            name="UFID").get()
        self.vax_anidc_id = models.AnimalDetCode.objects.filter(
            name="Vaccination").get()
        self.mark_anidc_id = models.AnimalDetCode.objects.filter(
            name="Mark").get()
        self.lifestage_anidc_id = models.AnimalDetCode.objects.filter(
            name="Lifestage").get()
        self.comment_anidc_id = models.AnimalDetCode.objects.filter(
            name="Comment").get()

        # The following steps are to set additional columns on each row to facilitate parsing.
        # In particular,  columns set will be: "datetime", "grp_year", "grp_coll", "start_tank_id",
        # "end_tank_id", "grp_key", "end_grp_key".
        # The two grp_keys will link to dictionaries of the groups, which are also set below

        # set date
        self.data = utils.set_row_datetime(self.data)
        # split year-coll
        self.data["grp_year"] = self.data.apply(
            lambda row: utils.year_coll_splitter(row[self.yr_coll_key])[0],
            axis=1)
        self.data["grp_coll"] = self.data.apply(
            lambda row: utils.year_coll_splitter(row[self.yr_coll_key])[1],
            axis=1)

        # set start and end tank columns:
        self.data = utils.set_row_tank(self.data,
                                       cleaned_data,
                                       self.start_tank_key,
                                       col_name="start_tank_id")
        self.data = utils.set_row_tank(self.data,
                                       cleaned_data,
                                       self.end_tank_key,
                                       col_name="end_tank_id")

        # set the dict keys for groups, use astype(str) to handle anything that might be a nan.
        self.data, self.start_grp_dict = utils.set_row_grp(
            self.data,
            self.rive_key,
            self.yr_coll_key,
            self.prio_key,
            "start_tank_id",
            "datetime",
            self.grp_mark_key,
            grp_col_name="start_grp_id",
            return_dict=True)
        # Link every starting group to the event.
        for item, grp in self.start_grp_dict.items():
            utils.enter_anix(cleaned_data, grp_pk=grp.pk)

        self.data["end_grp_key"] = self.data[self.rive_key] + self.data[self.yr_coll_key] + \
                                   self.data[self.end_tank_key].astype(str) + self.data[self.prio_key].astype(str) + \
                                   self.data["datetime"].astype(str) + self.data[self.grp_mark_key].astype(str)

        # create the end group dict and create, movement event, groups, counts, contxs, etc. necesarry
        # .size() puts the per-key row count into column 0, used as the moved
        # fish count below.
        end_grp_data = self.data.groupby([
            self.rive_key, "grp_year", "grp_coll", "end_tank_id",
            "start_tank_id", self.prio_key, "datetime", self.grp_mark_key,
            "grp_key", "end_grp_key"
        ],
                                         dropna=False,
                                         sort=False).size().reset_index()
        for row in end_grp_data.to_dict('records'):
            # check if end tank is set, otherwise, skip this step
            if not utils.nan_to_none(row["end_tank_id"]):
                self.end_grp_dict[row["end_grp_key"]] = None
                continue
            grps = utils.get_grp(row[self.rive_key],
                                 row["grp_year"],
                                 row["grp_coll"],
                                 row["end_tank_id"],
                                 at_date=row["datetime"],
                                 prog_str=row[self.prio_key],
                                 mark_str=row[self.grp_mark_key])
            start_grp_id = self.start_grp_dict[row["grp_key"]]
            start_contx, contx_entered = utils.enter_contx(
                row["start_tank_id"],
                cleaned_data,
                None,
                grp_pk=start_grp_id.pk,
                return_contx=True)
            # Total fish leaving the start container = sum of sizes over all
            # end-group rows that share this start-group key.
            self.row_entered += utils.enter_cnt(
                cleaned_data,
                sum(end_grp_data[end_grp_data["grp_key"] == row["grp_key"]]
                    [0]),
                start_contx.pk,
                cnt_code="Fish Removed from Container")[1]

            # Reuse an existing matching end group, else clone the start group.
            if len(grps) > 0:
                end_grp_id = grps[0]
                self.end_grp_dict[row["end_grp_key"]] = grps[0]
            else:
                end_grp_id = copy.deepcopy(start_grp_id)
                end_grp_id.pk = None
                end_grp_id.save()
                self.end_grp_dict[row["end_grp_key"]] = end_grp_id

            # Only distinct end groups need parent/priority/mark details and a
            # movement event.
            if end_grp_id.pk != start_grp_id.pk:
                grp_anix = utils.enter_anix(cleaned_data,
                                            grp_pk=end_grp_id.pk,
                                            return_anix=True)
                utils.enter_grpd(grp_anix.pk,
                                 cleaned_data,
                                 row["datetime"],
                                 None,
                                 self.prnt_grp_anidc_id.pk,
                                 frm_grp_id=start_grp_id)
                # NOTE(review): the trailing row[self.prio_key] /
                # row[self.grp_mark_key] positional args duplicate the detail
                # value — verify against enter_grpd's signature (possibly
                # meant as the subject-code string). TODO confirm.
                if utils.nan_to_none(row[self.prio_key]):
                    utils.enter_grpd(grp_anix.pk, cleaned_data,
                                     row["datetime"], row[self.prio_key],
                                     self.prog_grp_anidc_id.pk,
                                     row[self.prio_key])
                if utils.nan_to_none(row[self.grp_mark_key]):
                    utils.enter_grpd(grp_anix.pk, cleaned_data,
                                     row["datetime"], row[self.grp_mark_key],
                                     self.mark_anidc_id.pk,
                                     row[self.grp_mark_key])
                end_contx = utils.create_movement_evnt(row["start_tank_id"],
                                                       row["end_tank_id"],
                                                       cleaned_data,
                                                       row["datetime"],
                                                       grp_pk=end_grp_id.pk,
                                                       return_end_contx=True)
                if end_contx:
                    # Positive count into the end container (row[0] is the
                    # groupby size).
                    self.row_entered += utils.enter_cnt(
                        cleaned_data, row[0], end_contx.pk)[1]
        # row_parser and clean_data iterate over plain dict records.
        self.data_dict = self.data.to_dict("records")
コード例 #9
0
ファイル: tagging.py プロジェクト: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Enter the row's tagged individual (falling back to an existing one on
        a duplicate pit tag), move it between tanks if requested, and record its
        measured details and tagging crew.
        """
        cleaned_data = self.cleaned_data
        year, coll = utils.year_coll_splitter(row[self.coll_key])
        row_datetime = utils.get_row_date(row)
        row_date = row_datetime.date()

        # Build the individual; a duplicate pit tag raises on save and we fall
        # back to the already-entered individual instead.
        indv = models.Individual(
            grp_id=self.grp_id,
            spec_id=self.salmon_id,
            stok_id=self.stok_id,
            coll_id=self.coll_id,
            indv_year=year,
            pit_tag=row[self.pit_key],
            ufid=utils.nan_to_none(row.get(self.ufid_key)),
            indv_valid=True,
            comments=utils.nan_to_none(row.get(self.comment_key)),
            created_by=cleaned_data["created_by"],
            created_date=cleaned_data["created_date"],
        )
        try:
            indv.clean()
            indv.save()
            self.row_entered = True
        except (ValidationError, IntegrityError):
            indv = models.Individual.objects.filter(pit_tag=indv.pit_tag).get()

        from_tank = row[self.from_tank_id_key]
        to_tank = row[self.to_tank_id_key]
        if utils.nan_to_none(from_tank) or utils.nan_to_none(to_tank):
            self.row_entered += utils.create_movement_evnt(
                from_tank, to_tank, cleaned_data, row_datetime,
                indv_pk=indv.pk)
            # if tagged fish goes back into same tank, still link fish to tank:
            if from_tank == to_tank:
                utils.enter_contx(from_tank, cleaned_data, True,
                                  indv_pk=indv.pk)

        anix_indv, anix_entered = utils.enter_anix(cleaned_data,
                                                   indv_pk=indv.pk)
        self.row_entered += anix_entered
        # Kept on the parser so later steps can attach more details.
        self.anix_indv = anix_indv

        # Bulk-enter all measured/observed details present on the row.
        utils.enter_bulk_indvd(
            anix_indv.pk,
            self.cleaned_data,
            row_date,
            len_mm=row.get(self.len_key_mm),
            len_val=row.get(self.len_key),
            weight=row.get(self.weight_key),
            weight_kg=row.get(self.weight_key_kg),
            vial=row.get(self.vial_key),
            mark=row.get(self.mark_key),
            prog_grp=row.get(self.group_key),
            lifestage=row.get(self.lifestage_key),
            comments=row.get(self.comment_key),
        )

        if utils.nan_to_none(row.get(self.precocity_key)):
            self.row_entered += utils.enter_indvd(
                anix_indv.pk, cleaned_data, row_date, None,
                self.ani_health_anidc_id.pk, "Precocity")

        # Register each crew member as a tagger on the event and link them to
        # the individual; unknown initials are logged.
        if utils.nan_to_none(row.get(self.crew_key)):
            crew_ids, missing_inits = utils.team_list_splitter(
                row[self.crew_key])
            for crew_id in crew_ids:
                team, team_added = utils.add_team_member(
                    crew_id,
                    cleaned_data["evnt_id"],
                    role_id=self.tagger_code,
                    return_team=True)
                self.row_entered += team_added
                if team:
                    self.row_entered += utils.enter_anix(cleaned_data,
                                                         indv_pk=indv.pk,
                                                         team_pk=team.pk,
                                                         return_sucess=True)
            for inits in missing_inits:
                self.log_data += "No valid personnel with initials ({}) for row with pit tag {}\n".format(
                    inits, row[self.pit_key])
コード例 #10
0
ファイル: spawning.py プロジェクト: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Parse one spawning row.

        Resolves the female and male parents (each either an Individual by
        pit tag or a Sample by sample number), records their measurements,
        creates or recovers the Pairing and Sire records, enters spawning
        details (program, fecundity estimate or dud), and creates the
        resulting egg group plus its cross references.

        Raises:
            Exception: when neither an individual nor a sample is found for a
                parent, or when the choice column is empty.
        """
        cleaned_data = self.cleaned_data

        # Female parent lookup; a non-empty log from the helper aborts the
        # row early without raising.
        indv_female, samp_female, new_log = utils.get_indv_or_samp(
            row, self.pit_key_f, self.samp_key_f, cleaned_data["evnt_id"])

        if new_log:
            self.log_data += new_log
            return self.log_data, False

        # Male parent, resolved the same way.
        indv_male, samp_male, new_log = utils.get_indv_or_samp(
            row, self.pit_key_m, self.samp_key_m, cleaned_data["evnt_id"])
        if new_log:
            self.log_data += new_log
            return self.log_data, False

        if not (indv_female or samp_female) or not (indv_male or samp_male):
            raise Exception(
                "No Individual or Fish found for row {}".format(row))

        if not utils.nan_to_none(row[self.choice_key]):
            raise Exception(
                "Choice column cannot be empty. Set Fecundity column to zero to indicate Duds."
            )

        row_date = utils.get_row_date(row)

        # Record details for whichever representation each parent has:
        # individuals through an anix link, samples directly.
        if indv_female:
            anix_female, anix_entered = utils.enter_anix(
                cleaned_data, indv_pk=indv_female.pk)
            self.row_entered += anix_entered
            self.row_entered += utils.enter_bulk_indvd(
                anix_female.pk,
                cleaned_data,
                row_date,
                gender="F",
                len_mm=row.get(self.len_key_f_mm),
                len_val=row.get(self.len_key_f),
                weight=row.get(self.weight_key_f),
                weight_kg=row.get(self.weight_key_f_kg),
                status=row.get(self.status_key_f),
                comments=row.get(self.comment_key_f))
        if indv_male:
            anix_male, anix_entered = utils.enter_anix(cleaned_data,
                                                       indv_pk=indv_male.pk)
            self.row_entered += anix_entered

            self.row_entered += utils.enter_bulk_indvd(
                anix_male.pk,
                cleaned_data,
                row_date,
                gender="M",
                len_mm=row.get(self.len_key_m_mm),
                len_val=row.get(self.len_key_m),
                weight=row.get(self.weight_key_m),
                weight_kg=row.get(self.weight_key_m_kg),
                status=row.get(self.status_key_m),
                comments=row.get(self.comment_key_m))
        if samp_female:
            self.row_entered += utils.enter_bulk_sampd(
                samp_female.pk,
                cleaned_data,
                row_date,
                gender="F",
                len_mm=row.get(self.len_key_f_mm),
                len_val=row.get(self.len_key_f),
                weight=row.get(self.weight_key_f),
                weight_kg=row.get(self.weight_key_f_kg),
                status=row.get(self.status_key_f),
                comments=row.get(self.comment_key_f))
        if samp_male:
            self.row_entered += utils.enter_bulk_sampd(
                samp_male.pk,
                cleaned_data,
                row_date,
                gender="M",
                len_mm=row.get(self.len_key_m_mm),
                len_val=row.get(self.len_key_m),
                weight=row.get(self.weight_key_m),
                weight_kg=row.get(self.weight_key_m_kg),
                status=row.get(self.status_key_m),
                comments=row.get(self.comment_key_m))

        # Optional post-spawning movements to destination tanks; only
        # individuals (not samples) are moved.
        if utils.nan_to_none(row.get(self.dest_key_f)) and indv_female:
            end_tank_id_f = models.Tank.objects.filter(
                name=row[self.dest_key_f],
                facic_id=cleaned_data["facic_id"]).get()
            self.row_entered += utils.create_movement_evnt(
                None, end_tank_id_f, cleaned_data, row_date, indv_female.pk)

        if utils.nan_to_none(row.get(self.dest_key_m)) and indv_male:
            end_tank_id_m = models.Tank.objects.filter(
                name=row[self.dest_key_m],
                facic_id=cleaned_data["facic_id"]).get()
            self.row_entered += utils.create_movement_evnt(
                None, end_tank_id_m, cleaned_data, row_date, indv_male.pk)

        # pair: create it, or recover the existing one on duplicate save.
        pair = models.Pairing(
            start_date=row_date,
            prio_id=models.PriorityCode.objects.filter(
                name__iexact=prio_dict[row[self.prio_key_f]]).get(),
            pair_prio_id=models.PriorityCode.objects.filter(
                name__iexact=prio_dict[row[self.prio_key_pair]]).get(),
            cross=row[self.cross_key],
            valid=True,
            indv_id=indv_female,
            samp_id=samp_female,
            comments=utils.nan_to_none(row[self.comment_key_pair]),
            created_by=cleaned_data["created_by"],
            created_date=cleaned_data["created_date"],
        )
        try:
            pair.clean()
            pair.save()
            self.row_entered = True
        except (ValidationError, IntegrityError):
            pair = models.Pairing.objects.filter(start_date=row_date,
                                                 indv_id=indv_female,
                                                 samp_id=samp_female).get()

        # sire
        sire = models.Sire(
            prio_id=models.PriorityCode.objects.filter(
                name__iexact=prio_dict[row[self.prio_key_m]]).get(),
            pair_id=pair,
            indv_id=indv_male,
            samp_id=samp_male,
            choice=row[self.choice_key],
            comments=utils.nan_to_none(row[self.comment_key_m]),
            created_by=cleaned_data["created_by"],
            created_date=cleaned_data["created_date"],
        )
        try:
            sire.clean()
            sire.save()
            self.row_entered = True
        except (ValidationError, IntegrityError):
            # don't use sire again anywhere
            pass

        self.row_entered += utils.enter_anix(cleaned_data,
                                             pair_pk=pair.pk,
                                             return_sucess=True)

        # pairing program:
        if utils.nan_to_none(row.get(self.prog_key)):
            self.row_entered += utils.enter_spwnd(
                pair.pk,
                cleaned_data,
                row[self.prog_key],
                self.prog_spwndc_id.pk,
                spwnsc_str=row[self.prog_key])

        # fecu/dud/extra male: a positive egg estimate records fecundity,
        # anything else records the choice value as a dud.
        if row[self.egg_est_key] > 0:
            self.row_entered += utils.enter_spwnd(pair.pk, cleaned_data,
                                                  int(row[self.egg_est_key]),
                                                  self.fecu_spwndc_id.pk, None,
                                                  "Calculated")
        else:
            self.row_entered += utils.enter_spwnd(pair.pk, cleaned_data,
                                                  row[self.choice_key],
                                                  self.dud_spwndc_id.pk, None,
                                                  "Good")

        # grp: find the egg group already linked to this pairing within the
        # event, or create it.
        anix_grp_qs = models.AniDetailXref.objects.filter(
            evnt_id=cleaned_data["evnt_id"],
            grp_id__isnull=False,
            pair_id=pair,
            indv_id__isnull=True,
            contx_id__isnull=True,
            loc_id__isnull=True,
        )
        anix_grp = False
        if anix_grp_qs.count() == 0:
            # Stock and species come from the female parent.
            if indv_female:
                stok_id = indv_female.stok_id
                spec_id = indv_female.spec_id
            else:
                stok_id = samp_female.stok_id
                spec_id = samp_female.spec_id

            grp = models.Group(
                spec_id=spec_id,
                stok_id=stok_id,
                coll_id=models.Collection.objects.filter(
                    name="Egg (F1)").get(),
                grp_year=row_date.year,
                grp_valid=False,
                created_by=cleaned_data["created_by"],
                created_date=cleaned_data["created_date"],
            )
            try:
                grp.clean()
                grp.save()
                # BUG FIX: these flags previously accumulated into a dead
                # local "row_entered" variable, silently discarding the
                # anix entry results; they now update self.row_entered.
                self.row_entered = True
                anix_grp, anix_entered = utils.enter_anix(cleaned_data,
                                                          grp_pk=grp.pk)
                self.row_entered += anix_entered
                self.row_entered += utils.enter_anix(cleaned_data,
                                                     grp_pk=grp.pk,
                                                     pair_pk=pair.pk,
                                                     return_sucess=True)
                grp.grp_valid = True
                grp.save()
            except ValidationError:
                # recovering the group is only doable through the anix with both grp and pair.
                # no way to find it here, so only make the group valid after anix's created.
                pass

        elif anix_grp_qs.count() == 1:
            anix_grp = anix_grp_qs.get()
            grp = anix_grp.grp_id

        if anix_grp:
            utils.enter_bulk_grpd(anix_grp,
                                  cleaned_data,
                                  row_date,
                                  prog_grp=row[self.prog_key])
Code sample #11 (score: 0)
    def data_preper(self):
        """Prepare event-wide lookups and assign a Group to every data row.

        Resolves shared codes (temperature, crew lead, electrofishing
        location detail codes), caches river codes, chooses the location
        code from the event type, then groups the rows by
        river/priority/year/collection/tank and attaches an existing or
        newly created Group and tank containment cross reference to each
        matching row via the "grp_id" / "contx_id" columns.

        Raises:
            Exception: when two different groups map to the same tank.
        """
        cleaned_data = self.cleaned_data
        self.temp_envc_id = models.EnvCode.objects.filter(name="Temperature").get()
        self.leader_code = models.RoleCode.objects.filter(name__iexact="Crew Lead").get()
        self.settings_locdc_id = models.LocationDetCode.objects.filter(name__iexact="Electrofishing Settings").get()
        self.voltage_locdc_id = models.LocationDetCode.objects.filter(name__iexact="Voltage").get()
        self.fishing_time_locdc_id = models.LocationDetCode.objects.filter(name__iexact="Electrofishing Seconds").get()

        # Cache one RiverCode per distinct river name in the sheet.
        for river_name in self.data[self.rive_key].unique():
            self.river_dict[river_name] = models.RiverCode.objects.filter(name__icontains=river_name).get()

        # Location code depends on which collection event type is running.
        if self.cleaned_data["evntc_id"].__str__() == "Electrofishing":
            self.locc_id = models.LocCode.objects.filter(name__icontains="Electrofishing site").get()
        elif self.cleaned_data["evntc_id"].__str__() == "Smolt Wheel Collection":
            self.locc_id = models.LocCode.objects.filter(name__icontains="Smolt Wheel site").get()
        elif self.cleaned_data["evntc_id"].__str__() == "Bypass Collection":
            self.locc_id = models.LocCode.objects.filter(name__icontains="Bypass site").get()

        # assign groups to columns, add generic group data:
        self.data["grp_id"] = None

        # One row per distinct river/priority/year/collection/tank combo.
        river_group_data = self.data.groupby([self.rive_key, self.prio_key, self.year_key, self.coll_key, self.tank_key],
                                             dropna=False).size().reset_index()

        if not river_group_data[self.tank_key].is_unique:
            raise Exception("Too many different groups going into same tank. Create multiple events if needed")

        for index, row in river_group_data.iterrows():
            if not utils.nan_to_none(row[self.tank_key]):
                # if fish are only observed, don't make a group
                data_rows = (self.data[self.tank_key].isnull())
                self.data.loc[data_rows, "grp_id"] = None
                self.data.loc[data_rows, "contx_id"] = None
                break
            stok_id = models.StockCode.objects.filter(name__icontains=row[self.rive_key]).get()

            # Collection cell may be either a bare name or "<year> <name>".
            coll_str = row[self.coll_key]
            if len(coll_str.lstrip(' 0123456789')) == len(coll_str):
                # year taken from year coll:
                coll_id = utils.coll_getter(row[self.coll_key])
                grp_year = row[self.year_key]
            else:
                grp_year, coll_str = utils.year_coll_splitter(row[self.coll_key])
                coll_id = utils.coll_getter(coll_str)

            # Candidate anixes for an existing matching group in this event.
            anix_grp_qs = models.AniDetailXref.objects.filter(evnt_id=cleaned_data["evnt_id"],
                                                              grp_id__stok_id=stok_id,
                                                              grp_id__coll_id=coll_id,
                                                              grp_id__grp_year=grp_year,
                                                              indv_id__isnull=True,
                                                              contx_id__isnull=True,
                                                              loc_id__isnull=True,
                                                              pair_id__isnull=True)

            # A candidate only matches if its program-group names agree with
            # the row's priority column (both set and equal, or both empty).
            grp_found = False
            grp = None
            for anix in anix_grp_qs:
                anix_prog_grp_names = [adsc.name for adsc in anix.grp_id.prog_group()]
                if utils.nan_to_none(row[self.prio_key]) and row[self.prio_key] in anix_prog_grp_names:
                    grp_found = True
                    grp = anix.grp_id
                    break
                elif not utils.nan_to_none(row[self.prio_key]) and not anix_prog_grp_names:
                    grp_found = True
                    grp = anix.grp_id
                    break
            if not grp_found:
                # No matching group: create one (recover on duplicate).
                grp = models.Group(spec_id=models.SpeciesCode.objects.filter(name__iexact="Salmon").get(),
                                   stok_id=stok_id,
                                   coll_id=coll_id,
                                   grp_year=grp_year,
                                   grp_valid=True,
                                   created_by=cleaned_data["created_by"],
                                   created_date=cleaned_data["created_date"],
                                   )
                try:
                    grp.clean()
                    grp.save()
                except ValidationError:
                    grp = models.Group.objects.filter(spec_id=grp.spec_id, stok_id=grp.stok_id,
                                                      grp_year=grp.grp_year, coll_id=grp.coll_id).get()

                anix_grp = utils.enter_anix(cleaned_data, grp_pk=grp.pk, return_anix=True)
                if utils.nan_to_none(row.get(self.prio_key)):
                    utils.enter_grpd(anix_grp.pk, cleaned_data, cleaned_data["evnt_id"].start_date, None,
                                     None, anidc_str="Program Group", adsc_str=row[self.prio_key])

            # create index column matching all rows in data to this group-tank-prio combination
            if utils.nan_to_none(row.get(self.prio_key)):
                data_rows = (self.data[self.rive_key] == row[self.rive_key]) & \
                            (self.data[self.prio_key] == row[self.prio_key]) & \
                            (self.data[self.tank_key] == row[self.tank_key]) & \
                            (self.data[self.coll_key] == row[self.coll_key])
            else:
                data_rows = (self.data[self.rive_key] == row[self.rive_key]) & \
                    (self.data[self.coll_key] == row[self.coll_key]) & \
                    (self.data[self.tank_key] == row[self.tank_key]) & \
                    (self.data[self.prio_key].isnull())

            # grp found, assign to all rows:
            self.data.loc[data_rows, "grp_id"] = grp
            contx, data_entered = utils.enter_tank_contx(row[self.tank_key], cleaned_data, True, None, grp.pk,
                                                         return_contx=True)
            self.data.loc[data_rows, "contx_id"] = contx

        # Row-parsing downstream consumes dict records, not the DataFrame.
        self.data_dict = self.data.to_dict("records")
Code sample #12 (score: 0)
    def row_parser(self, row):
        """Parse one collection row: create or recover the Location, then
        record the fish as either a pit-tagged Individual or a numbered
        Sample, together with its measurements, mortality/whirling-disease/
        aquaculture flags and tank placement or observation counts.

        Raises:
            Exception: when the row has neither a pit tag nor a sample
                number.
        """
        cleaned_data = self.cleaned_data
        row_datetime = utils.get_row_date(row)
        relc_id = self.site_dict[row[self.site_key]]
        year, coll = utils.year_coll_splitter(row[self.coll_key])
        coll_id = utils.coll_getter(coll)
        # Stock code is derived from the site's river name.
        stok_id = models.StockCode.objects.filter(name__iexact=relc_id.rive_id.name).get()

        indv_id = None

        if utils.nan_to_none(row[self.pit_key]):
            # Reuse an existing individual with this pit tag, else create one.
            indv_id = models.Individual.objects.filter(pit_tag=row[self.pit_key]).first()
            if not indv_id:
                indv_id = models.Individual(spec_id=self.salmon_id,
                                            stok_id=stok_id,
                                            coll_id=coll_id,
                                            indv_year=year,
                                            pit_tag=row[self.pit_key],
                                            indv_valid=True,
                                            comments=utils.nan_to_none(row.get(self.comment_key)),
                                            created_by=cleaned_data["created_by"],
                                            created_date=cleaned_data["created_date"],
                                            )
                try:
                    indv_id.clean()
                    indv_id.save()
                    self.row_entered = True
                except (ValidationError, IntegrityError):
                    # Duplicate pit tag saved concurrently: recover it.
                    indv_id = models.Individual.objects.filter(pit_tag=indv_id.pit_tag).get()
            indv_anix, data_entered = utils.enter_anix(cleaned_data, indv_pk=indv_id.pk)
            self.row_entered += data_entered
            # add program group to individual if needed:

        loc = models.Location(evnt_id_id=cleaned_data["evnt_id"].pk,
                              locc_id=self.locc_id,
                              rive_id=relc_id.rive_id,
                              relc_id=relc_id,
                              loc_date=row_datetime,
                              created_by=cleaned_data["created_by"],
                              created_date=cleaned_data["created_date"],
                              )
        try:
            loc.clean()
            loc.save()
            self.row_entered = True
        except ValidationError:
            # Duplicate location: recover the existing row with the same keys.
            loc = models.Location.objects.filter(evnt_id=loc.evnt_id, locc_id=loc.locc_id,
                                                 rive_id=loc.rive_id, subr_id=loc.subr_id,
                                                 relc_id=loc.relc_id, loc_lat=loc.loc_lat,
                                                 loc_lon=loc.loc_lon, loc_date=loc.loc_date).get()
        self.loc = loc
        self.team_parser(row[self.crew_key], row, loc_id=loc)

        if indv_id:
            anix_loc_indv, anix_entered = utils.enter_anix(cleaned_data, loc_pk=loc.pk, indv_pk=indv_id.pk)
            self.row_entered += anix_entered

            self.row_entered += utils.enter_bulk_indvd(anix_loc_indv.pk, self.cleaned_data, row_datetime,
                                                       gender=row.get(self.sex_key),
                                                       len_mm=row.get(self.len_key_mm),
                                                       len_val=row.get(self.len_key),
                                                       weight=row.get(self.weight_key),
                                                       weight_kg=row.get(self.weight_key_kg),
                                                       vial=row.get(self.vial_key),
                                                       scale_envelope=row.get(self.scale_key),
                                                       prog_grp=row.get(self.grp_key),
                                                       comments=row.get(self.comment_key)
                                                       )

            if utils.nan_to_none(row.get(self.mort_key)):
                if utils.y_n_to_bool(row[self.mort_key]):
                    mort_anix, mort_entered = utils.enter_mortality(indv_id, self.cleaned_data, row_datetime)
                    self.row_entered += mort_entered

            if utils.nan_to_none(row.get(self.wr_key)):
                if utils.y_n_to_bool(row[self.wr_key]):
                    self.row_entered += utils.enter_indvd(anix_loc_indv.pk, cleaned_data, row_datetime, None,
                                                          self.ani_health_anidc_id.pk, adsc_str=self.wr_adsc_id.name)

            if utils.nan_to_none(row.get(self.aquaculture_key)):
                if utils.y_n_to_bool(row[self.aquaculture_key]):
                    self.row_entered += utils.enter_indvd(anix_loc_indv.pk, cleaned_data, row_datetime, None,
                                                          self.ani_health_anidc_id.pk, adsc_str="Aquaculture")

            # A tank means the fish was caught and kept; no tank means it was
            # only observed.  Tally per location for later count entry.
            if utils.nan_to_none(row[self.tank_key]):
                self.row_entered += utils.enter_contx(self.tank_dict[row[self.tank_key]], cleaned_data, True, indv_id.pk)
                if self.loc.pk not in self.loc_caught_dict:
                    self.loc_caught_dict[self.loc.pk] = 1
                else:
                    self.loc_caught_dict[self.loc.pk] += 1
            else:
                if self.loc.pk not in self.loc_obs_dict:
                    self.loc_obs_dict[self.loc.pk] = 1
                else:
                    self.loc_obs_dict[self.loc.pk] += 1

        elif utils.nan_to_none(row.get(self.samp_key)):
            # BUG FIX: this lookup used .get(), which raises DoesNotExist
            # when the sample is missing, making the creation branch below
            # unreachable.  .first() returns None so the sample is created.
            samp = models.Sample.objects.filter(anix_id__evnt_id=cleaned_data["evnt_id"],
                                                loc_id=loc,
                                                spec_id=self.salmon_id,
                                                samp_num=row[self.samp_key],
                                                sampc_id=self.sampc_id,
                                                ).first()
            if not samp:
                # create group for sample:
                grp_id = models.Group(spec_id=self.salmon_id,
                                      stok_id=stok_id,
                                      coll_id=coll_id,
                                      grp_year=year,
                                      grp_valid=False,
                                      created_by=cleaned_data["created_by"],
                                      created_date=cleaned_data["created_date"],
                                      )
                grp_id.clean()
                grp_id.save()
                self.row_entered = True

                grp_anix, data_entered = utils.enter_anix(cleaned_data, grp_pk=grp_id.pk)
                self.row_entered += data_entered

                samp, samp_entered = utils.enter_samp(cleaned_data, row[self.samp_key], self.salmon_id.pk, self.sampc_id.pk,
                                                      anix_pk=grp_anix.pk, loc_pk=loc.pk,
                                                      comments=utils.nan_to_none(row.get(self.comment_key)))
                self.row_entered += samp_entered

            self.row_entered += utils.enter_bulk_sampd(samp.pk, self.cleaned_data, row_datetime,
                                                       gender=row.get(self.sex_key),
                                                       len_mm=row.get(self.len_key_mm),
                                                       len_val=row.get(self.len_key),
                                                       weight=row.get(self.weight_key),
                                                       weight_kg=row.get(self.weight_key_kg),
                                                       vial=row.get(self.vial_key),
                                                       scale_envelope=row.get(self.scale_key),
                                                       prog_grp=row.get(self.grp_key),
                                                       comments=row.get(self.comment_key)
                                                       )
            if utils.nan_to_none(row.get(self.mort_key)):
                if utils.y_n_to_bool(row[self.mort_key]):
                    self.row_entered += utils.enter_samp_mortality(samp, self.cleaned_data, row_datetime)

            if utils.nan_to_none(row.get(self.wr_key)):
                if utils.y_n_to_bool(row[self.wr_key]):
                    self.row_entered += utils.enter_sampd(samp.pk, cleaned_data, row_datetime, None,
                                                          self.ani_health_anidc_id.pk, adsc_str=self.wr_adsc_id.name)

            if utils.nan_to_none(row.get(self.aquaculture_key)):
                if utils.y_n_to_bool(row[self.aquaculture_key]):
                    self.row_entered += utils.enter_sampd(samp.pk, cleaned_data, row_datetime, None,
                                                          self.ani_health_anidc_id.pk, adsc_str="Aquaculture")
        else:
            raise Exception("Fish must either be assigned a sample number or a pit tag.")
Code sample #13 (score: 0)
File: water_quality.py  Project: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Record water-quality readings (temperature, dissolved oxygen, pH,
        dissolved nitrogen, water source) and the crew for one tank row."""
        cleaned_data = self.cleaned_data

        # Container cross reference for the tank the readings belong to.
        contx, contx_entered = utils.enter_tank_contx(row[self.tank_key],
                                                      cleaned_data,
                                                      None,
                                                      return_contx=True)
        self.row_entered += contx_entered

        row_date = utils.get_row_date(row)
        row_time = None
        if utils.nan_to_none(row[self.time_key]):
            # Times in the sheet are naive; treat them as UTC.
            row_time = row[self.time_key].replace(tzinfo=pytz.UTC)

        # Plain measurements share one entry pattern: (column key, env code).
        measurement_map = (
            (self.temp_key, self.temp_envc_id),
            (self.dox_key, self.oxlvl_envc_id),
            (self.ph_key, self.ph_envc_id),
            (self.dn_key, self.disn_envc_id),
        )
        for col_key, envc_id in measurement_map:
            if utils.nan_to_none(row.get(col_key)):
                self.row_entered += utils.enter_env(row[col_key],
                                                    row_date,
                                                    cleaned_data,
                                                    envc_id,
                                                    contx=contx,
                                                    env_time=row_time)

        # Water source additionally needs its subject code looked up from
        # the cell value.
        if utils.nan_to_none(row.get(self.source_key)):
            source_envsc_id = models.EnvSubjCode.objects.filter(
                name__icontains=row[self.source_key]).get()
            self.row_entered += utils.enter_env(row[self.source_key],
                                                row_date,
                                                cleaned_data,
                                                self.ws_envc_id,
                                                envsc_id=source_envsc_id,
                                                contx=contx,
                                                env_time=row_time)

        # Crew: link each recognized member to the tank; log unknown initials.
        if utils.nan_to_none(row.get(self.crew_key)):
            perc_list, inits_not_found = utils.team_list_splitter(
                row[self.crew_key])
            for perc_id in perc_list:
                team_id, team_entered = utils.add_team_member(
                    perc_id, cleaned_data["evnt_id"], return_team=True)
                self.row_entered += team_entered
                if team_id:
                    self.row_entered += utils.enter_tank_contx(
                        row[self.tank_key], cleaned_data, team_pk=team_id.pk)
            for inits in inits_not_found:
                self.log_data += "No valid personnel with initials ({}) for row {} \n".format(
                    inits, row)
Code sample #14 (score: 0)
    def row_parser(self, row):
        """Parse the shared columns via the parent parser, then record the
        crew lead (if the column is filled) against the row's location."""
        super().row_parser(row)

        crew_lead = utils.nan_to_none(row.get(self.crew_lead_key))
        if crew_lead:
            self.team_parser(row[self.crew_lead_key], row,
                             loc_id=self.loc,
                             role_id=self.leader_code)
Code sample #15 (score: 0)
    def row_parser(self, row):
        """Parse one electrofishing/collection site row: create or recover
        the Location (by site code or lat-long), link the pre-assigned group
        and containment, record crew, temperature, caught/observed counts and
        electrofishing settings.

        Raises:
            Exception: when caught fish have no tank assigned, or when
                neither a known site code nor a lat-long is given.
        """
        cleaned_data = self.cleaned_data
        row_datetime = utils.get_row_date(row)
        relc_id = None
        rive_id = self.river_dict[row[self.rive_key]]

        if not utils.nan_to_none(row.get(self.tank_key)) and utils.nan_to_none(row.get(self.fish_caught_key)):
            # make sure if fish are caught they are assigned a tank:
            raise Exception("All caught fish must be assigned a tank")

        # Site code is only used when it resolves to exactly one site.
        if utils.nan_to_none(row.get(self.site_key)):
            relc_qs = models.ReleaseSiteCode.objects.filter(name__iexact=row[self.site_key])
            if len(relc_qs) == 1:
                relc_id = relc_qs.get()

        start_lat = utils.round_no_nan(row.get(self.lat_key), 6)
        start_lon = utils.round_no_nan(row.get(self.lon_key), 6)
        if not relc_id and not (start_lat and start_lon):
            raise Exception("Site code not found and lat-long not given for site on row")
        loc = models.Location(evnt_id_id=cleaned_data["evnt_id"].pk,
                              locc_id=self.locc_id,
                              rive_id=rive_id,
                              relc_id=relc_id,
                              loc_lat=start_lat,
                              loc_lon=start_lon,
                              end_lat=utils.round_no_nan(row.get(self.end_lat), 6),
                              end_lon=utils.round_no_nan(row.get(self.end_lon), 6),
                              loc_date=row_datetime,
                              comments=utils.nan_to_none(row.get(self.comment_key)),
                              created_by=cleaned_data["created_by"],
                              created_date=cleaned_data["created_date"],
                              )
        try:
            loc.clean()
            loc.save()
            self.row_entered = True
        except ValidationError:
            # Duplicate location: recover the existing row with the same keys.
            loc = models.Location.objects.filter(evnt_id=loc.evnt_id, locc_id=loc.locc_id,
                                                 rive_id=loc.rive_id, subr_id=loc.subr_id,
                                                 relc_id=loc.relc_id, loc_lat=loc.loc_lat,
                                                 loc_lon=loc.loc_lon, loc_date=loc.loc_date).get()
        self.loc = loc
        # Group and containment were pre-assigned to the row in data_preper.
        if row["grp_id"]:
            self.row_entered += utils.enter_anix(cleaned_data, loc_pk=loc.pk, grp_pk=row["grp_id"].pk,
                                                 return_sucess=True)
        if self.loc.loc_lon and self.loc.loc_lat and not self.loc.relc_id:
            self.log_data += "\nNo site found in db for Lat-Long ({}, {}) given on row: \n{}\n\n"\
                .format(self.loc.loc_lat, self.loc.loc_lon, row)

        if utils.nan_to_none(row["contx_id"]):
            self.row_entered += utils.enter_anix(cleaned_data, loc_pk=loc.pk, contx_pk=row["contx_id"].pk,
                                                 return_sucess=True)

        self.team_parser(row[self.crew_key], row, loc_id=loc)

        if utils.nan_to_none(row.get(self.temp_key)):
            self.row_entered += utils.enter_env(row[self.temp_key], row_datetime, cleaned_data, self.temp_envc_id,
                                                loc_id=loc)

        # Fish counts: caught and observed are entered as separate counts.
        cnt_caught, cnt_entered = utils.enter_cnt(cleaned_data, cnt_value=row[self.fish_caught_key], loc_pk=loc.pk,
                                                  cnt_code="Fish Caught")
        self.row_entered += cnt_entered
        cnt_obs, cnt_entered = utils.enter_cnt(cleaned_data, cnt_value=row[self.fish_obs_key], loc_pk=loc.pk,
                                               cnt_code="Fish Observed")
        self.row_entered += cnt_entered

        # Optional electrofishing detail entries against the location.
        if utils.nan_to_none(row.get(self.settings_key)):
            self.row_entered += utils.enter_locd(loc.pk, cleaned_data, row_datetime, row[self.settings_key],
                                                 self.settings_locdc_id.pk)
        if utils.nan_to_none(row.get(self.fishing_time_key)):
            self.row_entered += utils.enter_locd(loc.pk, cleaned_data, row_datetime, row[self.fishing_time_key],
                                                 self.fishing_time_locdc_id.pk)
        if utils.nan_to_none(row.get(self.voltage_key)):
            self.row_entered += utils.enter_locd(loc.pk, cleaned_data, row_datetime, row[self.voltage_key],
                                                 self.voltage_locdc_id.pk)
Code example #16
0
File: generic.py  Project: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Parse one row for an individual fish identified by PIT tag.

        Resolves the PIT tag to exactly one Individual, records its bulk
        details (sex, lengths, weights, vial, envelope, etc.), optional
        precocity and mortality flags, any tank-to-tank movement, and any
        extra columns.  On an unresolved PIT tag the row is logged and the
        parse is marked unsuccessful.
        """
        row_datetime = utils.get_row_date(row)
        row_date = row_datetime.date()

        # The PIT tag must match exactly one individual in the db; anything
        # else (zero or multiple matches) is a row-level error.
        indv_qs = models.Individual.objects.filter(pit_tag=row[self.pit_key])
        if len(indv_qs) != 1:
            self.log_data += "Error parsing row: \n"
            self.log_data += str(row)
            self.log_data += "\nFish with PIT {} not found in db\n".format(
                row[self.pit_key])
            self.success = False
            return
        indv = indv_qs.get()

        indv_anix, anix_entered = utils.enter_anix(self.cleaned_data,
                                                   indv_pk=indv.pk)
        self.row_entered += anix_entered

        # Bulk individual details pulled straight from the row's columns;
        # absent columns pass None through via .get().
        self.row_entered += utils.enter_bulk_indvd(
            indv_anix.pk,
            self.cleaned_data,
            row_date,
            gender=row.get(self.sex_key),
            len_mm=row.get(self.len_key_mm),
            len_val=row.get(self.len_key),
            weight=row.get(self.weight_key),
            weight_kg=row.get(self.weight_key_kg),
            vial=row.get(self.vial_key),
            scale_envelope=row.get(self.envelope_key),
            tissue_yn=row.get(self.tissue_key),
            mark=row.get(self.mark_key),
            vaccinated=row.get(self.vax_key),
            status=row.get(self.status_key),
            lifestage=row.get(self.lifestage_key),
            comments=row.get(self.comment_key))

        # Yes/No flag columns: precocity detail and mortality event.
        precocity_val = row.get(self.precocity_key)
        if utils.nan_to_none(precocity_val) and utils.y_n_to_bool(precocity_val):
            self.row_entered += utils.enter_indvd(
                indv_anix.pk, self.cleaned_data, row_date, None,
                self.ani_health_anidc_id.pk, "Precocity")
        mort_val = row.get(self.mort_key)
        if utils.nan_to_none(mort_val) and utils.y_n_to_bool(mort_val):
            mort_anix, mort_entered = utils.enter_mortality(
                indv, self.cleaned_data, row_datetime)
            self.row_entered += mort_entered

        # Tank movement: either endpoint may be absent.
        origin_tank = None
        dest_tank = None
        if utils.nan_to_none(row[self.start_tank_key]):
            origin_tank = models.Tank.objects.filter(
                name=row[self.start_tank_key]).get()
        if utils.nan_to_none(row[self.end_tank_key]):
            dest_tank = models.Tank.objects.filter(
                name=row[self.end_tank_key]).get()
        if origin_tank or dest_tank:
            self.row_entered += utils.create_movement_evnt(origin_tank,
                                                           dest_tank,
                                                           self.cleaned_data,
                                                           row_datetime,
                                                           indv_pk=indv.pk)

        self.row_entered += utils.parse_extra_cols(row,
                                                   self.cleaned_data,
                                                   indv_anix,
                                                   indv=True)
Code example #17
0
File: generic.py  Project: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Parse one group-movement row.

        Finds the starting group, then handles the combinations of
        (whole-group vs. partial move) x (destination tank given vs. not):
        a partial move splits the start group into a (possibly newly created)
        end group; a whole-group move relocates the start group itself.
        Fish counts and bulk group details are recorded against whichever
        group ends up holding the fish (``det_anix``).

        Fixes relative to the previous version: several ``self.row_entered``
        tallies used plain assignment (``=``) instead of accumulation
        (``+=``), silently discarding earlier entry flags; end-group lookup
        now uses ``row.get(...)`` for the priority/mark columns, matching the
        start-group lookup, so an absent column no longer raises KeyError.
        """
        cleaned_data = self.cleaned_data
        row_date = row["datetime"].date()
        # fail_on_not_found: a missing start group is a hard error for this row.
        row_start_grp = utils.get_grp(row[self.rive_key],
                                      row["grp_year"],
                                      row["grp_coll"],
                                      row["start_tank_id"],
                                      row_date,
                                      prog_str=row.get(self.prio_key),
                                      mark_str=row.get(self.grp_mark_key),
                                      fail_on_not_found=True)[0]
        start_anix, anix_entered = utils.enter_anix(cleaned_data,
                                                    grp_pk=row_start_grp.pk)
        # BUGFIX: accumulate instead of overwriting self.row_entered.
        self.row_entered += anix_entered
        start_contx, contx_entered = utils.enter_contx(row["start_tank_id"],
                                                       cleaned_data,
                                                       None,
                                                       return_contx=True)
        self.row_entered += contx_entered

        whole_grp = utils.y_n_to_bool(row[self.abs_key])
        det_anix = start_anix
        # Only a partial move needs the start container cross-reference later.
        row["start_contx_pk"] = None
        if not whole_grp:
            row["start_contx_pk"] = start_contx.pk

        if utils.nan_to_none(row["end_tank_id"]):
            # 4 possible cases here: group in tank or not and whole group move or not:
            row_end_grp_list = utils.get_grp(row[self.rive_key],
                                             row["grp_year"],
                                             row["grp_coll"],
                                             row["end_tank_id"],
                                             row_date,
                                             prog_str=row.get(self.prio_key),
                                             mark_str=row.get(self.grp_mark_key))
            row_end_grp = None
            if not whole_grp and not row_end_grp_list:
                # splitting fish group, create end group as a copy of the start group:
                row_end_grp = copy.deepcopy(row_start_grp)
                row_end_grp.pk = None
                row_end_grp.id = None
                row_end_grp.save()
                end_grp_anix, anix_entered = utils.enter_anix(
                    cleaned_data, grp_pk=row_end_grp.pk)
                # BUGFIX: accumulate instead of overwriting.
                self.row_entered += anix_entered

                self.row_entered += utils.enter_bulk_grpd(
                    end_grp_anix.pk,
                    cleaned_data,
                    row_date,
                    prog_grp=row.get(self.prio_key),
                    mark=row.get(self.mark_key))
            elif not whole_grp:
                # splitting fish group, merging to existing end group
                row_end_grp = row_end_grp_list[0]

            if row_end_grp:
                # Partial move: move the end group and link it back to its parent.
                move_contx = utils.create_movement_evnt(row["start_tank_id"],
                                                        row["end_tank_id"],
                                                        cleaned_data,
                                                        row_date,
                                                        grp_pk=row_end_grp.pk,
                                                        return_end_contx=True)
                end_grp_anix, anix_entered = utils.enter_anix(
                    cleaned_data, grp_pk=row_end_grp.pk)
                self.row_entered += anix_entered
                # Record the start group as the parent of the end group.
                self.row_entered += utils.enter_grpd(end_grp_anix.pk,
                                                     cleaned_data,
                                                     row_date,
                                                     None,
                                                     self.prnt_grp_anidc_id.pk,
                                                     frm_grp_id=row_start_grp)
                cnt, cnt_entered = utils.enter_cnt(cleaned_data,
                                                   row[self.nfish_key],
                                                   move_contx.pk)
                # BUGFIX: accumulate instead of overwriting.
                self.row_entered += cnt_entered

                # record details on end tank group
                det_anix = end_grp_anix

            else:
                # move all the fish (whole group, merge to fish at destination if needed)
                move_contx = utils.create_movement_evnt(
                    row["start_tank_id"],
                    row["end_tank_id"],
                    cleaned_data,
                    row_date,
                    grp_pk=row_start_grp.pk,
                    return_end_contx=True)
                cnt, cnt_entered = utils.enter_cnt(cleaned_data,
                                                   row[self.nfish_key],
                                                   move_contx.pk,
                                                   cnt_code="Fish Count")
                # BUGFIX: accumulate instead of overwriting.
                self.row_entered += cnt_entered
        else:
            # No destination tank: just record the count in the start tank.
            if utils.nan_to_none(row[self.nfish_key]):
                cnt, cnt_entered = utils.enter_cnt(cleaned_data,
                                                   row[self.nfish_key],
                                                   start_contx.pk,
                                                   cnt_code="Fish Count")
                # BUGFIX: accumulate instead of overwriting.
                self.row_entered += cnt_entered

        # add details to det_anix (the group that ends up holding the fish):
        self.row_entered += utils.enter_bulk_grpd(
            det_anix.pk,
            cleaned_data,
            row_date,
            vaccinated=row.get(self.vax_key),
            mark=row.get(self.mark_key),
            lifestage=row.get(self.lifestage_key),
            comments=row.get(self.comment_key))

        self.row_entered += utils.parse_extra_cols(row,
                                                   self.cleaned_data,
                                                   det_anix,
                                                   grp=True)
Code example #18
0
File: generic.py  Project: dfo-mar-odis/dm_apps
    def row_parser(self, row):
        """Parse one sampling row: resolve the group being sampled, tie it to
        its start tank, create a sample record, and attach per-fish details
        (sex, lengths, weights, precocity, UFID, mortality, extra columns).

        On failure to create the sample record, marks the parse unsuccessful.
        """
        cleaned_data = self.cleaned_data
        row_date = row["datetime"].date()
        # Details are recorded against the end group when the row's end-group
        # key resolves to one in end_grp_dict; otherwise the start group.
        row_grp = row["start_grp_id"]
        row_end_grp = self.end_grp_dict[row["end_grp_key"]]
        if row_end_grp:
            row_grp = row_end_grp
        # row_anix itself is not used below; presumably entered for its
        # side effect of linking the group to the event — TODO confirm.
        row_anix, data_entered = utils.enter_anix(cleaned_data,
                                                  grp_pk=row_grp.pk)
        self.row_entered += data_entered

        # Container cross-reference tying the group to its start tank.
        row_contx, data_entered = utils.enter_contx(row.get("start_tank_id"),
                                                    cleaned_data,
                                                    None,
                                                    grp_pk=row_grp.pk,
                                                    return_contx=True)
        self.row_entered += data_entered

        # The single animal-detail record for this group/event that is not
        # tied to an individual, location, or pair.  .get() raises if zero or
        # multiple matches exist — left to fail loudly.
        samp_anix = row_contx.animal_details.filter(
            grp_id=row_grp,
            evnt_id=cleaned_data["evnt_id"],
            indv_id__isnull=True,
            loc_id__isnull=True,
            pair_id__isnull=True,
            final_contx_flag=None).get()

        # Create the sample record keyed to the group's species and the
        # parser's sample code.
        row_samp, data_entered = utils.enter_samp(cleaned_data,
                                                  row[self.samp_key],
                                                  row_grp.spec_id.pk,
                                                  self.sampc_id.pk,
                                                  anix_pk=samp_anix.pk)
        self.row_entered += data_entered

        # Optional yes/no mortality flag on the sample.
        if utils.nan_to_none(row.get(self.mort_key)):
            if utils.y_n_to_bool(row[self.mort_key]):
                self.row_entered += utils.enter_samp_mortality(
                    row_samp, cleaned_data, row_date)

        if row_samp:
            # Bulk sample details from the row's columns; absent columns pass
            # None through via .get().
            self.row_entered += utils.enter_bulk_sampd(
                row_samp.pk,
                self.cleaned_data,
                row_date,
                gender=row.get(self.sex_key),
                len_mm=row.get(self.len_key_mm),
                len_val=row.get(self.len_key),
                weight=row.get(self.weight_key),
                weight_kg=row.get(self.weight_key_kg),
                vial=row.get(self.vial_key),
                scale_envelope=row.get(self.envelope_key),
                tissue_yn=row.get(self.tissue_key),
                mark=row.get(self.mark_key),
                vaccinated=row.get(self.vax_key),
                lifestage=row.get(self.lifestage_key),
                comments=row.get(self.comment_key))

            # Optional yes/no precocity flag recorded as a health detail.
            if utils.nan_to_none(row.get(self.precocity_key)):
                if utils.y_n_to_bool(row[self.precocity_key]):
                    self.row_entered += utils.enter_sampd(
                        row_samp.pk,
                        cleaned_data,
                        row_date,
                        "Precocity",
                        self.ani_health_anidc_id.pk,
                        adsc_str="Precocity")

            # Optional UFID detail.
            if utils.nan_to_none(row.get(self.ufid_key)):
                self.row_entered += utils.enter_sampd(row_samp.pk,
                                                      cleaned_data, row_date,
                                                      row[self.ufid_key],
                                                      self.anidc_ufid_id.pk)

            self.row_entered += utils.parse_extra_cols(row,
                                                       self.cleaned_data,
                                                       row_samp,
                                                       samp=True)

        else:
            # No sample record was created.  NOTE(review): unlike other error
            # paths in this file, nothing is appended to log_data here —
            # consider logging the failing row.
            self.success = False