Example #1
 def historize_classification(self, only_qual=None, manual=True):
     """
        Copy current classification information into history table, for all rows in self.
        :param only_qual: If set, only historize for current rows with this classification.
        :param manual: If set, historize manual entries; otherwise, pick automatic ones.
     """
     # Shorten the SQLA expressions a bit
     oh = ObjectHeader
     och = ObjectsClassifHisto
     # What we want to historize, as a subquery
     if manual:
         # Manual classification columns: date, 'M' type, id, quality and author
         sel_subqry = select([
             oh.objid, oh.classif_when,
             text("'M'"), oh.classif_id, oh.classif_qual, oh.classif_who
         ])
         if only_qual is not None:
             qual_cond = oh.classif_qual.in_(only_qual)
         else:
             qual_cond = true()
         sel_subqry = sel_subqry.where(
             and_(oh.objid == any_(self.object_ids),
                  oh.classif_when.isnot(None), qual_cond))
         ins_columns = [
             och.objid, och.classif_date, och.classif_type, och.classif_id,
             och.classif_qual, och.classif_who
         ]
     else:
         # Automatic classification columns: date, 'A' type, id, quality and score
         sel_subqry = select([
             oh.objid, oh.classif_auto_when,
             text("'A'"), oh.classif_auto_id, oh.classif_qual,
             oh.classif_auto_score
         ])
         sel_subqry = sel_subqry.where(
             and_(oh.objid == any_(self.object_ids),
                  oh.classif_auto_id.isnot(None),
                  oh.classif_auto_when.isnot(None)))
         ins_columns = [
             och.objid, och.classif_date, och.classif_type, och.classif_id,
             och.classif_qual, och.classif_score
         ]
     # Insert into the log table
     ins_qry: Insert = pg_insert(och.__table__)
     ins_qry = ins_qry.from_select(ins_columns, sel_subqry)
     ins_qry = ins_qry.on_conflict_do_nothing(
         constraint='objectsclassifhisto_pkey')
     # TODO: mypy crashes due to pg_dialect below
     # logger.info("Histo query: %s", ins_qry.compile(dialect=pg_dialect()))
     nb_objs = self.session.execute(ins_qry).rowcount
     logger.info(" %d out of %d rows copied to log", nb_objs,
                 len(self.object_ids))
     return nb_objs
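
The key pattern above is an INSERT ... FROM SELECT combined with PostgreSQL's ON CONFLICT DO NOTHING, so rows already present in the history table are silently skipped. Below is a minimal, self-contained sketch of that pattern; the throwaway tables and column names are illustrative, not the EcoTaxa schema, and it assumes the SQLAlchemy 1.x select([...]) style used in these examples.

 from sqlalchemy import MetaData, Table, Column, Integer, String, select, literal
 from sqlalchemy.dialects import postgresql
 from sqlalchemy.dialects.postgresql import insert as pg_insert

 metadata = MetaData()
 live = Table("live", metadata,
              Column("objid", Integer, primary_key=True),
              Column("classif_id", Integer))
 histo = Table("histo", metadata,
               Column("objid", Integer, primary_key=True),
               Column("classif_type", String),
               Column("classif_id", Integer))

 # Rows to historize, as a subquery
 sel_subqry = select([live.c.objid, literal("M"), live.c.classif_id])
 # Insert into the log table, ignoring rows which are already there
 ins_qry = pg_insert(histo).from_select(
     [histo.c.objid, histo.c.classif_type, histo.c.classif_id], sel_subqry)
 # The original targets a named PK constraint; here the conflict columns are given directly
 ins_qry = ins_qry.on_conflict_do_nothing(index_elements=[histo.c.objid])
 print(ins_qry.compile(dialect=postgresql.dialect()))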
Example #2
 def update(self, session: Session, title: str, visible: bool, status: str, projtype: str,
            init_classif_list: List[int],
            classiffieldlist: str, popoverfieldlist: str,
            cnn_network_id: str, comments: str,
            contact: Any,
            managers: List[Any], annotators: List[Any], viewers: List[Any],
            license_: str):
     assert contact is not None, "A valid Contact is needed."
     proj_id = self._project.projid
     # Reflex actions triggered by field changes
     if cnn_network_id != self._project.cnn_network_id:
         sub_qry: Query = session.query(ObjectHeader.objid)
         sub_qry = sub_qry.join(Acquisition, Acquisition.acquisid == ObjectHeader.acquisid)
         sub_qry = sub_qry.join(Sample, and_(Sample.sampleid == Acquisition.acq_sample_id,
                                             Sample.projid == proj_id))
         # Delete CNN features which depend on the CNN network
         qry: Query = session.query(ObjectCNNFeature)
         qry = qry.filter(ObjectCNNFeature.objcnnid.in_(sub_qry.subquery()))
         qry.delete(synchronize_session=False)
     # Fields update
     self._project.title = title
     self._project.visible = visible
     self._project.status = status
     self._project.projtype = projtype
     self._project.classiffieldlist = classiffieldlist
     self._project.popoverfieldlist = popoverfieldlist
     self._project.cnn_network_id = cnn_network_id
     self._project.comments = comments
     self._project.license = license_
     # Inverse of the extraction: store the classification list as a CSV string
     self._project.initclassiflist = ",".join([str(cl_id) for cl_id in init_classif_list])
     # Inverse for users by privilege: dispatch members by right
     # TODO: Nothing prevents or cares about redundant rights, such as adding same
     #     user as both Viewer and Annotator.
     by_right = {ProjectPrivilegeBO.MANAGE: managers,
                 ProjectPrivilegeBO.ANNOTATE: annotators,
                 ProjectPrivilegeBO.VIEW: viewers}
     # Remove all to avoid tricky diffs
     session.query(ProjectPrivilege). \
         filter(ProjectPrivilege.projid == proj_id).delete()
     # Add all
     contact_used = False
     for a_right, a_user_list in by_right.items():
         for a_user in a_user_list:
             # Set flag for contact person
             extra = None
             if a_user.id == contact.id and a_right == ProjectPrivilegeBO.MANAGE:
                 extra = 'C'
                 contact_used = True
             session.add(ProjectPrivilege(projid=proj_id,
                                          member=a_user.id,
                                          privilege=a_right,
                                          extra=extra))
     # Sanity check
     assert contact_used, "Could not set Contact, the designated user is not in Managers list."
     session.commit()
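
The CNN-features cleanup above relies on a bulk DELETE filtered by an IN (subquery), with synchronize_session=False since no loaded objects need syncing. Here is a runnable sketch of that pattern against an in-memory SQLite database; the two models are stand-ins, not the real ObjectHeader/ObjectCNNFeature mappings.

 from sqlalchemy import Column, Integer, ForeignKey, create_engine
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import sessionmaker

 Base = declarative_base()

 class Obj(Base):
     __tablename__ = "obj"
     objid = Column(Integer, primary_key=True)
     projid = Column(Integer)

 class ObjFeature(Base):
     __tablename__ = "obj_feature"
     objcnnid = Column(Integer, ForeignKey("obj.objid"), primary_key=True)

 engine = create_engine("sqlite://")
 Base.metadata.create_all(engine)
 session = sessionmaker(bind=engine)()
 session.add_all([Obj(objid=1, projid=10), ObjFeature(objcnnid=1)])
 session.commit()

 # Subquery of the objects whose features must go away
 sub_qry = session.query(Obj.objid).filter(Obj.projid == 10)
 # Bulk DELETE, bypassing the identity map (synchronize_session=False)
 nb_deleted = session.query(ObjFeature) \
     .filter(ObjFeature.objcnnid.in_(sub_qry.subquery())) \
     .delete(synchronize_session=False)
 session.commit()
 print(nb_deleted)  # -> 1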
Example #3
 def get_all_object_ids(cls, session: Session, prj_id: int):  # TODO: Problem with recursive import -> ObjetIdListT:
     """
         Return the full list of object IDs inside a project.
         TODO: Maybe better in ObjectBO
     """
     qry: Query = session.query(ObjectHeader.objid)
     qry = qry.join(Acquisition, Acquisition.acquisid == ObjectHeader.acquisid)
     qry = qry.join(Sample, and_(Sample.sampleid == Acquisition.acq_sample_id,
                                 Sample.projid == prj_id))
     return [an_id for an_id, in qry.all()]
Example #4
 def match_with_extension():
     """
        Build the join condition matching Taxonomy names against WoRMS scientific names,
        tolerating a trailing " X", " sp." or " X sp." on the EcoTaxa side.
     """
     # We also match if one of these suffixes is trailing on EcoTaxa side
     # ok_ext = [" X", " sp.", " X sp."]
     # ok_ext_txt = [text("'" + ext.lower() + "'") for ext in ok_ext]
     # match_name = [func.lower(WoRMS.scientificname)]
     # match_name += [func.concat(func.lower(WoRMS.scientificname), ext) for ext in ok_ext_txt]
     return or_(
         func.lower(Taxonomy.name) == func.lower(WoRMS.scientificname),
         and_(
             Taxonomy.name.like(text("'% X'")),
             func.lower(Taxonomy.name) == func.concat(
                 func.lower(WoRMS.scientificname), text("' x'"))),
         and_(
             Taxonomy.name.like(text("'% sp.'")),
             func.lower(Taxonomy.name) == func.concat(
                 func.lower(WoRMS.scientificname), text("' sp.'"))),
         and_(
             Taxonomy.name.like(text("'% X sp.'")),
             func.lower(Taxonomy.name) == func.concat(
                 func.lower(WoRMS.scientificname), text("' x sp.'"))))
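
To see what such a suffix-tolerant condition renders to, here is a small stand-in that can simply be printed; LocalTaxon and RefTaxon are invented models, not the real Taxonomy and WoRMS mappings, and only the " sp." case is reproduced.

 from sqlalchemy import Column, Integer, String, and_, or_, func, text
 from sqlalchemy.ext.declarative import declarative_base

 Base = declarative_base()

 class LocalTaxon(Base):
     __tablename__ = "local_taxon"
     id = Column(Integer, primary_key=True)
     name = Column(String)

 class RefTaxon(Base):
     __tablename__ = "ref_taxon"
     aphia_id = Column(Integer, primary_key=True)
     scientificname = Column(String)

 # Exact match, or exact match once a trailing " sp." is appended to the reference name
 match = or_(
     func.lower(LocalTaxon.name) == func.lower(RefTaxon.scientificname),
     and_(LocalTaxon.name.like(text("'% sp.'")),
          func.lower(LocalTaxon.name) == func.concat(
              func.lower(RefTaxon.scientificname), text("' sp.'"))))
 print(match)  # shows the OR/AND tree rendered as SQL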
Example #5
 def get_all_object_ids(cls, session: Session, acquis_id: AcquisitionIDT,
                        classif_ids: Optional[ClassifIDListT] = None) \
         -> List[int]:
     qry: Query = session.query(ObjectHeader.objid)
     qry = qry.join(
         Acquisition,
         and_(ObjectHeader.acquisid == Acquisition.acquisid,
              Acquisition.acquisid == acquis_id))
     if classif_ids is not None:
         qry = qry.filter(ObjectHeader.classif_id.in_(classif_ids))
     return [an_id for an_id, in qry.all()]
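
Note the two filtering styles across these examples: col == any_(ids) ships the whole id list as a single ARRAY parameter (PostgreSQL-specific), while col.in_(ids) is expanded per value by the dialect. A quick way to compare the rendered SQL, on an illustrative table:

 from sqlalchemy import MetaData, Table, Column, Integer, any_
 from sqlalchemy.dialects import postgresql

 obj = Table("obj", MetaData(), Column("objid", Integer, primary_key=True))
 ids = [1, 2, 3]

 any_clause = obj.c.objid == any_(ids)  # objid = ANY (...), the list is one ARRAY bind
 in_clause = obj.c.objid.in_(ids)       # objid IN (...), expanded per value by the dialect
 print(any_clause.compile(dialect=postgresql.dialect()))
 print(in_clause.compile(dialect=postgresql.dialect()))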
Example #6
    def reset_to_predicted(self):
        """
            Reset to Predicted state, keeping a log (i.e. history) of the previous change.
        """
        oh = ObjectHeader
        self.historize_classification(['V', 'D'])

        # Update objects table
        obj_upd_qry: Update = oh.__table__.update()
        obj_upd_qry = obj_upd_qry.where(
            and_(oh.objid == any_(self.object_ids),
                 (oh.classif_qual.in_(['V', 'D']))))
        obj_upd_qry = obj_upd_qry.values(classif_qual='P')
        nb_objs = self.session.execute(obj_upd_qry).rowcount
        logger.info(" %d out of %d rows reset to predicted", nb_objs,
                    len(self.object_ids))

        self.session.commit()
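
The state change itself is a plain Core bulk UPDATE whose rowcount is logged. A runnable sketch of that update-and-count pattern on a throwaway SQLite table (names are illustrative, not the EcoTaxa schema):

 from sqlalchemy import MetaData, Table, Column, Integer, String, create_engine

 metadata = MetaData()
 obj = Table("obj", metadata,
             Column("objid", Integer, primary_key=True),
             Column("classif_qual", String))

 engine = create_engine("sqlite://")
 metadata.create_all(engine)
 with engine.connect() as conn:
     conn.execute(obj.insert(), [{"objid": 1, "classif_qual": "V"},
                                 {"objid": 2, "classif_qual": "D"},
                                 {"objid": 3, "classif_qual": "P"}])
     # Bulk UPDATE of all Validated/Dubious rows back to Predicted
     obj_upd_qry = obj.update() \
         .where(obj.c.classif_qual.in_(["V", "D"])) \
         .values(classif_qual="P")
     nb_objs = conn.execute(obj_upd_qry).rowcount
     print(nb_objs)  # -> 2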
Example #7
 def strict_match_subquery(session, used_taxo_ids,
                           phylo_or_morpho: Optional[str]):
     """
        Build a subquery matching each used Taxonomy entry with exactly one accepted WoRMS entry.
     """
     subqry = session.query(Taxonomy.name,
                            func.max(Taxonomy.id).label("id"),
                            WoRMS.aphia_id)
     subqry = subqry.join(WoRMS,
                          TaxonomyChangeService.match_with_extension())
     subqry = subqry.filter(Taxonomy.id == any_(used_taxo_ids))
     if phylo_or_morpho is not None:
         subqry = subqry.filter(Taxonomy.taxotype == text("'%s'" %
                                                          phylo_or_morpho))
     subqry = subqry.filter(WoRMS.status == text("'accepted'"))
     # Group to exclude multiple matches
     subqry = subqry.group_by(Taxonomy.name, WoRMS.aphia_id)
     subqry = subqry.having(
         and_(
             func.count(Taxonomy.name) == 1,
             func.count(WoRMS.aphia_id) == 1))
     subqry = subqry.subquery().alias("ids")
     return subqry
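
The GROUP BY ... HAVING count() == 1 at the end is what drops ambiguous matches, e.g. two local names that collide on the same reference entry. A runnable sketch of that trick on throwaway models in SQLite (data and names are made up for illustration):

 from sqlalchemy import Column, Integer, String, create_engine, func, and_
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import sessionmaker

 Base = declarative_base()

 class Local(Base):
     __tablename__ = "local"
     id = Column(Integer, primary_key=True)
     name = Column(String)

 class Ref(Base):
     __tablename__ = "ref"
     aphia_id = Column(Integer, primary_key=True)
     scientificname = Column(String)

 engine = create_engine("sqlite://")
 Base.metadata.create_all(engine)
 session = sessionmaker(bind=engine)()
 session.add_all([Local(id=1, name="calanus"), Local(id=2, name="calanus"),  # duplicate name
                  Local(id=3, name="oithona"),
                  Ref(aphia_id=10, scientificname="calanus"),
                  Ref(aphia_id=20, scientificname="oithona")])
 session.commit()

 qry = session.query(Local.name, func.max(Local.id).label("id"), Ref.aphia_id)
 qry = qry.join(Ref, func.lower(Local.name) == func.lower(Ref.scientificname))
 # Group, then keep only groups fed by exactly one joined pair
 qry = qry.group_by(Local.name, Ref.aphia_id)
 qry = qry.having(and_(func.count(Local.name) == 1,
                       func.count(Ref.aphia_id) == 1))
 print(qry.all())  # -> [('oithona', 3, 20)]; 'calanus' is dropped, it exists twice locally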