def Prjpopupeditpreset(PrjId):
    sql = """select p.projid,title,initclassiflist
        ,(select string_agg(pts.id::varchar,',') objtaxon from projects_taxo_stat pts where pts.projid=p.projid)
        from projects p """
    if not current_user.has_role(database.AdministratorLabel):
        sql += " Join projectspriv pp on p.projid = pp.projid and pp.member=%d" % (current_user.id,)
    sql += " order by upper(title) "
    Prj = GetAll(sql, cursor_factory=psycopg2.extras.RealDictCursor)
    # Prj2=Prj[:]
    # for i in range(200):Prj.extend(Prj2) # for tests on a bigger list
    # build the distinct list of all taxa referenced by the presets or the objects
    TaxonList = {-1}
    for P in Prj:
        lst = ntcv(P['initclassiflist']) + "," + ntcv(P['objtaxon'])
        for t in lst.split(','):
            if t.isdecimal():
                TaxonList.add(int(t))
    # txt=",".join((str(x) for x in TaxonList))
    TaxoMap = GetAssoc2Col("select id,display_name from taxonomy where id = any (%s)",
                           [[x for x in TaxonList]])
    # txt = str(TaxoMap)
    txt = ""
    for P in Prj:
        result = []
        initclassiflist = {int(x.strip()) for x in ntcv(P['initclassiflist']).split(',') if x.isdecimal()}
        objtaxon = {int(x.strip()) for x in ntcv(P['objtaxon']).split(',') if x.isdecimal()}
        objtaxon.difference_update(initclassiflist)
        for t in initclassiflist:
            resolved = TaxoMap.get(int(t), None)
            if resolved:
                result.append(resolved)
        P['presetids'] = ",".join(str(x) for x in initclassiflist)
        P['preset'] = ", ".join(sorted(result))
        result = []
        for t in objtaxon:
            resolved = TaxoMap.get(int(t), None)
            if resolved:
                result.append(resolved)
        P['objtaxonnotinpreset'] = ", ".join(sorted(result))
        P['objtaxonids'] = ",".join(str(x) for x in objtaxon)
    return render_template('project/popupeditpreset.html', Prj=Prj, txt=txt)
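
# A minimal standalone sketch of the ID-list parsing used above: initclassiflist and the
# aggregated objtaxon column are both comma-separated ID strings, and only decimal tokens
# are kept. `parse_id_list` is an illustrative name, not a helper of this module.
def parse_id_list(csv_ids):
    """'12, 34,,x,56' -> {12, 34, 56}"""
    return {int(x.strip()) for x in (csv_ids or "").split(',') if x.strip().isdecimal()}

# Example: parse_id_list("1,2,3") - parse_id_list("2,3") == {1}, i.e. the taxa present
# on objects but absent from the preset.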
def browsetaxotsvexport():
    sql = """select t.id,t.parent_id,t.name,t.taxotype,t.taxostatus
            ,t.id_source,t.source_url,t.source_desc
            ,t.creator_email,to_char(t.creation_datetime,'yyyy-mm-dd hh24:mi:ss') creation_datetime
            ,t.id_instance,t.rename_to
            ,to_char(t.lastupdate_datetime,'yyyy-mm-dd hh24:mi:ss') lastupdate_datetime
            ,i.name instance_name,tr.display_name rename_to_name,t.nbrobj,t.nbrobjcum
            ,t.display_name,{}
            from taxonomy t
            LEFT JOIN ecotaxainst i on t.id_instance=i.id
            LEFT JOIN taxonomy tr on tr.id=t.rename_to
            {}
            order by 1 """.format(SQLTreeSelect, SQLTreeJoin)
    lst = GetAll(sql)
    t = []
    t.append("id\tparent_id\tname\ttaxotype\ttaxostatus\tid_source\tsource_url\tsource_desc\tcreator_email\tcreation_datetime"
             + "\tid_instance\trename_to\tlastupdate_datetime\tinstance_name\trename_to_name\tnbrobj\tnbrobjcum\tdisplay_name\tlineage")
    for l in lst:
        # t.append("{id},{parent_id}".format(l))
        t.append("\t".join(str(ntcv(x)).replace("\n", "\\n").replace("\t", "\\t").replace("\r", "")
                           for x in l[0:99]))
    return Response("\n".join(t), mimetype="text/tsv",
                    headers={"Content-Disposition": "attachment; filename=taxoexport_%s.tsv"
                             % datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S')})
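
# Illustrative sketch of the cell escaping applied in the export loop above: embedded
# newlines and tabs are written as the two-character sequences \n and \t, and carriage
# returns are dropped, so every record stays on one physical TSV line and the import
# side can reverse the transformation. `escape_tsv_cell` is a hypothetical name.
def escape_tsv_cell(value):
    return str(value).replace("\n", "\\n").replace("\t", "\\t").replace("\r", "")

# Example: escape_tsv_cell("a\tb\nc") == "a\\tb\\nc"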
def searchtaxo():
    term = gvg("q")
    if len(term) <= 2:
        # return "[]"
        if not current_user.is_authenticated:
            return "[]"
        # current_user.id
        with app.MRUClassif_lock:
            # app.MRUClassif[current_user.id]=[{"id": 2904, "pr": 0, "text": "Teranympha (Eucomonymphidae-Teranymphidae)"},
            #     {"id": 12488, "pr": 0, "text": "Teranympha mirabilis "},
            #     {"id": 76677, "pr": 0, "text": "Terasakiella (Methylocystaceae)"},
            #     {"id": 82969, "pr": 0, "text": "Terasakiella pusilla "}]
            return json.dumps(app.MRUClassif.get(current_user.id, []))
    # manage the MRU list using past classifications
    ltfound = term.find('<') > 0
    SQLWith = """ """
    # '*' and space act as '%' wildcards
    terms = [x.lower().replace("*", "%").replace(" ", "%") + "%" for x in term.split('<')]
    param = {'term': terms[0]}  # the first term is always applied to the display name
    ExtraWhere = ExtraFrom = ""
    if len(terms) > 1:
        ExtraFrom = SQLTreeJoin
        terms = ['%%<' + x.replace("%", "%%").replace("*", "%%").replace(" ", "%%") for x in terms[1:]]
        termsSQL = QuotedString("".join(terms)).getquoted().decode('iso-8859-15', 'strict')
        ExtraWhere = ' and ' + SQLTreeExp + " ilike " + termsSQL
    sql = """SELECT tf.id, tf.display_name as name ,0 FROM taxonomy tf {0}
            WHERE lower(tf.display_name) LIKE %(term)s {1}
            order by lower(tf.display_name) limit 200""".format(ExtraFrom, ExtraWhere)
    PrjId = gvg("projid")
    if PrjId != "":
        PrjId = int(PrjId)
        Prj = database.Projects.query.filter_by(projid=PrjId).first()
        if ntcv(Prj.initclassiflist) != "":
            InitClassif = Prj.initclassiflist
            InitClassif = ", ".join(["(" + x.strip() + ")" for x in InitClassif.split(",") if x.strip() != ""])
            # ,tf.name||case when p1.name is not null and tf.name not like '%% %%' then ' ('||p1.name||')' else ' ' end as name
            sql = """
            SELECT tf.id ,tf.display_name as name
                  , case when id2 is null then 0 else 1 end inpreset
            FROM taxonomy tf
            join (select t.id id1,c.id id2
                  FROM taxonomy t
                  full JOIN (VALUES """ + InitClassif + """) c(id) ON t.id = c.id
                  WHERE lower(display_name) LIKE %(term)s) tl2 on tf.id=coalesce(id1,id2)
            """ + ExtraFrom + """
            WHERE lower(tf.display_name) LIKE %(term)s """ + ExtraWhere + """
            order by inpreset desc,lower(tf.display_name),name limit 200 """
    res = GetAll(sql, param, debug=False)
    return json.dumps([dict(id=r[0], text=r[1], pr=r[2]) for r in res])
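
# Sketch of the pattern building used above, under the same conventions: '*' and space
# act as multi-character wildcards, and '<' splits the query into a display-name part
# followed by lineage parts. `build_like_patterns` is an illustrative name.
def build_like_patterns(term):
    return [x.lower().replace("*", "%").replace(" ", "%") + "%" for x in term.split('<')]

# Example: build_like_patterns("cope*<crust") == ["cope%%", "crust%"]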
def SPStep1(self):
    logging.info("Input Param = %s" % (self.param.__dict__))
    logging.info("Start Step 1")
    Prj = database.Projects.query.filter_by(projid=self.param.ProjectId).first()
    WarnMessages = []
    if getattr(self.param, 'IntraStep', 0) == 0:
        # Subtask 1: unzip, or point at the source directory.
        if self.param.InData.lower().endswith("zip"):
            logging.info("SubTask0 : Unzip File on temporary folder")
            self.UpdateProgress(1, "Unzip File on temporary folder")
            self.param.SourceDir = os.path.normpath(os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "../../temptask/task%06d/data" % (int(self.task.id))))
            if not os.path.exists(self.param.SourceDir):
                os.mkdir(self.param.SourceDir)
            with zipfile.ZipFile(self.param.InData, 'r') as z:
                z.extractall(self.param.SourceDir)
        else:
            self.param.SourceDir = self.param.InData
        self.param.IntraStep = 1
    if self.param.IntraStep == 1:
        self.param.Mapping = {}  # reset on every attempt
        # Import the mapping that already exists in the project
        for k, v in DecodeEqualList(Prj.mappingobj).items():
            self.param.Mapping['object_' + v] = {'table': 'obj_field', 'title': v, 'type': k[0], 'field': k}
        for k, v in DecodeEqualList(Prj.mappingsample).items():
            self.param.Mapping['sample_' + v] = {'table': 'sample', 'title': v, 'type': k[0], 'field': k}
        for k, v in DecodeEqualList(Prj.mappingacq).items():
            self.param.Mapping['acq_' + v] = {'table': 'acq', 'title': v, 'type': k[0], 'field': k}
        for k, v in DecodeEqualList(Prj.mappingprocess).items():
            self.param.Mapping['process_' + v] = {'table': 'process', 'title': v, 'type': k[0], 'field': k}
        ProjectWasEmpty = len(self.param.Mapping) == 0
        self.param.TaxoFound = {}  # reset on every attempt
        self.param.UserFound = {}  # reset on every attempt
        self.param.steperrors = []  # reset the errors
        # Version 1.1 normalizes the 'acq_instrument' field: if it was mapped to a tXX column,
        # rename it so that the update goes into the instrument field.
        if 'acq_instrument' in self.param.Mapping:
            self.param.Mapping['acq_instrument_old'] = self.param.Mapping['acq_instrument']
            self.param.Mapping['acq_instrument_old']['title'] = 'instrument_old'
            del self.param.Mapping['acq_instrument']
        # retrieve all the object/image pairs of the project
        logging.info("SubTask1 : Analyze TSV Files")
        self.UpdateProgress(2, "Analyze TSV Files")
        self.LastNum = {x: {'n': 0, 't': 0} for x in PredefinedTables}
        # Extract the max used number per field type
        for m in self.param.Mapping.values():
            v = int(m['field'][1:])
            if v > self.LastNum[m['table']][m['field'][0]]:
                self.LastNum[m['table']][m['field'][0]] = v
        sd = Path(self.param.SourceDir)
        self.param.TotalRowCount = 0
        Seen = set()  # remembers the fields for which values were seen
        ClassifIDSeen = set()
        for filter in ("**/ecotaxa*.txt", "**/ecotaxa*.tsv"):
            for CsvFile in sd.glob(filter):
                relname = CsvFile.relative_to(sd)  # relative name, for display purposes only
                logging.info("Analyzing file %s" % (relname.as_posix()))
                with open(CsvFile.as_posix(), encoding='latin_1') as csvfile:
                    # dictionary reader keyed on the first line
                    rdr = csv.DictReader(csvfile, delimiter='\t', quotechar='"')
                    # read the types line (2nd line of the file)
                    LType = {champ.strip(" \t").lower(): v for champ, v in rdr.__next__().items()}
                    # Build the mapping
                    ListeChamps = [champ.strip(" \t").lower() for champ in rdr.fieldnames]
                    for ColName in ListeChamps:
                        if ColName in self.param.Mapping or ColName in (
                                'object_annotation_parent_category', 'object_annotation_hierarchy'):
                            continue  # field already detected, OR one of the fields banned from import because it is produced by the export
                        ColSplitted = ColName.split("_", 1)
                        if len(ColSplitted) != 2:
                            self.LogErrorForUser("Invalid Header '%s' in file %s. Format must be Table_Field. Field ignored"
                                                 % (ColName, relname.as_posix()))
                            continue
                        Table = ColSplitted[0]  # isolate the table part, before the first '_'
                        if ColName in PredefinedFields:
                            Table = PredefinedFields[ColName]['table']
                            self.param.Mapping[ColName] = PredefinedFields[ColName]
                        else:  # field is not predefined, so it goes into an nXX or tXX column
                            if Table == "object":
                                Table = "obj_field"
                            if Table not in PredefinedTables:
                                self.LogErrorForUser("Invalid Header '%s' in file %s. Table Incorrect. Field ignored"
                                                     % (ColName, relname.as_posix()))
                                continue
                            if Table != 'obj_head' and Table != 'obj_field':
                                # in the other tables, the types are forced to text
                                SelType = 't'
                            else:
                                if LType[ColName] not in PredefinedTypes:
                                    self.LogErrorForUser("Invalid Type '%s' for Field '%s' in file %s. Incorrect Type. Field ignored"
                                                         % (LType[ColName], ColName, relname.as_posix()))
                                    continue
                                SelType = PredefinedTypes[LType[ColName]]
                            self.LastNum[Table][SelType] += 1
                            self.param.Mapping[ColName] = {
                                'table': Table,
                                'field': SelType + "%02d" % self.LastNum[Table][SelType],
                                'type': SelType,
                                'title': ColSplitted[1]}
                            logging.info("New field %s found in file %s", ColName, relname.as_posix())
                            if not ProjectWasEmpty:
                                WarnMessages.append("New field %s found in file %s" % (ColName, relname.as_posix()))
                    # Check the file content
                    RowCount = 0
                    for lig in rdr:
                        RowCount += 1
                        for champ in rdr.fieldnames:
                            ColName = champ.strip(" \t").lower()
                            m = self.param.Mapping.get(ColName, None)
                            if m is None:
                                continue  # the field is not handled
                            v = CleanValue(lig[champ])
                            Seen.add(ColName)  # V2.0 (1.1 on main import): a present column counts as Seen; before, a value had to be seen.
                            if v != "":  # no value means nothing to check
                                if m['type'] == 'n':
                                    vf = ToFloat(v)
                                    if vf is None:
                                        self.LogErrorForUser("Invalid float value '%s' for Field '%s' in file %s."
                                                             % (v, champ, relname.as_posix()))
                                    elif ColName == 'object_lat':
                                        if vf < -90 or vf > 90:
                                            self.LogErrorForUser("Invalid Lat. value '%s' for Field '%s' in file %s. Incorrect range -90/+90°."
                                                                 % (v, champ, relname.as_posix()))
                                    elif ColName == 'object_long':
                                        if vf < -180 or vf > 180:
                                            self.LogErrorForUser("Invalid Long. value '%s' for Field '%s' in file %s. Incorrect range -180/+180°."
                                                                 % (v, champ, relname.as_posix()))
                                    elif ColName == 'object_annotation_category_id':
                                        if self.param.updateclassif == "Y":
                                            ClassifIDSeen.add(int(v))
                                elif ColName == 'object_date':
                                    try:
                                        datetime.date(int(v[0:4]), int(v[4:6]), int(v[6:8]))
                                    except ValueError:
                                        self.LogErrorForUser("Invalid Date value '%s' for Field '%s' in file %s."
                                                             % (v, champ, relname.as_posix()))
                                elif ColName == 'object_time':
                                    try:
                                        v = v.zfill(6)
                                        datetime.time(int(v[0:2]), int(v[2:4]), int(v[4:6]))
                                    except ValueError:
                                        self.LogErrorForUser("Invalid Time value '%s' for Field '%s' in file %s."
                                                             % (v, champ, relname.as_posix()))
                                elif ColName == 'object_annotation_category':
                                    if self.param.updateclassif == "Y":
                                        if CleanValue(lig.get('object_annotation_category_id', '')) == '':
                                            # only handled when no numeric ID was specified
                                            v = self.param.TaxoMap.get(v, v)  # apply the mapping
                                            self.param.TaxoFound[v.lower()] = None  # create an entry in the dictionary
                                elif ColName == 'object_annotation_person_name':
                                    if self.param.updateclassif == "Y":
                                        self.param.UserFound[v.lower()] = {
                                            'email': CleanValue(lig.get('object_annotation_person_email', ''))}
                                elif ColName == 'object_annotation_status':
                                    if self.param.updateclassif == "Y":
                                        if v != 'noid' and v.lower() not in database.ClassifQualRevert:
                                            self.LogErrorForUser("Invalid Annotation Status '%s' for Field '%s' in file %s."
                                                                 % (v, champ, relname.as_posix()))
                        # Check that the object_id is present
                        ObjectId = CleanValue(lig.get('object_id', ''))
                        if ObjectId == '':
                            self.LogErrorForUser("Missing object_id on line '%s' in file %s. "
                                                 % (RowCount, relname.as_posix()))
                    logging.info("File %s : %d rows analysed", relname.as_posix(), RowCount)
                    self.param.TotalRowCount += RowCount
        if self.param.TotalRowCount == 0:
            self.LogErrorForUser("No object found")
        if len(ClassifIDSeen) > 0:
            ClassifIDFoundInDB = GetAll("select id from taxonomy where id = any (%s)", [list(ClassifIDSeen)])
            ClassifIDFoundInDB = {int(r['id']) for r in ClassifIDFoundInDB}
            ClassifIDNotFoundInDB = ClassifIDSeen.difference(ClassifIDFoundInDB)
            if len(ClassifIDNotFoundInDB) > 0:
                msg = "Some specified classif_id don't exist, correct them prior to reload: %s" % (
                    ",".join(str(x) for x in ClassifIDNotFoundInDB))
                self.param.steperrors.append(msg)
                logging.error(msg)
        self.UpdateProgress(15, "TSV File Parsed")
        # print(self.param.Mapping)
        logging.info("Taxo Found = %s", self.param.TaxoFound)
        logging.info("Users Found = %s", self.param.UserFound)
        logging.info("For Information Not Seen Fields %s", [k for k in self.param.Mapping if k not in Seen])
        if len(self.param.steperrors) > 0:
            self.task.taskstate = "Error"
            self.task.progressmsg = "Some errors found during file parsing "
            db.session.commit()
            return
        self.param.IntraStep = 2
    if self.param.IntraStep == 2:
        logging.info("Start Sub Step 1.2")
        self.pgcur.execute(
            "select id,lower(name),lower(email) from users where lower(name) = any(%s) or email= any(%s) ",
            ([x for x in self.param.UserFound.keys()],
             [x.get('email') for x in self.param.UserFound.values()]))
        # Resolve the users from their name or their email
        for rec in self.pgcur:
            for u in self.param.UserFound:
                if u == rec[1] or ntcv(self.param.UserFound[u].get('email')).lower() == rec[2]:
                    self.param.UserFound[u]['id'] = rec[0]
        logging.info("Users Found = %s", self.param.UserFound)
        NotFoundUser = [k for k, v in self.param.UserFound.items() if v.get("id") == None]
        if len(NotFoundUser) > 0:
            logging.info("Some Users Not Found = %s", NotFoundUser)
        # retrieve the IDs of the taxa that were found
        NotFoundTaxo = []
        ResolveTaxoFound(self.param.TaxoFound, NotFoundTaxo)
        if len(NotFoundTaxo) > 0:
            logging.info("Some Taxo Not Found = %s", NotFoundTaxo)
        if len(NotFoundUser) == 0 and len(NotFoundTaxo) == 0 and len(WarnMessages) == 0:
            # everything is already resolved, move straight to phase 2
            self.SPStep2()
        else:
            self.task.taskstate = "Question"
            if len(WarnMessages) > 0:
                self.UpdateProgress(20, "Taxo automatic resolution Done, <span style='color:red;font-weight:bold;'>Some Warning :\n-%s </span>"
                                    % ("\n-".join(WarnMessages)))
            else:
                self.UpdateProgress(20, "Taxo automatic resolution Done")
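
# For reference, a minimal sketch of the TSV layout that SPStep1 parses, assuming the
# usual EcoTaxa convention for the second line: a header row of table_field names, then
# a type row ('[f]' for numeric fields mapped to nXX columns, '[t]' for text fields
# mapped to tXX columns), then the data rows. The values are made up for illustration.
EXAMPLE_TSV = ("object_id\tobject_lat\tobject_long\tobject_date\tobject_myfeature\n"
               "[t]\t[f]\t[f]\t[t]\t[f]\n"
               "obj001\t43.5\t7.1\t20190501\t0.42\n")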
def SPStep2(self):
    logging.info("Start Step 2 : Effective data import")
    logging.info("Taxo Mapping = %s", self.param.TaxoFound)
    logging.info("Users Mapping = %s", self.param.UserFound)
    AstralCache = {'date': None, 'time': None, 'long': None, 'lat': None, 'r': ''}
    # Update the mapping in the database
    Prj = database.Projects.query.filter_by(projid=self.param.ProjectId).first()
    Prj.mappingobj = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'obj_field' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    Prj.mappingsample = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'sample' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    Prj.mappingacq = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'acq' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    Prj.mappingprocess = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'process' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    db.session.commit()
    Ids = {"acq": {"tbl": "acquisitions", "pk": "acquisid"},
           "sample": {"tbl": "samples", "pk": "sampleid"},
           "process": {"tbl": "process", "pk": "processid"}}
    # retrieve the orig_id of the acq, sample and process rows
    for i in Ids:
        sql = "select orig_id," + Ids[i]['pk'] + " from " + Ids[i]['tbl'] + " where projid=" + str(self.param.ProjectId)
        Ids[i]["ID"] = {}
        for r in GetAll(sql):
            Ids[i]["ID"][r[0]] = int(r[1])
    # retrieve the object IDs of the project
    self.ExistingObject = {}
    self.pgcur.execute("SELECT o.orig_id,o.objid from objects o where o.projid=" + str(self.param.ProjectId))
    for rec in self.pgcur:
        self.ExistingObject[rec[0]] = rec[1]
    # logging.info("Ids = %s",Ids)
    random.seed()
    RealTableName = {"acq": 'acquisitions', "sample": 'samples', "process": 'process',
                     "obj_head": "obj_head", "obj_field": "obj_field"}
    sd = Path(self.param.SourceDir)
    TotalRowCount = 0
    for filter in ("**/ecotaxa*.txt", "**/ecotaxa*.tsv"):
        for CsvFile in sd.glob(filter):
            relname = CsvFile.relative_to(sd)  # relative name, for display purposes only
            logging.info("Analyzing file %s" % (relname.as_posix()))
            with open(CsvFile.as_posix(), encoding='latin_1') as csvfile:
                # dictionary reader keyed on the first line
                rdr = csv.DictReader(csvfile, delimiter='\t', quotechar='"')
                # read the types line (2nd line of the file)
                LType = {champ.strip(" \t").lower(): v for champ, v in rdr.__next__().items()}
                ListeChamps = [champ.strip(" \t").lower() for champ in rdr.fieldnames]
                # Load the file content
                RowCount = 0
                for rawlig in rdr:
                    lig = {champ.strip(" \t").lower(): v for champ, v in rawlig.items()}
                    Objs = {"acq": self.EmptyObject(), "sample": self.EmptyObject(),
                            "process": self.EmptyObject(), "obj_head": self.EmptyObject(),
                            "obj_field": self.EmptyObject()}
                    ObjsNew = {"acq": database.Acquisitions(), "sample": database.Samples(),
                               "process": database.Process()}
                    RowCount += 1
                    TotalRowCount += 1
                    if 'object_annotation_category_id' in ListeChamps and 'object_annotation_category' in ListeChamps:
                        if CleanValue(lig.get('object_annotation_category_id', '')) != '':
                            del lig['object_annotation_category']  # when an ID is given, the text is ignored
                    for champ in ListeChamps:
                        ColName = champ.strip(" \t").lower()
                        m = self.param.Mapping.get(ColName, None)
                        if m is None:
                            continue  # the field is not handled
                        FieldName = m.get("field", None)
                        FieldTable = m.get("table", None)
                        FieldValue = None
                        v = CleanValue(lig.get(champ))
                        if v != "":  # no value leaves the field null
                            if m['type'] == 'n':
                                if champ == 'object_annotation_category_id':
                                    if self.param.updateclassif != "Y":
                                        continue
                                FieldValue = ToFloat(v)
                            elif champ == 'object_date':
                                FieldValue = datetime.date(int(v[0:4]), int(v[4:6]), int(v[6:8]))
                            elif champ == 'object_time':
                                v = v.zfill(6)
                                FieldValue = datetime.time(int(v[0:2]), int(v[2:4]), int(v[4:6]))
                            elif FieldName == 'classif_when':
                                if self.param.updateclassif != "Y":
                                    continue
                                v2 = CleanValue(lig.get('object_annotation_time', '000000')).zfill(6)
                                FieldValue = datetime.datetime(int(v[0:4]), int(v[4:6]), int(v[6:8]),
                                                               int(v2[0:2]), int(v2[2:4]), int(v2[4:6]))
                            elif FieldName == 'classif_id':
                                if self.param.updateclassif != "Y":
                                    continue
                                FieldValue = self.param.TaxoFound[ntcv(v).lower()]  # the numeric variant is handled by the type=='n' branch
                            elif FieldName == 'classif_who':
                                if self.param.updateclassif != "Y":
                                    continue
                                FieldValue = self.param.UserFound[ntcv(v).lower()].get('id', None)
                            elif FieldName == 'classif_qual':
                                if self.param.updateclassif != "Y":
                                    continue
                                FieldValue = database.ClassifQualRevert.get(v.lower())
                            else:  # plain text field, nothing special
                                FieldValue = v
                        # if FieldTable in ('image', 'sample'):
                        if FieldTable in ('image', ):
                            pass  # image fields are ignored during an update
                        elif FieldTable in Objs:
                            if FieldName in db.metadata.tables[RealTableName[FieldTable]].columns:
                                setattr(Objs[FieldTable], FieldName, FieldValue)
                                # logging.info("setattr %s %s %s",FieldTable,FieldName,FieldValue)
                            # else:
                            #     logging.info("skip F %s %s %s",FieldTable,FieldName,FieldValue)
                        else:
                            logging.info("skip T %s %s %s", FieldTable, FieldName, FieldValue)
                    # Compute the position of the sun
                    if (hasattr(Objs["obj_head"], 'objdate') and hasattr(Objs["obj_head"], 'objtime')
                            and hasattr(Objs["obj_head"], 'longitude') and hasattr(Objs["obj_head"], 'latitude')):
                        if not (AstralCache['date'] == Objs["obj_head"].objdate
                                and AstralCache['time'] == Objs["obj_head"].objtime
                                and AstralCache['long'] == Objs["obj_head"].longitude
                                and AstralCache['lat'] == Objs["obj_head"].latitude):
                            AstralCache = {'date': Objs["obj_head"].objdate,
                                           'time': Objs["obj_head"].objtime,
                                           'long': Objs["obj_head"].longitude,
                                           'lat': Objs["obj_head"].latitude,
                                           'r': ''}
                            from astral import AstralError
                            try:
                                AstralCache['r'] = appli.CalcAstralDayTime(AstralCache['date'], AstralCache['time'],
                                                                           AstralCache['lat'], AstralCache['long'])
                            except AstralError as e:
                                # at some places on the globe, some days have no day/night switch, which raises an error
                                app.logger.error("Astral error : %s for %s", e, AstralCache)
                            except Exception as e:
                                # other errors, e.g. when the time is invalid
                                app.logger.error("Astral error : %s for %s", e, AstralCache)
                        Objs["obj_head"].sunpos = AstralCache['r']
                    # Assign the Sample, Acq & Process IDs, creating the rows when needed
                    for t in Ids:
                        if hasattr(Objs[t], 'orig_id') and Objs[t].orig_id is not None:
                            # columns are defined on the auxiliary tables
                            if Objs[t].orig_id in Ids[t]["ID"]:  # an existing auxiliary row is referenced
                                if t == 'sample':  # fetch the record
                                    ObjAux = database.Samples.query.filter_by(
                                        sampleid=Ids[t]["ID"][Objs[t].orig_id]).first()
                                elif t == 'acq':
                                    ObjAux = database.Acquisitions.query.filter_by(
                                        acquisid=Ids[t]["ID"][Objs[t].orig_id]).first()
                                elif t == 'process':
                                    ObjAux = database.Process.query.filter_by(
                                        processid=Ids[t]["ID"][Objs[t].orig_id]).first()
                                else:
                                    ObjAux = None
                                if ObjAux is not None:  # the record was found
                                    for attr, value in Objs[t].__dict__.items():
                                        # for the fields present in the temporary object (i.e. in the TSV)
                                        if hasattr(ObjAux, attr) and getattr(ObjAux, attr) != value:
                                            # the value changes: update the record (committed with the object below)
                                            setattr(ObjAux, attr, value)
                                    setattr(Objs["obj_head"], Ids[t]["pk"], Ids[t]["ID"][Objs[t].orig_id])
                            else:  # copy the data of the object into the new record
                                for attr, value in Objs[t].__dict__.items():
                                    if hasattr(ObjsNew[t], attr):
                                        setattr(ObjsNew[t], attr, value)
                                Objs[t] = ObjsNew[t]
                                Objs[t].projid = self.param.ProjectId
                                db.session.add(Objs[t])
                                db.session.commit()
                                Ids[t]["ID"][Objs[t].orig_id] = getattr(Objs[t], Ids[t]["pk"])
                                setattr(Objs["obj_head"], Ids[t]["pk"], Ids[t]["ID"][Objs[t].orig_id])
                                logging.info("IDS %s %s", t, Ids[t])
                    # Look the object up when this row is a complementary image
                    if Objs["obj_field"].orig_id in self.ExistingObject:
                        # Update the fields
                        ObjField = database.ObjectsFields.query.filter_by(
                            objfid=self.ExistingObject[Objs["obj_field"].orig_id]).first()
                        for attr, value in Objs["obj_field"].__dict__.items():
                            if hasattr(ObjField, attr):
                                setattr(ObjField, attr, value)
                        # Update the header
                        ObjHead = database.Objects.query.filter_by(
                            objid=self.ExistingObject[Objs["obj_field"].orig_id]).first()
                        for attr, value in Objs["obj_head"].__dict__.items():
                            if hasattr(ObjHead, attr):
                                setattr(ObjHead, attr, value)
                        db.session.commit()
                    else:  # or create the object
                        logging.warning("Object '%s' not found, can't be updated, row skipped",
                                        Objs["obj_field"].orig_id)
                        continue
                        # Objs["obj_head"].projid=self.param.ProjectId
                        # Objs["obj_head"].random_value=random.randint(1,99999999)
                        # Objs["obj_head"].img0id=Objs["image"].imgid
                        # db.session.add(Objs["obj_head"])
                        # db.session.commit()
                        # Objs["obj_field"].objfid=Objs["obj_head"].objid
                        # db.session.add(Objs["obj_field"])
                        # db.session.commit()
                        # self.ExistingObject[Objs["obj_field"].orig_id]=Objs["obj_head"].objid
                        # triggers a select of the object unless 'expire_on_commit':False
                    if (TotalRowCount % 100) == 0:
                        self.UpdateProgress(100 * TotalRowCount / self.param.TotalRowCount,
                                            "Processing files %d/%d" % (TotalRowCount, self.param.TotalRowCount))
                logging.info("File %s : %d rows processed", relname.as_posix(), RowCount)
    db.session.commit()
    self.pgcur.execute("""update obj_head o set imgcount=(select count(*) from images where objid=o.objid)
        ,img0id=(select imgid from images where objid=o.objid order by imgrank asc limit 1)
        where projid=""" + str(self.param.ProjectId))
    self.pgcur.connection.commit()
    self.pgcur.execute("""update samples s set latitude=sll.latitude,longitude=sll.longitude
        from (select o.sampleid,min(o.latitude) latitude,min(o.longitude) longitude
              from obj_head o
              where projid=%(projid)s and o.latitude is not null and o.longitude is not null
              group by o.sampleid) sll
        where s.sampleid=sll.sampleid and projid=%(projid)s """, {'projid': self.param.ProjectId})
    self.pgcur.connection.commit()
    appli.project.main.RecalcProjectTaxoStat(Prj.projid)
    appli.project.main.UpdateProjectStat(Prj.projid)
    self.task.taskstate = "Done"
    self.UpdateProgress(100, "Processing done")
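
# The AstralCache block above memoizes a single (date, time, long, lat) key, which pays
# off because consecutive TSV rows usually share the same sample position. A generic
# sketch of the same idea (illustrative only, not shared with the code above):
def make_last_value_cache(compute):
    """Wrap `compute` so a repeated call with the same arguments reuses the last result."""
    last = {'key': None, 'value': None}
    def wrapper(*args):
        if args != last['key']:
            last['key'] = args
            last['value'] = compute(*args)
        return last['value']
    return wrapper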
def ExploreLoadRightPane():
    # retrieve the display parameters
    Filt = {}
    for k, v in FilterList.items():
        Filt[k] = gvp(k, v)
    ipp = int(Filt["ipp"])
    zoom = int(Filt["zoom"])
    t = ["<a name='toppage'/>"]
    sqlparam = {'projid': gvp("projid")}
    # sql="""select o.objid,t.name taxoname,o.classif_qual,u.name classifwhoname,i.file_name
    #         ,i.height,i.width,i.thumb_file_name,i.thumb_height,i.thumb_width
    #         ,o.depth_min,o.depth_max,s.orig_id samplename,o.objdate,to_char(o.objtime,'HH24:MI') objtime
    #         ,o.latitude,o.orig_id,o.imgcount
    #         from objects o
    #         left Join images i on o.img0id=i.imgid
    #         left JOIN taxonomy t on o.classif_id=t.id
    #         LEFT JOIN users u on o.classif_who=u.id
    #         LEFT JOIN samples s on o.sampleid=s.sampleid
    #         where o.classif_qual='V'
    # """
    whereclause = ""
    sql = """select o.objid,o.classif_qual ,o.objdate,to_char(o.objtime,'HH24:MI') objtime
            ,o.imgcount,o.img0id,o.classif_id,o.classif_who,o.sampleid,random_value,o.projid
            from obj_head o
            where o.classif_qual='V' """
    if gvp("taxo[]"):
        taxoids = ",".join(str(int(x)) for x in request.form.getlist("taxo[]"))
        if gvp("taxochild") == "1":
            # whereclause+=""" and o.classif_id in (WITH RECURSIVE rq(id) as ( select id FROM taxonomy where id in(%s)
            #         union
            #         SELECT t.id FROM rq JOIN taxonomy t ON rq.id = t.parent_id and (t.nbrobjcum>0 or t.nbrobj>0)
            #     ) select id from rq)"""%(taxoids,)
            # On small numbers of references, the IN list performs better than the subquery
            taxoids = ",".join(str(int(x[0])) for x in GetAll(
                """WITH RECURSIVE rq(id) as (
                        select id FROM taxonomy where id in(%s)
                        union
                        SELECT t.id FROM rq JOIN taxonomy t ON rq.id = t.parent_id
                    ) select id from rq """ % (taxoids,)))
            whereclause += " and o.classif_id in (" + taxoids + ")"
            # the optimisation "and (t.nbrobjcum>0 or t.nbrobj>0)" was removed because it yields wrong results
        else:
            whereclause += " and o.classif_id in (" + taxoids + ")"
    if gvp("MapN") != "" and gvp("MapW") != "" and gvp("MapE") != "" and gvp("MapS") != "":
        whereclause += " and o.latitude between %(MapS)s and %(MapN)s and o.longitude between %(MapW)s and %(MapE)s "
        sqlparam['MapN'] = gvp("MapN")
        sqlparam['MapW'] = gvp("MapW")
        sqlparam['MapE'] = gvp("MapE")
        sqlparam['MapS'] = gvp("MapS")
    if gvp("depthmin") != "" and gvp("depthmax") != "":
        whereclause += " and o.depth_min between %(depthmin)s and %(depthmax)s and o.depth_max between %(depthmin)s and %(depthmax)s "
        sqlparam['depthmin'] = gvp("depthmin")
        sqlparam['depthmax'] = gvp("depthmax")
    if gvp("samples") != "":
        whereclause += " and o.sampleid= any (%(samples)s) "
        sqlparam['samples'] = [int(x) for x in gvp("samples").split(',')]
    if gvp("instrum") != "":
        whereclause += (" and o.acquisid in (select acquisid from acquisitions where instrument ilike %(instrum)s "
                        + ("and projid= any (%(projid)s)" if gvp("projid") != "" else "") + " )")
        sqlparam['instrum'] = '%' + gvp("instrum") + '%'
    PageTopProjectLink = MapForProject = None
    if gvp("projid") != "":
        whereclause += " and o.projid= any (%(projid)s) "
        sqlparam['projid'] = [int(x) for x in gvp("projid").split(',')]
        if len(sqlparam['projid']) == 1:
            PageTopProjectLink = ("<p class='bg-info' style='font-size: larger;font-weight: bold;'>"
                                  "You can explore this project in more details on its <a href='/prj/{0}'>dedicated page</a></p>"
                                  ).format(str(sqlparam['projid'][0]))
    if gvp("fromdate") != "":
        whereclause += " and o.objdate>= to_date(%(fromdate)s,'YYYY-MM-DD') "
        sqlparam['fromdate'] = gvp("fromdate")
    if gvp("todate") != "":
        whereclause += " and o.objdate<= to_date(%(todate)s,'YYYY-MM-DD') "
        sqlparam['todate'] = gvp("todate")
    if gvp("month") != "":
        whereclause += " and extract(month from o.objdate) = any (%(month)s) "
        sqlparam['month'] = [int(x) for x in gvp("month").split(',')]
    if gvp("daytime") != "":
        whereclause += " and o.sunpos= any (%(daytime)s) "
        sqlparam['daytime'] = [x for x in gvp("daytime").split(',')]
    if gvp("inverttime") == "1":
        if gvp("fromtime") != "" and gvp("totime") != "":
            whereclause += " and (o.objtime<= time %(fromtime)s or o.objtime>= time %(totime)s)"
            sqlparam['fromtime'] = gvp("fromtime")
            sqlparam['totime'] = gvp("totime")
    else:
        if gvp("fromtime") != "":
            whereclause += " and o.objtime>= time %(fromtime)s "
            sqlparam['fromtime'] = gvp("fromtime")
        if gvp("totime") != "":
            whereclause += " and objtime<= time %(totime)s "
            sqlparam['totime'] = gvp("totime")
    sql += whereclause
    ExtraEndScript = ""
    if whereclause == "":  # no filter at all: pick a random project
        randomproject = GetAll("select projid,title from projects where visible=true and pctvalidated>1 order by random() limit 1")
        if randomproject:
            randomproject = randomproject[0]
            MapForProject = str(randomproject[0])
            sql += " and o.projid= %s " % (randomproject[0])
            ExtraEndScript = """$('#headersubtitle').html('Randomly selected project : <a href="?projid={0}">{1}</a>');""".format(
                randomproject['projid'], XSSEscape(randomproject['title']))
    sql += " order by random_value Limit %d" % (2 * ipp,)
    # for better performance there is no random() here any more; 2x ipp rows are taken to create some randomness
    # sql+=" Limit %d"%(20*ipp,)  # disabled after the objects table split, but may have to come back
    # filt_fromdate,#filt_todate
    sql = """select o.*,t.display_name taxoname,u.name classifwhoname,i.file_name,s.orig_id samplename
        ,i.height,i.width,i.thumb_file_name,i.thumb_height,i.thumb_width,ofi.orig_id
        from (""" + sql + """)o
        left Join images i on o.img0id=i.imgid
        left JOIN taxonomy t on o.classif_id=t.id
        LEFT JOIN users u on o.classif_who=u.id
        LEFT JOIN samples s on o.sampleid=s.sampleid
        left Join obj_field ofi on ofi.objfid=o.objid
        where o.projid in (select projid from projects where visible=true)"""
    # if whereclause!="":  # no global random sort when there is no criterion, performance impact
    sql += " order by random_value "
    # sql+=" order by random() "
    sql += " Limit %d " % (ipp,)
    res = GetAll(sql, sqlparam, debug=False, doXSSEscape=True)
    trcount = 1
    LineStart = ""
    if (PageTopProjectLink):
        t.append(PageTopProjectLink)
    if MapForProject:
        t.append(render_template("search/explore_inserted_popup.html", Projid=MapForProject))
    t.append("<table class=imgtab><tr id=tr1>" + LineStart)
    WidthOnRow = 0
    # retrieve and adjust the dimensions of the display area
    try:
        PageWidth = int(gvp("resultwidth")) - 40  # leave some margin on the right, plus the scrollbar
        if PageWidth < 200:
            PageWidth = 200
    except:
        PageWidth = 200
    try:
        WindowHeight = int(gvp("windowheight")) - 100  # remove the top banner
        if WindowHeight < 200:
            WindowHeight = 200
    except:
        WindowHeight = 200
    # print("PageWidth=%s, WindowHeight=%s"%(PageWidth,WindowHeight))
    # Compute the dimensions and display the images
    for r in res:
        filename = r['file_name']
        origwidth = r['width']
        origheight = r['height']
        thumbfilename = r['thumb_file_name']
        thumbwidth = r['thumb_width']
        if origwidth is None:
            # no image attached; not quite normal, but happens for subsets created without images
            width = 80
            height = 40
        else:
            width = origwidth * zoom // 100
            height = origheight * zoom // 100
            if max(width, height) < 120:  # below 120 px per side, scaling is no longer applied
                if max(origwidth, origheight) < 120:
                    width = origwidth  # a small original image is displayed as-is
                    height = origheight
                elif max(origwidth, origheight) == origwidth:
                    width = 120
                    height = origheight * 120 // origwidth
                    if height < 1:
                        height = 1
                else:
                    height = 120
                    width = origwidth * 120 // origheight
                    if width < 1:
                        width = 1
        # Cap the images so they always fit on the screen
        if width > PageWidth:
            width = PageWidth
            height = math.trunc(r['height'] * width / r['width'])
            if height == 0:
                height = 1
        if height > WindowHeight:
            height = WindowHeight
            width = math.trunc(r['width'] * height / r['height'])
            if width == 0:
                width = 1
        if WidthOnRow != 0 and (WidthOnRow + width) > PageWidth:
            trcount += 1
            t.append("</tr></table><table class=imgtab><tr id=tr%d>%s" % (trcount, LineStart))
            WidthOnRow = 0
        cellwidth = width + 22
        if cellwidth < 80:
            cellwidth = 80  # assume at least 80, as with the labels it is rarely less
        # Put the zoom window where there is the most room, knowing it is 400px wide and
        # therefore cannot be docked to the left of the first images.
        if (WidthOnRow + cellwidth) > (PageWidth / 2):
            pos = 'left'
        else:
            pos = 'right'
        # If the displayed image is smaller than the thumbnail, display the thumbnail.
        if thumbwidth is None or thumbwidth < width or thumbfilename is None:
            # otherwise (the thumbnail is smaller than the image to display)
            thumbfilename = filename  # the thumbnail is the image itself
        txt = "<td width={0}>".format(cellwidth)
        if filename:
            txt += "<img class='lazy' id=I{3} data-src='/vault/{5}' data-zoom-image='{0}' width={1} height={2} pos={4}>" \
                .format(filename, width, height, r['objid'], pos, thumbfilename)
        else:
            txt += "No Image"
        # Build the popover giving a few details about the image
        poptitletxt = "%s" % (r['orig_id'],)
        # poptxt="%s"%(r['taxoname'],)
        poptxt = ""
        if r['classifwhoname'] != "":
            poptxt += "<em>by</em> %s" % (r['classifwhoname'])
        poptxt += "<br><em>in</em> " + ntcv(r['samplename'])
        popattribute = "data-title=\"{0}\" data-content=\"{1}\" data-placement='{2}'".format(
            poptitletxt, poptxt, 'left' if WidthOnRow > 500 else 'right')
        txt += """<div class='subimg {1}' {2}>
                    <div class='taxo'>{0}</div>
                  </div>
                  <div class=ddet><span class=ddets><span class='glyphicon glyphicon-eye-open'></span> {3}</div>""" \
            .format(r['taxoname'], "", popattribute,
                    "(%d)" % (r['imgcount'],) if r['imgcount'] is not None and r['imgcount'] > 1 else "")
        txt += "</td>"
        # WidthOnRow+=max(cellwidth,80)  # add at least 80, as with the labels it is rarely less
        WidthOnRow += cellwidth + 5  # 5 = border-spacing = inter-image gap
        t.append(txt)
    t.append("</tr></table>")
    if len(res) == 0:
        t.append("""<div class='alert alert-warning' role='alert' style='margin: 10px;font-size:larger;' >
            <span class='glyphicon glyphicon-remove-sign' style='color:#DD2222;'></span>
            Your selection does not return any object.
            <br>It may be too limited by the selected filters.
            <br>Try again releasing the selection criteria</div>""")
    t.append("""
    <script>
    PostAddImages();
    %s
    </script>""" % ExtraEndScript)
    return "\n".join(t)
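
# Sketch of the image sizing rule implemented above, extracted as a pure function for
# readability (a hypothetical helper that mirrors the logic, not shared with it): apply
# the zoom factor, then below 120px per side scale the longest side back up to 120px,
# except for originals that are already smaller than 120px.
def scaled_size(origwidth, origheight, zoom):
    width, height = origwidth * zoom // 100, origheight * zoom // 100
    if max(width, height) < 120:
        if max(origwidth, origheight) < 120:
            return origwidth, origheight  # small originals are displayed as-is
        if origwidth >= origheight:
            return 120, max(1, origheight * 120 // origwidth)
        return max(1, origwidth * 120 // origheight), 120
    return width, height

# Example: scaled_size(1000, 500, 10) == (120, 60) rather than (100, 50).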
def doimportmassupdate():
    # test with D:\temp\Downloads\taxoexport_20181228_101007.tsv
    txt = ""
    uploadfile = request.files.get("fichier")
    if uploadfile is None:
        return PrintInCharte(FormatError("You must send a file"))
    app.logger.info('Load file {} by {}'.format(uploadfile.filename, current_user))
    # create a temporary file that deletes itself automatically
    tmpfile = tempfile.TemporaryFile(mode='w+b')
    # app.logger.info('TMP file is {}'.format(tmpfile.name))
    uploadfile.save(tmpfile)  # copy the content into it
    tmpfile.seek(0)  # rewind to the beginning
    f = TextIOWrapper(tmpfile, encoding='ascii', errors='replace')  # convert from binary to text
    rdr = csv.DictReader(f, delimiter='\t', quotechar='"')  # open as a dictionary reader
    champs = rdr.fieldnames
    app.logger.info("Loading file with these columns : %s" % str(champs))
    if 'id' not in champs:
        return PrintInCharte(FormatError("A column named 'id' is required"))
    IdList = {x['id'] for x in database.GetAll("select id from taxonomy")}
    InstanceList = {x['id'] for x in database.GetAll("select id from ecotaxainst")}
    rowcount = 0
    Errors = []
    UpdatedTaxon = []
    UpdatableCols = ['parent_id', 'name', 'taxotype', 'taxostatus', 'id_source', 'source_url',
                     'source_desc', 'creator_email', 'creation_datetime', 'id_instance', 'rename_to']
    # 'lastupdate_datetime', 'display_name',
    for r in rdr:
        rowcount += 1
        id = int(r['id'])
        taxon = database.Taxonomy.query.filter_by(id=id).first()
        if taxon is None:
            Errors.append("id {} does not exist in the database".format(id))
            continue
        valueschanged = False
        SkipRow = False
        for c in champs:
            if c in UpdatableCols:
                oldvalue = str(ntcv(getattr(taxon, c))).replace('\r', '')
                newvalue = r[c].replace("\\n", "\n").strip()
                if c in ('parent_id', 'rename_to') and newvalue != '':
                    if int(newvalue) not in IdList:
                        Errors.append("id {} : {} {} does not exist in the database".format(id, c, newvalue))
                        SkipRow = True
                        continue
                if c == 'taxotype':
                    if newvalue not in ('P', 'M'):
                        Errors.append("id {} : Invalid taxotype {} ".format(id, newvalue))
                        SkipRow = True
                        continue
                if c == 'taxostatus':
                    if newvalue not in database.TaxoStatus:
                        Errors.append("id {} : Invalid status {} ".format(id, newvalue))
                        SkipRow = True
                        continue
                if c == 'id_instance' and newvalue != '':
                    if int(newvalue) not in InstanceList:
                        Errors.append("id {} : {} is not a valid instance id".format(id, newvalue))
                        SkipRow = True
                        continue
                if oldvalue != newvalue:
                    valueschanged = True
                    setattr(taxon, c, newvalue)
                    app.logger.info("id {} : update {} to {}".format(id, oldvalue, newvalue))
        if SkipRow:
            continue
        if valueschanged:
            # db.session.add(taxon)
            UpdatedTaxon.append(id)
            taxon.lastupdate_datetime = datetime.datetime.utcnow()
            db.session.commit()
        # app.logger.info(str(r))
    if len(UpdatedTaxon) > 0:
        ComputeDisplayName(UpdatedTaxon)
        txt += "<p style='color: green'> %s taxa updated </p> " % (len(UpdatedTaxon))
    if len(Errors):
        txt += "<p style='color: red'> %s errors <ul> " % (len(Errors))
        txt += "\n".join("<li>%s</li>" % x for x in Errors)
        txt += "</ul></p> "
    txt += "<a href='/browsetaxo/' class='btn btn-primary'><i class='fas fa-arrow-left'></i> Back to Browse Taxonomy</a>"
    g.bodydivmargin = "10px"
    return PrintInCharte(txt)
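
# For reference, a minimal input accepted by the mass-update view above: a TSV with an
# 'id' column plus any subset of the updatable columns. The ids below are made up; a
# real file must reference existing taxonomy rows.
EXAMPLE_UPDATE_TSV = ("id\tname\tparent_id\n"
                      "12345\tCopepoda\t67890\n")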
def ComputeDisplayName(TaxoList):
    """
    Compute the display_name column in the database, for the list of provided ids
    :param TaxoList:
    :return: None
    """
    # First version, kept for reference:
    # sql="""with duplicate as (select name from taxonomy GROUP BY name HAVING count(*)>1),
    # nt as (select t.id,case when t.name like '%% %%' or p.id is null then t.name
    #                         when t.taxotype='M' and P.taxotype='M' then concat(p2.name||'>',p.name||'>',t.name)
    #                         when t.name in (select name from duplicate) then concat(p.name||'>',t.name)
    #                         else t.name end
    #                   ||case when t.taxostatus='D' then ' (Deprecated)' else '' end
    #                   newname
    #       from taxonomy t
    #       left JOIN taxonomy p on t.parent_id=p.id
    #       left JOIN taxonomy p2 on p.parent_id=p2.id
    #       where (t.id = any ( %(taxo)s ) or p.id = any ( %(taxo)s ) or p2.id = any ( %(taxo)s ))
    # )
    # update public.taxonomy t set display_name=newname,lastupdate_datetime=to_timestamp(%(ts)s,'YYYY-MM-DD HH24:MI:SS')
    # from nt
    # where nt.id=t.id and display_name IS DISTINCT FROM newname """
    #
    # For each name, we try to determine at which height there are no more duplicates.
    # With more than 2 duplicates this can cause inflation, because the longest form is taken for all of them
    # even when it is not necessary, e.g. A<B, A<C<D, A<C<E : A<B would be needlessly lengthened to A<B<X.
    # sql="""
    # with duplicate as (
    #     select t.name, count(distinct t.id) cid,
    #            count(distinct concat(t.name,'<'||p.name)) c2,
    #            count(distinct concat(t.name,'<'||p.name,'<'||p2.name)) c3,
    #            count(distinct concat(t.name,'<'||p.name,'<'||p2.name,'<'||p3.name)) c4
    #     from taxonomy t
    #     left JOIN taxonomy p on t.parent_id=p.id
    #     left JOIN taxonomy p2 on p.parent_id=p2.id
    #     left JOIN taxonomy p3 on p2.parent_id=p3.id
    #     group by t.name
    #     having count(distinct t.id)>1 )
    # ,nt as (select t.id,case when d.name is null then t.name
    #                          when cid=c2 then concat(t.name,'<'||p.name)
    #                          when cid=c3 then concat(t.name,'<'||p.name,'<'||p2.name)
    #                          else concat(t.name,'<'||p.name,'<'||p2.name,'<'||p3.name)
    #                     end newname
    #         from taxonomy t
    #         left JOIN duplicate d on t.name=d.name
    #         left JOIN taxonomy p on t.parent_id=p.id
    #         left JOIN taxonomy p2 on p.parent_id=p2.id
    #         left JOIN taxonomy p3 on p2.parent_id=p3.id
    #         where (t.id = any ( %(taxo)s ) or p.id = any ( %(taxo)s ) or p2.id = any(%(taxo)s) or p3.id = any(%(taxo)s) )
    # )
    # update public.taxonomy t set display_name=newname
    # from nt
    # where nt.id=t.id
    # """
    # database.ExecSQL(sql,{'taxo':TaxoList,'ts':datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')})

    # Recompute all duplicates + the taxa with no name + the taxa sharing a name with the
    # requested ids anywhere in their 3-level lineage.
    sql = """with duplicate as (select lower(name) as name from taxonomy GROUP BY lower(name) HAVING count(*)>1)
    select t.id,t.name tname,p.name pname,p2.name p2name,p3.name p3name,t.display_name,t.taxostatus
    from taxonomy t
    left JOIN duplicate d on lower(t.name)=d.name
    left JOIN taxonomy p on t.parent_id=p.id
    left JOIN taxonomy p2 on p.parent_id=p2.id
    left JOIN taxonomy p3 on p2.parent_id=p3.id
    where d.name is not null or t.display_name is null
       or lower(t.name) in (select lower(st.name)
                            from taxonomy st
                            left JOIN taxonomy sp on st.parent_id=sp.id
                            left JOIN taxonomy sp2 on sp.parent_id=sp2.id
                            left JOIN taxonomy sp3 on sp2.parent_id=sp3.id
                            where (st.id=any(%(taxo)s) or sp.id=any(%(taxo)s) or sp2.id=any(%(taxo)s) or sp3.id=any(%(taxo)s) ) )
    """
    Duplicates = database.GetAll(sql, {'taxo': TaxoList},
                                 cursor_factory=psycopg2.extras.RealDictCursor)
    starttime = datetime.datetime.now()
    DStats = {}

    def AddToDefStat(clestat):
        clestat = clestat.lower()
        if clestat in DStats:
            DStats[clestat] += 1
        else:
            DStats[clestat] = 1

    for D in Duplicates:
        cle = ntcv(D['tname'])
        AddToDefStat(cle)
        cle += '<' + ntcv(D['pname'])
        AddToDefStat(cle)
        cle += '<' + ntcv(D['p2name'])
        AddToDefStat(cle)
        cle += '<' + ntcv(D['p3name'])
        AddToDefStat(cle)
    for i, D in enumerate(Duplicates):
        cle = ntcv(D['tname'])
        if DStats[cle.lower()] == 1:
            Duplicates[i]['newname'] = cle
        else:
            cle += '<' + ntcv(D['pname'])
            if DStats[cle.lower()] == 1:
                Duplicates[i]['newname'] = cle
            else:
                cle += '<' + ntcv(D['p2name'])
                if DStats[cle.lower()] == 1:
                    Duplicates[i]['newname'] = cle
                else:
                    cle += '<' + ntcv(D['p3name'])
                    Duplicates[i]['newname'] = cle
        if D['taxostatus'] == 'D':
            Duplicates[i]['newname'] += " (Deprecated)"
    app.logger.debug("Compute time %s ", (datetime.datetime.now() - starttime).total_seconds())
    starttime = datetime.datetime.now()
    UpdateParam = []
    for D in Duplicates:
        if D['display_name'] != D['newname']:
            UpdateParam.append((int(D['id']), D['newname']))
    if len(UpdateParam) > 0:
        cur = g.db.cursor()
        psycopg2.extras.execute_values(cur,
            """UPDATE taxonomy SET display_name = data.pdisplay_name
                     ,lastupdate_datetime=to_timestamp('{}','YYYY-MM-DD HH24:MI:SS')
               FROM (VALUES %s) AS data (pid, pdisplay_name)
               WHERE id = data.pid""".format(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')),
            UpdateParam)
        cur.connection.commit()
        cur.close()
    app.logger.debug("Update time %s for %d rows",
                     (datetime.datetime.now() - starttime).total_seconds(), len(UpdateParam))
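
# A self-contained sketch of the disambiguation rule implemented above: each name is
# extended with its parent lineage ('name<parent<grandparent<...') only as far as needed
# for the prefix to become unique among homonyms, capped at the available depth. The
# helper and its tuples are illustrative, not shared with ComputeDisplayName.
def disambiguate(rows):
    """rows: list of lineage tuples, e.g. (name, parent, grandparent)."""
    counts = {}
    for row in rows:
        for depth in range(1, len(row) + 1):
            key = "<".join(row[:depth]).lower()
            counts[key] = counts.get(key, 0) + 1
    out = []
    for row in rows:
        for depth in range(1, len(row) + 1):
            key = "<".join(row[:depth])
            if counts[key.lower()] == 1 or depth == len(row):
                out.append(key)
                break
    return out

# Example: disambiguate([("Oncaea", "Oncaeidae"), ("Oncaea", "Incertae sedis")])
#          == ["Oncaea<Oncaeidae", "Oncaea<Incertae sedis"]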
def DoFullSync():
    txt = ""
    try:
        UpdatableCols = ['parent_id', 'name', 'taxotype', 'taxostatus', 'id_source', 'id_instance',
                         'rename_to', 'display_name', 'source_desc', 'source_url',
                         'creation_datetime', 'creator_email']
        MaxUpdate = database.GetAll(
            "select coalesce(max(lastupdate_datetime),to_timestamp('2000-01-01','YYYY-MM-DD')) lastupdate from taxonomy")
        MaxUpdateDate = MaxUpdate[0]['lastupdate']
        j = request_withinstanceinfo("/gettaxon/", {'filtertype': 'since', 'startdate': MaxUpdateDate})
        if 'msg' in j:
            return appli.ErrorFormat("Sync Error :" + j['msg'])
        NbrRow = len(j)
        NbrUpdate = NbrInsert = 0
        txt += "Received {} rows<br>".format(NbrRow)
        if (NbrRow > 0):
            txt += "Taxo 0 = {}<br>".format(j[0])
        for jtaxon in j:
            taxon = database.Taxonomy.query.filter_by(id=int(jtaxon['id'])).first()
            lastupdate_datetime = datetime.datetime.strptime(jtaxon['lastupdate_datetime'], '%Y-%m-%d %H:%M:%S')
            if taxon:
                if taxon.lastupdate_datetime == lastupdate_datetime:
                    continue  # already up to date
                NbrUpdate += 1
            else:
                if ntcv(jtaxon['rename_to']) != '':
                    continue  # don't insert a taxon that should be renamed
                if ntcv(jtaxon['taxostatus']) == 'D':
                    continue  # don't insert taxa that are deprecated and planned for deletion
                NbrInsert += 1
                taxon = database.Taxonomy()
                taxon.id = int(jtaxon['id'])
                db.session.add(taxon)
            for c in UpdatableCols:
                setattr(taxon, c, jtaxon[c])
            taxon.lastupdate_datetime = lastupdate_datetime
            db.session.commit()
        # Manage rename_to
        sqlbase = "with taxorename as (select id,rename_to from taxonomy where rename_to is not null) "
        sql = sqlbase + """select distinct projid from obj_head o join taxorename tr on o.classif_id=tr.id """
        ProjetsToRecalc = database.GetAll(sql)
        sql = sqlbase + """update obj_head o set classif_id=tr.rename_to from taxorename tr where o.classif_id=tr.id """
        NbrRenamedObjects = ExecSQL(sql)
        sql = sqlbase + """update obj_head o set classif_auto_id=tr.rename_to from taxorename tr where o.classif_auto_id=tr.id """
        ExecSQL(sql)
        sql = sqlbase + """update objectsclassifhisto o set classif_id=tr.rename_to from taxorename tr where o.classif_id=tr.id """
        ExecSQL(sql)
        # delete the taxa that had to be renamed, as they normally have been by now
        sql = """delete from taxonomy where rename_to is not null """
        ExecSQL(sql)
        sql = """delete from taxonomy t where taxostatus='D' and not exists(select 1 from projects_taxo_stat where id=t.id) """
        ExecSQL(sql)
        # projects_taxo_stat and part_histocat, part_histocat_lst must be recomputed for the projects
        # that referenced a renamed, hence deleted, taxon
        if NbrRenamedObjects > 0:
            # cron.RefreshTaxoStat() is a very long operation (around 5 minutes in production), be more selective
            # recompute projects_taxo_stat
            for Projet in ProjetsToRecalc:
                appli.project.main.RecalcProjectTaxoStat(Projet['projid'])
            # recompute part_histocat, part_histocat_lst
            appli.part.prj.GlobalTaxoCompute()
        flash("Received {} rows, Insertions: {}, Updates: {}".format(NbrRow, NbrInsert, NbrUpdate), "success")
        if gvp('updatestat') == 'Y':
            msg = DoSyncStatUpdate()
            flash("Taxon statistics update : " + msg, "success" if msg == 'ok' else 'error')
        # txt="<script>location.reload(true);</script>"  # no: it would re-trigger the POST of the initial arrival
        txt = "<script>window.location=window.location;</script>"
    except:
        msg = "Error while syncing {}".format(sys.exc_info())
        app.logger.error(msg)
        txt += appli.ErrorFormat(msg)
    return txt
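
# Sketch of the incremental-sync idea used above (illustrative, not shared code): the
# local maximum lastupdate_datetime acts as a watermark, so each run only pulls rows
# modified since the previous run, and replaying a batch is harmless because rows whose
# timestamp already matches are skipped.
def apply_batch(local_rows, remote_rows):
    """local_rows / remote_rows: dicts mapping id -> lastupdate timestamp."""
    to_apply = {rid: ts for rid, ts in remote_rows.items() if local_rows.get(rid) != ts}
    local_rows.update(to_apply)
    return len(to_apply)  # number of inserts + updates actually applied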
def QuestionProcess(self): if not (current_user.has_role(database.AdministratorLabel) or current_user.has_role(database.ProjectCreatorLabel)): return PrintInCharte( "ACCESS DENIED for this feature, Admin or privilege creation Required" ) ServerRoot = Path(app.config['SERVERLOADAREA']) txt = "<h1>Database Importation Task</h1>" errors = [] if gvg("restart") == 'Y': # force redemarrage self.task.taskstep = 1 self.UpdateParam() if self.task.taskstep == 0: # ################## Question Creation txt += "<h3>Task Creation</h3>" if gvp('starttask') == "Y": FileToSave = None FileToSaveFileName = None # Verifier la coherence des données uploadfile = request.files.get("uploadfile") if uploadfile is not None and uploadfile.filename != '': # import d'un fichier par HTTP FileToSave = uploadfile # La copie est faite plus tard, car à ce moment là, le repertoire de la tache n'est pas encore créé FileToSaveFileName = "uploaded.zip" self.param.InData = "uploaded.zip" elif len(gvp("ServerPath")) < 2: errors.append("Input Folder/File Too Short") else: sp = ServerRoot.joinpath(Path(gvp("ServerPath"))) if not sp.exists(): #verifie que le repertoire existe errors.append("Input Folder/File Invalid") else: self.param.InData = sp.as_posix() if len(errors) > 0: for e in errors: flash(e, "error") else: return self.StartTask( self.param, FileToSave=FileToSave, FileToSaveFileName=FileToSaveFileName) return render_template('task/importdb_create.html', header=txt, data=self.param, ServerPath=gvp("ServerPath")) newschema = self.GetWorkingSchema() # self.task.taskstep=1 if self.task.taskstep == 1: # ################## Question Post Import DB if gvg("src") == "": # il faut choisir le projet source txt += "<h3>Select project to import</h3>" PrjList = GetAll( "select projid,title,status from {0}.projects order by lower(title)" .format(newschema), cursor_factory=None) txt += """<table class='table table-condensed table-bordered' style='width:600px;'>""" for r in PrjList: txt += "<tr><td><a class='btn btn-primary' href='?src={0}'>Select {0}</a></td><td>{1}</td><td>{2}</td></tr>".format( *r) txt += """</table>""" return PrintInCharte(txt) if gvg("dest") == "": # il faut choisir le projet destination if current_user.has_role(database.AdministratorLabel): PrjList = GetAll( "select projid,title,status from projects order by lower(title)", cursor_factory=None) else: PrjList = GetAll("""select p.projid,title,status from projects p join projectspriv pp on p.projid = pp.projid where member=%s and privilege='Manage' order by lower(title)""", [current_user.id], cursor_factory=None) txt += "<h3>Select project destination project</h3> or <a class='btn btn-primary' href='?src={0}&dest=new'>New project</a>".format( gvg("src")) txt += """<table class='table table-condensed table-bordered' style='width:600px;'>""" for r in PrjList: txt += "<tr><td><a class='btn btn-primary' href='?src={0}&dest={1}'>Select {1}</a></td><td>{2}</td><td>{3}</td></tr>".format( gvg("src"), *r) txt += """</table>""" return PrintInCharte(txt) if gvg( "prjok" ) == "": # Creation du projet destination ou MAJ des attributs & detection des Taxo&Users Founds SrcPrj = GetAll( "select * from {0}.projects where projid={1}".format( newschema, gvg("src")), cursor_factory=RealDictCursor) if gvg("dest") == "new": # Creation du projet destination Prj = database.Projects() Prj.title = "IMPORT " + SrcPrj[0]['title'] db.session.add(Prj) db.session.commit() PrjPriv = database.ProjectsPriv() PrjPriv.projid = Prj.projid PrjPriv.member = current_user.id PrjPriv.privilege = "Manage" 
db.session.add(PrjPriv) else: # MAJ du projet Prj = database.Projects.query.filter_by( projid=int(gvg("dest"))).first() NbrObj = GetAll( "select count(*) from obj_head WHERE projid=" + gvg("dest"))[0][0] if NbrObj > 0: flash("Destination project must be empty", 'error') return PrintInCharte( "<a href='#' onclick='history.back();'>Back</a>") for k, v in SrcPrj[0].items(): if k not in ("projid", "title"): setattr(Prj, k, v) db.session.commit() flash( "Project %s:%s created or updated successfuly" % (Prj.projid, Prj.title), 'success') # Controle du mapping Taxo sql = """select DISTINCT t.id,lower(t.name) as name,t.parent_id,lower(t.name)||' ('||coalesce(lower(t2.name),'No Parent')||')' as namefull from {0}.obj_head o join {0}.taxonomy t on o.classif_id=t.id left join {0}.taxonomy t2 on t.parent_id=t2.id where o.projid={1} and t.newid is null union select DISTINCT t.id,lower(t.name) as name,t.parent_id,lower(t.name)||' ('||coalesce(lower(t2.name),'No Parent')||')' as namefull from {0}.obj_head o join {0}.taxonomy t on o.classif_auto_id=t.id left join {0}.taxonomy t2 on t.parent_id=t2.id left join taxonomy tt on t.newid =tt.id where o.projid={1} and tt.id is null order by 2""".format(newschema, gvg("src")) self.param.TaxoFound = GetAssoc(sql, cursor_factory=RealDictCursor, keyid='id', debug=False) app.logger.info("TaxoFound=%s", self.param.TaxoFound) TaxoInDest = GetAssoc2Col( """select t.id,lower(t.name)||' ('||coalesce(lower(t2.name),'No Parent')||')' as name from taxonomy t left join taxonomy t2 on t.parent_id=t2.id where t.id = any (%s)""", (list(self.param.TaxoFound.keys()), )) # print(TaxoInDest) for id, v in self.param.TaxoFound.items(): self.param.TaxoFound[id][ 'newid'] = None # par defaut pas de correspondance if id in TaxoInDest: if TaxoInDest[id].lower() == v['namefull']: self.param.TaxoFound[id][ 'newid'] = id # ID inchangé lst = [ t["name"] for t in self.param.TaxoFound.values() if t["newid"] is None ] # liste des Taxon sans mapping TaxoInDest = GetAssoc2Col( """select lower(t.name)||' ('||coalesce(lower(t2.name),'No Parent')||')' as name,t.id from taxonomy t left join taxonomy t2 on t.parent_id=t2.id where lower(t.name) = any (%s)""", (lst, )) for id, v in self.param.TaxoFound.items(): if v['newid'] is None: if v["namefull"] in TaxoInDest: self.param.TaxoFound[id]['newid'] = TaxoInDest[v[ "namefull"]] # ID du même nom dans la base de destination NotFoundTaxo = [ t["name"] for t in self.param.TaxoFound.values() if t["newid"] is None ] # liste des Taxon sans mapping restant app.logger.info("NotFoundTaxo=%s", NotFoundTaxo) # Controle du mapping utilisateur sql = """select DISTINCT lower(t.name) as name,lower(email) as email,t.password,t.organisation from {0}.obj_head o join {0}.users t on o.classif_who=t.id where o.projid={1} and t.newid is null union select DISTINCT lower(t.name) as name,lower(email) as email,t.password,t.organisation from {0}.projectspriv pp join {0}.users t on pp.member=t.id where pp.projid={1} and t.newid is null """.format(newschema, gvg("src")) self.param.UserFound = GetAssoc(sql, cursor_factory=RealDictCursor, keyid='name') self.pgcur = db.engine.raw_connection().cursor() self.pgcur.execute( "select id,lower(name),lower(email) from users where lower(name) = any(%s) or email= any(%s) ", (list(self.param.UserFound.keys()), [x.get('email') for x in self.param.UserFound.values()])) # Résolution des noms à partir du nom ou de l'email for rec in self.pgcur: for u in self.param.UserFound: if u == rec[1] or self.param.UserFound[u].get( 'email') == rec[2]: 
self.param.UserFound[u]['id'] = rec[0] logging.info("Users Found = %s", self.param.UserFound) NotFoundUser = [ k for k, v in self.param.UserFound.items() if v.get("id") == None ] if len(NotFoundUser) > 0: logging.info("Some Users Not Found = %s", NotFoundUser) self.UpdateParam() # On met à jour la Taxo if len(NotFoundUser) > 0 or len(NotFoundTaxo) > 0: txt += "<a class='btn btn-primary' href='?src={0}&dest={1}&prjok=1'>Continue</a>".format( gvg("src"), Prj.projid) else: txt += "<a class='btn btn-primary' href='?src={0}&dest={1}&prjok=2'>Continue</a>".format( gvg("src"), Prj.projid) return PrintInCharte(txt) if gvg("prjok") == "1" or gvg( "prjok" ) == "2": # 2 pas de mapping requis, simule un start task NotFoundTaxo = [ t["name"] for t in self.param.TaxoFound.values() if t["newid"] is None ] # liste des Taxon sans mapping restant NotFoundUsers = [ k for k, v in self.param.UserFound.items() if v.get("id") == None ] app.logger.info("TaxoFound=%s", self.param.TaxoFound) app.logger.info("NotFoundTaxo=%s", NotFoundTaxo) app.logger.info("NotFoundUser=%s", NotFoundUsers) if gvp('starttask') == "Y" or gvg("prjok") == "2": app.logger.info("Form Data = %s", request.form) # Traitement des reponses sur la taxonomy for i in range(1, 1 + len(NotFoundTaxo)): orig = gvp( "orig%d" % (i) ) #Le nom original est dans origXX et la nouvelle valeur dans taxolbXX origname = gvp( "origname%d" % (i) ) #Le nom original est dans origXX et la nouvelle valeur dans taxolbXX action = gvp("action%d" % (i)) newvalue = gvp("taxolb%d" % (i)) if origname in NotFoundTaxo and action != "": if action == "M": if newvalue == "": errors.append( "Taxonomy Manual Mapping : No Value Selected for '%s'" % (orig, )) else: t = database.Taxonomy.query.filter( database.Taxonomy.id == int( newvalue)).first() app.logger.info(orig + " associated to " + t.name) self.param.TaxoFound[orig]['newid'] = t.id elif action == "U": #create Under if newvalue == "": errors.append( "Taxonomy Manual Mapping : No Parent Value Selected for '%s'" % (orig, )) else: t = database.Taxonomy() t.name = origname t.parent_id = int(newvalue) db.session.add(t) db.session.commit() self.param.TaxoFound[orig]['newid'] = t.id app.logger.info(orig + " created under " + t.name) else: errors.append( "Taxonomy Manual Mapping : No Action Selected for '%s'" % (orig, )) else: errors.append( "Taxonomy Manual Mapping : Invalid value '%s' for '%s'" % (newvalue, orig)) # Traitement des reponses sur les utilisateurs for i in range(1, 1 + len(NotFoundUsers)): orig = gvp( "origuser%d" % (i) ) #Le nom original est dans origuserXX et la nouvelle valeur dans userlbXX action = gvp("useraction%d" % (i)) newvalue = gvp("userlb%d" % (i)) if orig in NotFoundUsers and (newvalue != "" or action == "C"): if action == "C": u = database.users() u.email = self.param.UserFound[orig]["email"] u.name = self.param.UserFound[orig]["name"] u.password = self.param.UserFound[orig][ "password"] u.organisation = ntcv( self.param.UserFound[orig]["organisation"] ) #+" Imported on "+datetime.datetime.now().strftime("%Y-%m-%d %H:%M") u.active = True db.session.add(u) db.session.commit() else: u = database.users.query.filter( database.users.id == int( newvalue)).first() app.logger.info("User " + orig + " associated to " + u.name) self.param.UserFound[orig]['id'] = u.id else: errors.append( "User Manual Mapping : Invalid value '%s' for '%s'" % (newvalue, orig)) app.logger.info("Final Taxofound = %s", self.param.TaxoFound) self.param.ProjectId = int(gvg("dest")) self.param.ProjectSrcId = int(gvg("src")) 
                self.param.IntraStep = 1
                self.UpdateParam()  # persist what has been accepted
                # check data consistency
                if len(errors) == 0:
                    return self.StartTask(self.param, step=2)
                for e in errors:
                    flash(e, "error")
            NotFoundTaxo = [v["name"] for k, v in self.param.TaxoFound.items() if v["newid"] is None]
            NotFoundUsers = [k for k, v in self.param.UserFound.items() if v.get('id') is None]
            app.logger.info("Final NotFoundTaxo = %s", NotFoundTaxo)
            NotFoundTaxoForTemplate = [{'id': v["id"], 'name': v["name"], 'namefull': v["namefull"]}
                                       for k, v in self.param.TaxoFound.items() if v["newid"] is None]
            return render_template('task/importdb_question1.html', header=txt,
                                   taxo=NotFoundTaxoForTemplate, users=NotFoundUsers)
        return PrintInCharte(txt)
    if self.task.taskstep == 2:
        # ################## Post-import question step for a project
        # offer to view the project, clean it up, or import another project
        return PrintInCharte(self.GetDoneExtraAction())
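    # The two answer loops above rely on a positional form-field convention:
    # row i of the question page posts origN/orignameN/actionN/taxolbN (and
    # origuserN/useractionN/userlbN for users). A minimal illustrative sketch
    # of that pairing, assuming `form` is a dict-like object such as
    # request.form; this hypothetical helper is not called by the import code,
    # which reads the same fields inline through gvp().
    @staticmethod
    def _iter_mapping_rows(form, count, names=("orig", "action", "taxolb")):
        # yield one (orig, action, newvalue) tuple per numbered form row
        for i in range(1, count + 1):
            yield tuple(form.get("%s%d" % (n, i), "") for n in names)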
    def SPStep2(self):
        # raise Exception("TEST")
        logging.info("Start Step 2 : Effective data import")
        logging.info("Taxo Mapping = %s", self.param.TaxoFound)
        logging.info("Users Mapping = %s", self.param.UserFound)
        AstralCache = {'date': None, 'time': None, 'long': None, 'lat': None, 'r': ''}
        # update the field mappings stored with the project
        Prj = database.Projects.query.filter_by(projid=self.param.ProjectId).first()
        LoadedFiles = ntcv(Prj.fileloaded).splitlines()
        logging.info("LoadedFiles = %s", LoadedFiles)
        Prj.mappingobj = EncodeEqualList({v['field']: v.get('title')
                                          for k, v in self.param.Mapping.items()
                                          if v['table'] == 'obj_field' and v['field'][0] in ('t', 'n')
                                          and v.get('title') is not None})
        Prj.mappingsample = EncodeEqualList({v['field']: v.get('title')
                                             for k, v in self.param.Mapping.items()
                                             if v['table'] == 'sample' and v['field'][0] in ('t', 'n')
                                             and v.get('title') is not None})
        Prj.mappingacq = EncodeEqualList({v['field']: v.get('title')
                                          for k, v in self.param.Mapping.items()
                                          if v['table'] == 'acq' and v['field'][0] in ('t', 'n')
                                          and v.get('title') is not None})
        Prj.mappingprocess = EncodeEqualList({v['field']: v.get('title')
                                              for k, v in self.param.Mapping.items()
                                              if v['table'] == 'process' and v['field'][0] in ('t', 'n')
                                              and v.get('title') is not None})
        db.session.commit()
        Ids = {"acq": {"tbl": "acquisitions", "pk": "acquisid"},
               "sample": {"tbl": "samples", "pk": "sampleid"},
               "process": {"tbl": "process", "pk": "processid"}}
        # fetch the orig_id of the existing acquisitions, samples and processes
        for i in Ids:
            sql = "select orig_id," + Ids[i]['pk'] + " from " + Ids[i]['tbl'] \
                  + " where projid=" + str(self.param.ProjectId)
            Ids[i]["ID"] = {}
            for r in GetAll(sql):
                Ids[i]["ID"][r[0]] = int(r[1])
        # fetch the object IDs of the project
        self.ExistingObject = {}
        self.pgcur.execute("SELECT o.orig_id,o.objid from objects o where o.projid=" + str(self.param.ProjectId))
        for rec in self.pgcur:
            self.ExistingObject[rec[0]] = rec[1]
        # fetch all the object/image pairs of the project
        self.ExistingObjectAndImage = set()
        if self.param.SkipObjectDuplicate == 'Y':
            # this list is only needed when duplicates are skipped, and must be
            # reloaded from the database because phase 1 added every object to
            # it for duplicate checking
            self.pgcur.execute("SELECT concat(o.orig_id,'*',i.orig_file_name) from images i"
                               " join objects o on i.objid=o.objid where o.projid=" + str(self.param.ProjectId))
            for rec in self.pgcur:
                self.ExistingObjectAndImage.add(rec[0])
        # logging.info("Ids = %s", Ids)
        random.seed()
        sd = Path(self.param.SourceDir)
        TotalRowCount = 0
        for pattern in ("**/ecotaxa*.txt", "**/ecotaxa*.tsv"):
            for CsvFile in sd.glob(pattern):
                relname = CsvFile.relative_to(sd)  # relative name, for display purposes only
                if relname.as_posix() in LoadedFiles and self.param.SkipAlreadyLoadedFile == 'Y':
                    logging.info("File %s skipped, already loaded" % (relname.as_posix()))
                    continue
                logging.info("Analyzing file %s" % (relname.as_posix()))
                with open(CsvFile.as_posix(), encoding='latin_1') as csvfile:
                    # read in dictionary mode, keyed on the first line
                    rdr = csv.DictReader(csvfile, delimiter='\t', quotechar='"')
                    # consume the types line (second line of the file)
                    LType = {champ.strip(" \t").lower(): v for champ, v in next(rdr).items()}
                    ListeChamps = [champ.strip(" \t").lower() for champ in rdr.fieldnames]
                    # load the file content
                    RowCount = 0
                    for rawlig in rdr:
                        lig = {champ.strip(" \t").lower(): v for champ, v in rawlig.items()}
                        Objs = {"acq": database.Acquisitions(),
                                "sample": database.Samples(),
                                "process": database.Process(),
                                "obj_head": database.Objects(),
                                "obj_field": database.ObjectsFields(),
                                "image": database.Images()}
                        RowCount += 1
                        TotalRowCount += 1
                        if 'object_annotation_category_id' in ListeChamps and 'object_annotation_category' in ListeChamps:
                            if CleanValue(lig.get('object_annotation_category_id', '')) != '':
                                del lig['object_annotation_category']  # when an ID is present, ignore the text
                        for champ in ListeChamps:
                            m = self.param.Mapping.get(champ, None)
                            if m is None:
                                continue  # this column is not mapped, skip it
                            FieldName = m.get("field", None)
                            FieldTable = m.get("table", None)
                            FieldValue = None
                            v = CleanValue(lig.get(champ))
                            if v != "":  # when there is no value, leave the column null
                                if m['type'] == 'n':
                                    FieldValue = ToFloat(v)
                                elif champ == 'object_date':
                                    FieldValue = datetime.date(int(v[0:4]), int(v[4:6]), int(v[6:8]))
                                elif champ == 'object_time':
                                    v = v.zfill(6)
                                    FieldValue = datetime.time(int(v[0:2]), int(v[2:4]), int(v[4:6]))
                                elif FieldName == 'classif_when':
                                    v2 = CleanValue(lig.get('object_annotation_time', '000000')).zfill(6)
                                    FieldValue = datetime.datetime(int(v[0:4]), int(v[4:6]), int(v[6:8]),
                                                                   int(v2[0:2]), int(v2[2:4]), int(v2[4:6]))
                                elif FieldName == 'classif_id':  # the numeric variant is handled by the type=n case above
                                    v = self.param.TaxoMap.get(v.lower(), v)  # apply the initial input mapping
                                    FieldValue = self.param.TaxoFound[ntcv(v).lower()]
                                elif FieldName == 'classif_who':
                                    FieldValue = self.param.UserFound[ntcv(v).lower()].get('id', None)
                                elif FieldName == 'classif_qual':
                                    FieldValue = database.ClassifQualRevert.get(v.lower())
                                else:  # plain text field, nothing special
                                    FieldValue = v
                            if FieldTable in Objs:
                                # if FieldName in Objs[FieldTable].__dict__:
                                if hasattr(Objs[FieldTable], FieldName):
                                    setattr(Objs[FieldTable], FieldName, FieldValue)
                                    # logging.info("setattr %s %s %s", FieldTable, FieldName, FieldValue)
                                # else:
                                #     logging.info("skip F %s %s %s", FieldTable, FieldName, FieldValue)
                            else:
                                logging.info("skip T %s %s %s", FieldTable, FieldName, FieldValue)
                        # compute the sun position
                        if not (AstralCache['date'] == Objs["obj_head"].objdate
                                and AstralCache['time'] == Objs["obj_head"].objtime
                                and AstralCache['long'] == Objs["obj_head"].longitude
                                and AstralCache['lat'] == Objs["obj_head"].latitude):
                            AstralCache = {'date': Objs["obj_head"].objdate, 'time': Objs["obj_head"].objtime,
                                           'long': Objs["obj_head"].longitude, 'lat': Objs["obj_head"].latitude,
                                           'r': ''}
                            from astral import AstralError
                            try:
                                AstralCache['r'] = appli.CalcAstralDayTime(AstralCache['date'], AstralCache['time'],
                                                                           AstralCache['lat'], AstralCache['long'])
                            except AstralError as e:
                                # in some places on the globe there is no day/night
                                # transition on some days, which raises an error
                                app.logger.error("Astral error : %s for %s", e, AstralCache)
                            except Exception as e:
                                # other errors, e.g. when the time is not valid
                                app.logger.error("Astral error : %s for %s", e, AstralCache)
                        Objs["obj_head"].sunpos = AstralCache['r']
                        # assign the Sample, Acq & Process IDs, creating the rows when needed
                        for t in Ids:
                            if Objs[t].orig_id is not None:
                                if Objs[t].orig_id in Ids[t]["ID"]:
                                    setattr(Objs["obj_head"], Ids[t]["pk"], Ids[t]["ID"][Objs[t].orig_id])
                                else:
                                    Objs[t].projid = self.param.ProjectId
                                    db.session.add(Objs[t])
                                    db.session.commit()
                                    Ids[t]["ID"][Objs[t].orig_id] = getattr(Objs[t], Ids[t]["pk"])
                                    setattr(Objs["obj_head"], Ids[t]["pk"], Ids[t]["ID"][Objs[t].orig_id])
                                    logging.info("IDS %s %s", t, Ids[t])
                        self.pgcur.execute("select nextval('seq_images')")
                        Objs["image"].imgid = self.pgcur.fetchone()[0]
                        CleExistObj = Objs["obj_field"].orig_id + '*' + Objs["image"].orig_file_name
                        if self.param.SkipObjectDuplicate == 'Y' and CleExistObj in self.ExistingObjectAndImage:
                            continue
                        # look the object up, in case this row is a complementary image
                        if Objs["obj_field"].orig_id in self.ExistingObject:
                            Objs["obj_head"].objid = self.ExistingObject[Objs["obj_field"].orig_id]
                        else:  # otherwise create the object
                            Objs["obj_head"].projid = self.param.ProjectId
                            Objs["obj_head"].random_value = random.randint(1, 99999999)
                            Objs["obj_head"].img0id = Objs["image"].imgid
                            db.session.add(Objs["obj_head"])
                            db.session.commit()
                            Objs["obj_field"].objfid = Objs["obj_head"].objid
                            db.session.add(Objs["obj_field"])
                            db.session.commit()
                            # triggers a SELECT of the object unless 'expire_on_commit': False
                            self.ExistingObject[Objs["obj_field"].orig_id] = Objs["obj_head"].objid
                        # image handling: DB row plus file stored in the Vault
                        Objs["image"].objid = Objs["obj_head"].objid
                        ImgFilePath = CsvFile.with_name(Objs["image"].orig_file_name)
                        VaultFolder = "%04d" % (Objs["image"].imgid // 10000)
                        vaultroot = Path("../../vault")
                        # create the folder hosting the images if needed
                        CreateDirConcurrentlyIfNeeded(vaultroot.joinpath(VaultFolder))
                        vaultfilename = "%s/%04d%s" % (VaultFolder, Objs["image"].imgid % 10000, ImgFilePath.suffix)
                        # the thumbnail format is imposed
                        vaultfilenameThumb = "%s/%04d_mini%s" % (VaultFolder, Objs["image"].imgid % 10000, '.jpg')
                        Objs["image"].file_name = vaultfilename
                        # copy the image file
                        shutil.copyfile(ImgFilePath.as_posix(), vaultroot.joinpath(vaultfilename).as_posix())
                        im = Image.open(vaultroot.joinpath(vaultfilename).as_posix())
                        Objs["image"].width = im.size[0]
                        Objs["image"].height = im.size[1]
                        SizeLimit = app.config['THUMBSIZELIMIT']
                        # generate a thumbnail when the image is too large
                        if (im.size[0] > SizeLimit) or (im.size[1] > SizeLimit):
                            im.thumbnail((SizeLimit, SizeLimit))
                            if im.mode == 'P':
                                im = im.convert("RGB")
                            im.save(vaultroot.joinpath(vaultfilenameThumb).as_posix())
                            Objs["image"].thumb_file_name = vaultfilenameThumb
                            Objs["image"].thumb_width = im.size[0]
                            Objs["image"].thumb_height = im.size[1]
                        # add the image row to the DB
                        if Objs["image"].imgrank is None:
                            Objs["image"].imgrank = 0  # default value
                        db.session.add(Objs["image"])
                        db.session.commit()
                        if (TotalRowCount % 100) == 0:
                            self.UpdateProgress(100 * TotalRowCount / self.param.TotalRowCount,
                                                "Processing files %d/%d" % (TotalRowCount, self.param.TotalRowCount))
                logging.info("File %s : %d rows loaded", relname.as_posix(), RowCount)
                LoadedFiles.append(relname.as_posix())
                Prj.fileloaded = "\n".join(LoadedFiles)
                db.session.commit()
        self.pgcur.execute("""update obj_head o set
                imgcount=(select count(*) from images where objid=o.objid)
                ,img0id=(select imgid from images where objid=o.objid order by imgrank asc limit 1)
            where projid=""" + str(self.param.ProjectId))
        self.pgcur.connection.commit()
        self.pgcur.execute("""update samples s set latitude=sll.latitude,longitude=sll.longitude
            from (select o.sampleid,min(o.latitude) latitude,min(o.longitude) longitude
                  from obj_head o
                  where projid=%(projid)s and o.latitude is not null and o.longitude is not null
                  group by o.sampleid) sll
            where s.sampleid=sll.sampleid and projid=%(projid)s """, {'projid': self.param.ProjectId})
        self.pgcur.connection.commit()
        appli.project.main.RecalcProjectTaxoStat(Prj.projid)
        appli.project.main.UpdateProjectStat(Prj.projid)
        self.task.taskstate = "Done"
        self.UpdateProgress(100, "Processing done")
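# Vault layout used in SPStep2 above: images are bucketed into folders of
# 10000 ("%04d" % (imgid // 10000)), with file names built from the remainder
# and thumbnails forced to .jpg. A minimal sketch of that naming scheme;
# vault_relative_path is a hypothetical helper, not part of the application:
def vault_relative_path(imgid, suffix, thumb=False):
    """e.g. vault_relative_path(123456, '.png') -> '0012/3456.png'."""
    folder = "%04d" % (imgid // 10000)  # one folder per bucket of 10000 images
    if thumb:
        return "%s/%04d_mini.jpg" % (folder, imgid % 10000)  # thumbnail format is imposed
    return "%s/%04d%s" % (folder, imgid % 10000, suffix)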
def objectdetails(objid):
    # fetch and adjust the dimensions of the display area
    try:
        PageWidth = int(gvg("w")) - 40  # leave some margin on the right, plus the scrollbar
        if PageWidth < 200:
            PageWidth = 20000
        WindowHeight = int(gvg("h")) - 40  # leave some margin at the top
        if WindowHeight < 200:
            WindowHeight = 20000
    except Exception:
        PageWidth = 20000
        WindowHeight = 20000
    obj = database.Objects.query.filter_by(objid=objid).first()
    t = list()
    # this screen uses ElevateZoom, because in popup mode other zoom widgets
    # conflict with the images below the popup
    t.append("<script src='/static/jquery.elevatezoom.js'></script>")
    Prj = obj.project
    if not Prj.visible and not Prj.CheckRight(0):  # Level 0 = Read, 1 = Annotate, 2 = Admin
        flash('You cannot view this project', 'error')
        return PrintInCharte("<a href=/>Back to home</a>")
    g.Projid = Prj.projid
    PrjManager = [(m.memberrel.email, m.memberrel.name)
                  for m in Prj.projmembers if m.privilege == 'Manage']
    t.append("<p>Project: <b><a href='/prj/%d'>%s</a></b> (managed by : %s)"
             % (Prj.projid, XSSEscape(Prj.title),
                ",".join(("<a href ='mailto:%s'>%s</a>" % m for m in PrjManager))))
    if len(PrjManager) > 0:
        t.append("<br>To report a mistake, contact <a href ='mailto:{0}?subject=Ecotaxa%20mistake%20notification&body={2}'>{1}</a>"
                 .format(PrjManager[0][0], PrjManager[0][1],
                         urllib.parse.quote("Hello,\n\nI have discovered a mistake on this page "
                                            + request.base_url + "\n")))
    # //window.location="mailto:?subject=Ecotaxa%20page%20share&body="+encodeURIComponent("Hello,\n\nAn Ecotaxa user want share this page with you \n"+url);
    t.append("</p><p>Classification :")
    if obj.classif:
        t.append("<br> <b>%s</b>" % XSSEscape(obj.classif.display_name))
        TaxoHierarchie = (r[0] for r in GetAll(
            """WITH RECURSIVE rq(id,name,parent_id) as (
                select id,name,parent_id FROM taxonomy where id =%(taxoid)s
                union
                SELECT t.id,t.name,t.parent_id FROM rq JOIN taxonomy t ON t.id = rq.parent_id)
               select name from rq""", {"taxoid": obj.classif.id}))
        t.append("<br> " + (" < ".join(TaxoHierarchie)) + " (id=%s)" % obj.classif_id)
    else:
        t.append("<br> <b>Unknown</b>")
    if obj.classiffier is not None:
        t.append("<br> %s " % (database.ClassifQual.get(obj.classif_qual, "To be classified")))
        t.append(" by %s (%s) " % (obj.classiffier.name, obj.classiffier.email))
        if obj.classif_when is not None:
            t.append(" on %s " % (obj.classif_when.strftime("%Y-%m-%d %H:%M")))
    t.append("</p>")
    if obj.objfrel.object_link is not None:
        t.append("<p>External link :<a href='{0}' target=_blank> {0}</a></p>".format(obj.objfrel.object_link))
    t.append("<table><tr><td valign=top>Complementary information <a href='javascript:gotocommenttab();' > ( edit )</a>: </td>"
             "<td> <span id=spancomplinfo> {0}</span></td></tr></table>"
             .format(ntcv(obj.complement_info).replace('\n', '<br>\n')))
    # show the image list; selecting an image swaps the content of Img1 and resizes it
    # (a tab-based approach did not work because the images were stacked on top of each other)
    obj.images.sort(key=lambda x: x.imgrank)
    t.append("""<p>Image list : """)
    for img in obj.images:
        (width, height) = ComputeLimitForImage(img.width, img.height, PageWidth, WindowHeight)
        if img.thumb_file_name:
            minifile = img.thumb_file_name
            (miniwidth, miniheight) = ComputeLimitForImage(img.thumb_width, img.thumb_height, 30, 30)
        else:
            minifile = img.file_name
            (miniwidth, miniheight) = ComputeLimitForImage(img.width, img.height, 30, 30)
        t.append("""<a href="javascript:SwapImg1('{1}',{2},{3});" >{0} <img src=/vault/{4} width={5} height={6}></a> """
                 .format(img.imgrank + 1, img.file_name, width, height, minifile, miniwidth, miniheight))
    # add the first image
    (width, height) = ComputeLimitForImage(obj.images[0].width, obj.images[0].height, PageWidth, WindowHeight)
    t.append("</p><p><img id=img1 src=/vault/{1} data-zoom-image=/vault/{1} width={2} height={0}></p>"
             .format(height, obj.images[0].file_name, width))
    # classification pane
    if Prj.CheckRight(1):
        t.append("""<table><tr><td>Set a new classification :</td>
        <td style="width: 230px;">
        <div class="input-group">
        <select id="taxolbpop" name="taxolbpop" style="width: 200px" class='taxolb' >
        </select>""")
        if gvg("ajax", "0") == "0":
            t.append("""<span class="input-group-btn">
            <button class="btn btn-default btn-sm" type="button" data-toggle="modal" data-target="#TaxoModal"
                data-mytargetid="taxolbpop" title="Search on Taxonomy Tree">
            <span id=OpenTaxoLB class="glyphicon glyphicon-th-list" aria-hidden="true"/></button>
            </span>""")
        else:
            t.append("<br>")
        t.append("""</div><!-- /input-group -->
        <span id=PendingChangesPop></span></td><td width=30px></td><td valign=top>
        <button type="button" class="btn btn-success btn-xs" onclick="Save1Object('V');">Save as Validated</button>
        <button type="button" class="btn btn-warning btn-xs" onclick="Save1Object('D');">Save as dubious</button>
        <button id=btenableedit type="button" class="btn btn-gris btn-xs" onclick="EnableEdit();">Enable Editing</button>
        <button type="button" class="btn btn-default btn-xs" onclick="$('#PopupDetails').modal('hide');">Close</button>
        </td></tr></table>
        """)
    # add the tabs below the image
    t.append("""<br><div><ul class="nav nav-tabs" role="tablist">
    <li role="presentation" class="active"><a href="#tabdobj" aria-controls="tabdobj" role="tab" data-toggle="tab"> Object details</a></li>
    <li role="presentation" ><a href="#tabdsample" aria-controls="tabdsample" role="tab" data-toggle="tab"> Sample details</a></li>
    <li role="presentation" ><a href="#tabdacquis" aria-controls="tabdacquis" role="tab" data-toggle="tab"> Acquisition details</a></li>
    <li role="presentation" ><a href="#tabdprocessrel" aria-controls="tabdprocess" role="tab" data-toggle="tab"> Processing details</a></li>
    <li role="presentation" ><a href="#tabdclassiflog" aria-controls="tabdclassiflog" role="tab" data-toggle="tab">Classification change log</a></li>
    <li role="presentation" ><a href="#tabdmap" aria-controls="tabdmap" role="tab" data-toggle="tab" id=atabdmap style="background: #5CB85C;color:white;">Map</a></li>
    """)
    if Prj.CheckRight(1):
        t.append("""<li role="presentation" ><a id=linktabdaddcomments href="#tabdaddcomments" aria-controls="tabdaddcomments" role="tab" data-toggle="tab">Edit complementary information</a></li>""")
    if obj.classif_auto:
        classif_auto_name = obj.classif_auto.name
        if obj.classif_auto_score:
            classif_auto_name += " (%0.3f)" % (obj.classif_auto_score,)
    else:
        classif_auto_name = ''
    t.append("""</ul>
    <div class="tab-content">
    <div role="tabpanel" class="tab-pane active" id="tabdobj">
    <table class='table table-bordered table-condensed' data-table='object'><tr>
    <td style=' background-color: #f2f2f2;' data-edit='longitude'><b>longitude</td><td>{0}</td>
    <td style=' background-color: #f2f2f2;' data-edit='latitude'><b>latitude</td><td>{1}</td>
    <td style=' background-color: #f2f2f2;' data-edit='objdate'><b>Date</td><td>{2}</td>
    <td style=' background-color: #f2f2f2;'><b>Time (daytime)</td><td>{3} ({10})</td>
    </tr><tr><td style=' background-color: #f2f2f2;' data-edit='depth_min'><b>Depth min</td><td>{4}</td>
    <td style=' background-color: #f2f2f2;' data-edit='depth_max'><b>Depth max</td><td>{5}</td>
    <td><b>Classif auto</td><td>{6}</td><td><b>Classif auto when</td><td>{7}</td>
    </tr><tr><td><b>Object #</td><td>{8}</td>
    <td data-edit='orig_id'><b>Original Object ID</td><td colspan=5>{9}</td></tr><tr>"""
             .format(nonetoformat(obj.longitude, '.5f'), nonetoformat(obj.latitude, '.5f'),
                     obj.objdate, obj.objtime, obj.depth_min, obj.depth_max,
                     classif_auto_name, obj.classif_auto_when, objid, obj.objfrel.orig_id,
                     database.DayTimeList.get(obj.sunpos, '?')))
    cpt = 0
    # insert the object's custom fields
    for k, v in collections.OrderedDict(sorted(DecodeEqualList(Prj.mappingobj).items())).items():
        if cpt > 0 and cpt % 4 == 0:
            t.append("</tr><tr>")
        cpt += 1
        t.append("<td data-edit='{2}'><b>{0}</td><td>{1}</td>"
                 .format(v, ScaleForDisplay(getattr(obj.objfrel, k, "???")), k))
    t.append("</tr></table></div>")
    # insert the Sample, Acquisition & Processing fields in their respective tabs
    for r in (("Sample", "mappingsample", "sample"),
              ("Acquisition", "mappingacq", "acquis"),
              ("Processing", "mappingprocess", "processrel")):
        t.append('<div role="tabpanel" class="tab-pane" id="tabd' + r[2] + '">' + r[0]
                 + " details :<table class='table table-bordered table-condensed' data-table='" + r[2] + "'><tr>")
        cpt = 0
        if getattr(obj, r[2]):
            if r[2] == "sample":
                t.append("<td data-edit='orig_id'><b>{0}</td><td colspan=3>{1}</td><td data-edit='longitude'><b>{2}</td><td>{3}</td><td data-edit='latitude'><b>{4}</td><td>{5}</td></tr><tr>"
                         .format("Original ID", ScaleForDisplay(obj.sample.orig_id),
                                 "longitude", ScaleForDisplay(obj.sample.longitude),
                                 "latitude", ScaleForDisplay(obj.sample.latitude)))
            elif r[2] == "acquis":
                t.append("<td data-edit='orig_id'><b>{0}</td><td colspan=3>{1}</td><td data-edit='instrument'><b>{2}</td><td>{3}</td></tr><tr>"
                         .format("Original ID", ScaleForDisplay(obj.acquis.orig_id),
                                 "Instrument", ScaleForDisplay(obj.acquis.instrument)))
            else:
                t.append("<td data-edit='orig_id'><b>{0}</td><td>{1}</td></tr><tr>"
                         .format("Original ID.",
                                 ScaleForDisplay(getattr(getattr(obj, r[2]), "orig_id", "???"))))
            for k, v in collections.OrderedDict(sorted(DecodeEqualList(getattr(Prj, r[1])).items())).items():
                if cpt > 0 and cpt % 4 == 0:
                    t.append("</tr><tr>")
                cpt += 1
                t.append("<td data-edit='{2}'><b>{0}</td><td>{1}</td>"
                         .format(v, ScaleForDisplay(getattr(getattr(obj, r[2]), k, "???")), k))
            if r[2] == "sample":
                t.append("</tr><tr><td><b>{0}</td><td colspan=7>{1}</td></tr><tr>"
                         .format("Dataportal Desc.",
                                 ScaleForDisplay(html.escape(ntcv(obj.sample.dataportal_descriptor)))))
        else:
            t.append("<td>No {0}</td>".format(r[0]))
        t.append("</tr></table></div>")
    # show the classification history
    t.append("""<div role="tabpanel" class="tab-pane" id="tabdclassiflog">
    Current Classification : Quality={} , date={}
    <table class='table table-bordered table-condensed'><tr>
    <td>Date</td><td>Type</td><td>Taxo</td><td>Author</td><td>Quality</td></tr>"""
             .format(obj.classif_qual, obj.classif_when))
    Histo = GetAll("""SELECT to_char(classif_date,'YYYY-MM-DD HH24:MI:SS') datetxt,classif_type
            ,t.display_name as name,u.name username,classif_qual
            from objectsclassifhisto h
            left join taxonomy t on h.classif_id=t.id
            LEFT JOIN users u on u.id = h.classif_who
            WHERE objid=%(objid)s
            order by classif_date desc""", {"objid": objid}, doXSSEscape=True)
    for r in Histo:
        t.append("<tr><td>"
                 + ("</td><td>".join([str(r[x]) if r[x] else "-"
                                      for x in ("datetxt", "classif_type", "name", "username", "classif_qual")]))
                 + "</td></tr>")
    t.append("</table></div>")
    if Prj.CheckRight(1):
        t.append("""<div role="tabpanel" class="tab-pane" id="tabdaddcomments">
        <textarea id=compinfo rows=5 cols=120 autocomplete=off>%s</textarea><br>
        <button type="button" class='btn btn-primary' onclick="UpdateComment();">Save additional comment</button>
        <span id=ajaxresultcomment></span>
        """ % (ntcv(obj.complement_info),))
        t.append("</div>")
    # show the map tab
    t.append("""<div role="tabpanel" class="tab-pane" id="tabdmap">
    <div id="map2" class="map2" style="width: 100%; height: 450px;">
    Displaying Map requires Internet Access to load map from https://server.arcgisonline.com
    </div>""")
    t.append("</table></div>")
    t.append(render_template("common/objectdetailsscripts.html", Prj=Prj, objid=objid, obj=obj))
    # in popup mode, add a link at the top of the screen to open the page in a
    # separate window; otherwise render it, without the link, inside the site template
    if gvg("ajax", "0") == "1":
        return """<table width=100% style='margin: 3px'><tr><td><a href='/objectdetails/{0}?w={1}&h={2}' target=_blank><b>Open in a separate window</b> (right click to copy link)</a>
        </td><td align='right'><button type="button" class="btn btn-default" onclick="$('#PopupDetails').modal('hide');">Close</button>
        </td></tr></table><div style='margin: 0 5px;'>""".format(objid, gvg("w"), gvg("h")) + "\n".join(t)
    return PrintInCharte("<div style='margin-left:10px;'>" + "\n".join(t) + render_template('common/taxopopup.html'))
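# ComputeLimitForImage (defined elsewhere in the application) is used above to
# fit pictures and thumbnails inside a bounding box. A minimal sketch of the
# aspect-preserving fit it is assumed to perform; _fit_in_box is a hypothetical
# stand-in, not the application's implementation:
def _fit_in_box(width, height, max_width, max_height):
    """Scale (width, height) down, keeping the aspect ratio, to fit the box."""
    ratio = min(max_width / width, max_height / height, 1)  # never upscale
    return int(width * ratio), int(height * ratio)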