def indexExplore():
    """Render the public 'explore' search page.

    Collects the current filter values from the query string, pre-builds the
    HTML <option> lists for the sample / project / taxon / month / day-time
    selectors, then renders the explore template.
    """
    data = {'pageoffset': gvg("pageoffset", "0")}
    # Start from the generic filter defaults, overridden by the query string.
    for k, v in FilterList.items():
        data[k] = gvg(k, v)
    data['inexplore'] = True
    data["projid"] = gvg("projid", 0)
    data["taxochild"] = gvg("taxochild", '1')
    # Pre-selected samples (comma-separated ids) -> <option> list.
    data["sample_for_select"] = ""
    if data["samples"]:
        for r in GetAll(
                "select sampleid,orig_id from samples where sampleid =any(%s)",
                ([int(x) for x in data["samples"].split(',')], ),
                doXSSEscape=True):
            data["sample_for_select"] += "\n<option value='{sampleid}' selected>{orig_id}</option> ".format(**r)
    # Pre-selected (visible) projects -> <option> list.
    data["projects_for_select"] = ""
    if data["projid"]:
        for r in GetAll(
                "select projid,title from projects where projid =any(%s) and visible=true",
                ([int(x) for x in data["projid"].split(',')], ),
                doXSSEscape=True):
            data["projects_for_select"] += "\n<option value='{projid}' selected>{title}</option> ".format(**r)
    # Pre-selected taxa (multi-valued taxo[] argument) -> <option> list.
    # NOTE: removed a leftover debug print() of the raw taxo[] argument.
    data["taxo_for_select"] = ""
    if gvg("taxo[]"):
        for r in GetAll(
                "SELECT id, display_name FROM taxonomy WHERE id =any(%s) order by name",
                ([int(x) for x in request.args.getlist("taxo[]")], ),
                debug=False):
            data["taxo_for_select"] += "\n<option value='{id}' selected>{display_name}</option> ".format(**r)
    # Month selector: keep the months already present in the filter checked.
    data["month_for_select"] = ""
    for (k, v) in enumerate(
            ('January', 'February', 'March', 'April', 'May', 'June', 'July',
             'August', 'September', 'October', 'November', 'December'),
            start=1):
        data["month_for_select"] += "\n<option value='{1}' {0}>{2}</option> ".format(
            'selected' if str(k) in data['month'].split(',') else '', k, v)
    # Day-time selector, same 'selected' logic as months.
    data["daytime_for_select"] = ""
    for (k, v) in database.DayTimeList.items():
        data["daytime_for_select"] += "\n<option value='{1}' {0}>{2}</option> ".format(
            'selected' if str(k) in data['daytime'].split(',') else '', k, v)
    right = 'dodefault'
    classiftab = ""
    appli.AddTaskSummaryForTemplate()
    filtertab = getcommonfilters(data)
    return render_template('search/explore.html',
                           top="",
                           lefta=classiftab,
                           leftb=filtertab,
                           right=right,
                           data=data)
def PrjEdit(PrjId):
    """Project settings page: display and save settings for project *PrjId*.

    Requires Admin privilege (level 2) on the project. On POST with save=Y,
    updates the project row and its member privileges; when the CNN network
    changed, erases the cached SCN features.
    """
    g.useselect4 = True
    Prj = database.Projects.query.filter_by(projid=PrjId).first()
    # BUG FIX: the existence check must happen before any attribute access;
    # previously Prj.projid was dereferenced first, raising AttributeError
    # for an unknown project instead of showing the flash message.
    if Prj is None:
        flash("Project doesn't exists", 'error')
        return PrintInCharte("<a href=/prj/>Select another project</a>")
    g.headcenter = "<h4><a href='/prj/{0}'>{1}</a></h4>".format(
        Prj.projid, XSSEscape(Prj.title))
    if not Prj.CheckRight(2):  # Level 0 = Read, 1 = Annotate, 2 = Admin
        flash('You cannot edit settings for this project', 'error')
        return PrintInCharte("<a href=/prj/>Select another project</a>")
    if gvp('save') == "Y":
        PreviousCNN = Prj.cnn_network_id
        # Copy every posted field that matches a Projects attribute.
        for f in request.form:
            if f in dir(Prj):
                setattr(Prj, f, gvp(f))
        if PreviousCNN != Prj.cnn_network_id:
            # Network changed: cached CNN features are no longer valid.
            database.ExecSQL(
                "delete from obj_cnn_features where objcnnid in (select objid from obj_head where projid=%s)",
                [PrjId])
            flash("SCN features erased", "success")
        Prj.visible = True if gvp('visible') == 'Y' else False
        # Member privileges: delete, update or add rows as requested.
        for m in Prj.projmembers:
            if gvp('priv_%s_delete' % m.id) == 'Y':
                db.session.delete(m)
            elif gvp('priv_%s_member' % m.id) != '':  # not a delete -> update
                m.member = int(gvp('priv_%s_member' % m.id))
                m.privilege = gvp('priv_%s_privilege' % m.id)
        if gvp('priv_new_member') != '':
            new = database.ProjectsPriv(member=int(gvp('priv_new_member')),
                                        privilege=gvp('priv_new_privilege'),
                                        projid=PrjId)
            db.session.add(new)
        try:
            db.session.commit()
            flash("Project settings Saved successfuly", "success")
        except Exception as E:
            flash("Database exception : %s" % E, "error")
            db.session.rollback()
    # Preset classification list for the taxon picker.
    if Prj.initclassiflist is None:
        lst = []
    else:
        lst = [int(x) for x in Prj.initclassiflist.split(",") if x.isdigit()]
    g.predeftaxo = GetAll(
        """select t.id,t.display_name as name
        from taxonomy t
        left join taxonomy t2 on t.parent_id=t2.id
        where t.id= any(%s) order by upper(t.display_name) """, (lst, ))
    g.users = GetAssoc2Col("select id,name from users order by lower(name)",
                           dicttype=collections.OrderedDict)
    # Fixed columns first, then the project's own object features.
    g.maplist = [
        'objtime', 'objdate', 'latitude', 'longitude', 'depth_min', 'depth_max'
    ] + sorted(DecodeEqualList(Prj.mappingobj).values())
    g.scn = GetSCNNetworks()
    return render_template('project/editproject.html', data=Prj)
def part_prj_vpgraph(PrjId, offset):
    """Show depth/particle import graphs for up to 50 samples of a particle project.

    Access is restricted to the project owner or an application administrator.
    *offset* pages through the samples, 50 at a time.
    """
    Prj = GetAll("""select pp.* from part_projects pp where pprojid=%s""", (PrjId, ))
    if len(Prj) != 1:
        return PrintInCharte(ErrorFormat("Project doesn't exists"))
    Prj = Prj[0]
    if Prj['ownerid'] != current_user.id and not current_user.has_role(database.AdministratorLabel):
        return PrintInCharte(ErrorFormat("Access Denied"))
    g.headcenter = "<h4>Particle Project %s : %s</h4><a href='/part/prj/%s'>Project home</a>" % (
        Prj['projid'], Prj['ptitle'], Prj['pprojid'], )
    # SECURITY FIX: PrjId/offset are now passed as bind parameters instead of
    # being %-formatted into the SQL text.
    dbsample = database.GetAll(
        """select psampleid,filename from part_samples s
        where pprojid=%s
        ORDER BY filename desc limit 50 OFFSET %s """, (PrjId, offset))
    txt = """<style>
.idepth,.ipart {height: auto}
</style>
<button class='btn' onclick='dozoom(50);'>50%</button>
<button class='btn' onclick='dozoom(75);'>75%</button>
<button class='btn' onclick='dozoom(100);'>100%</button>
<script>
function dozoom(z) {
$('.idepth').css('width',(800*z/100).toFixed().toString()+"px");
$('.ipart').css('width',(1600*z/100).toFixed().toString()+"px");
}
</script>
"""
    for s in dbsample:
        # filename is passed to format() but the template currently shows
        # '(unknown)' in its place — kept as-is (assumed intentional).
        txt += """<p class='ghead'>Graph for {psampleid} - (unknown) - </p>
<img src='/vault/{idepth}' class='idepth'>
<img src='/vault/{ipart}' class='ipart'>
""".format(psampleid=s['psampleid'], filename=s['filename']
           , idepth=uvp_sample_import.GetPathForImportGraph(s['psampleid'], 'depth', True)
           , ipart=uvp_sample_import.GetPathForImportGraph(s['psampleid'], 'particle', True))
    return PrintInCharte(txt)
def searchtaxo():
    """Taxon autocomplete endpoint returning JSON [{id, text, pr}].

    The query is split on '*': the last term matches the taxon name via a
    bind parameter; each preceding term must match one of up to 5 ancestor
    levels (inlined in the SQL, quoted with QuotedString). Queries of 2
    characters or fewer return an empty list.
    """
    term = gvg("q")
    if len(term) <= 2:
        return "[]"
    terms = [x.strip().lower() + R"%" for x in term.split('*')]
    # psycopg2.extensions.QuotedString("""c'est ok "ici" à """).getquoted()
    param = {'term': terms[-1]}  # the last term always goes through the query as a bind parameter
    # Ancestor terms are inlined; SQL-injection safe thanks to QuotedString.
    # '%' doubled because the string goes through a paramstyle execute later.
    terms = [
        QuotedString(x).getquoted().decode('iso-8859-15', 'strict').replace("%", "%%")
        for x in terms[0:-1]
    ]
    ExtraWhere = ExtraFrom = ""
    if terms:
        # Each ancestor term may match any of the 5 joined parent levels.
        for t in terms:
            ExtraWhere += "\n and ("
            ExtraWhere += ' or '.join(
                ("lower(p{0}.name) like {1}".format(i, t) for i in range(1, 6))) + ")"
        ExtraFrom = "\n".join([
            "left join taxonomy p{0} on p{1}.parent_id=p{0}.id".format(
                i, i - 1) for i in range(2, 6)
        ])
    sql = """SELECT tf.id, tf.display_name as name ,0 FROM taxonomy tf left join taxonomy p1 on tf.parent_id=p1.id {0} WHERE lower(tf.name) LIKE %(term)s {1} order by tf.name limit 200""".format(ExtraFrom, ExtraWhere)
    res = GetAll(sql, param, debug=False)
    return json.dumps([dict(id=r[0], text=r[1], pr=r[2]) for r in res])
def part_prj_main(PrjId):
    """Particle project home page.

    Shows the project header, per-sample processing statistics and the
    public visibility / export dates derived from the oldest sample date.
    Access restricted to the owner or an administrator.
    """
    # Prj = partdatabase.part_projects.query.filter_by(pprojid=PrjId).first()
    Prj = GetAll("""select pp.*
        ,oldestsampledate+make_interval(0,public_visibility_deferral_month) visibility_date
        ,oldestsampledate+make_interval(0,public_partexport_deferral_month) partexport_date
        ,oldestsampledate+make_interval(0,public_zooexport_deferral_month) zooexport_date
        from part_projects pp where pprojid=%s""", (PrjId, ))
    if len(Prj) != 1:
        return PrintInCharte(ErrorFormat("Project doesn't exists"))
    Prj = Prj[0]
    if Prj['ownerid'] != current_user.id and not current_user.has_role(database.AdministratorLabel):
        return PrintInCharte(ErrorFormat("Access Denied"))
    g.headcenter = "<h4>Particle Project %s : %s</h4><a href='/part/'>Particle Module Home</a>" % (Prj['projid'], Prj['ptitle'])
    # SECURITY/CONSISTENCY FIX: PrjId is now passed as a bind parameter (like
    # the query above) instead of being %-formatted into the SQL text.
    dbsample = database.GetAll("""select profileid,psampleid,filename,stationid,firstimage,lastimg,lastimgused,sampleid
        ,histobrutavailable,comment,daterecalculhistotaxo,ctd_import_datetime,sampledate,imp_descent_filtered_row,imp_removed_empty_slice
        ,(select count(*) from part_histopart_det where psampleid=s.psampleid) nbrlinedet
        ,(select count(*) from part_histopart_reduit where psampleid=s.psampleid) nbrlinereduit
        ,(select count(*) from part_histocat where psampleid=s.psampleid) nbrlinetaxo
        ,(select count(*) from part_ctd where psampleid=s.psampleid) nbrlinectd
        from part_samples s where pprojid=%s ORDER BY filename desc """, (PrjId, ))
    MinSampleDate = Prj['oldestsampledate']
    VisibilityText = ""
    if MinSampleDate is not None:
        VisibilityText += """<br> Oldest sample date is {0:%Y-%m-%d}, Visibility date is {1} , Particule export date is {2}, Zooplankton classification export date is {3} """.format(
            MinSampleDate
            , "Not Defined" if Prj['visibility_date'] is None else Prj['visibility_date'].strftime("%Y-%m-%d")
            , "Not Defined" if Prj['partexport_date'] is None else Prj['partexport_date'].strftime("%Y-%m-%d")
            , "Not Defined" if Prj['zooexport_date'] is None else Prj['zooexport_date'].strftime("%Y-%m-%d"))
    return PrintInCharte(
        render_template('part/prj_index.html', PrjId=PrjId, dbsample=dbsample,
                        Prj=Prj, VisibilityText=VisibilityText))
def browsetaxotsvexport():
    """Export the whole taxonomy table as a TSV file attachment.

    Newlines/tabs inside values are escaped so each row stays on one line.
    """
    # BUG FIX: creation_datetime format was 'yyyy-mm-dd hh24:mi,ss' (comma),
    # now 'hh24:mi:ss' like the lastupdate_datetime column below.
    sql = """select t.id,t.parent_id,t.name,t.taxotype,t.taxostatus ,t.id_source,t.source_url,t.source_desc ,t.creator_email,to_char(t.creation_datetime,'yyyy-mm-dd hh24:mi:ss') creation_datetime ,t.id_instance,t.rename_to ,to_char(t.lastupdate_datetime,'yyyy-mm-dd hh24:mi:ss') lastupdate_datetime ,i.name instance_name,tr.display_name rename_to_name,t.nbrobj,t.nbrobjcum ,t.display_name,{} from taxonomy t LEFT JOIN ecotaxainst i on t.id_instance=i.id LEFT JOIN taxonomy tr on tr.id=t.rename_to {} order by 1 """.format(SQLTreeSelect, SQLTreeJoin)
    lst = GetAll(sql)
    t = []
    t.append(
        "id\tparent_id\tname\ttaxotype\ttaxostatus\tid_source\tsource_url\tsource_desc\tcreator_email\tcreation_datetime"
        + "\tid_instance\trename_to\tlastupdate_datetime\tinstance_name\trename_to_name\tnbrobj\tnbrobjcum\tdisplay_name\tlineage"
    )
    for l in lst:
        # Escape embedded newlines/tabs, drop CRs, so the TSV stays rectangular.
        t.append("\t".join(
            (str(ntcv(x)).replace("\n", "\\n").replace("\t", "\\t").replace("\r", "")
             for x in l[0:99])))
    return Response("\n".join(t),
                    mimetype="text/tsv",
                    headers={
                        "Content-Disposition":
                        "attachment; filename=taxoexport_%s.tsv" %
                        datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S')
                    })
def CreateXML(self):
    """Build the data-portal XML export file for the configured project.

    Writes export_<projid>_<timestamp>.xml in the task working directory,
    containing the project description, its managers/contributors and, for
    each sample carrying a dataportal_descriptor (optionally restricted to
    self.param.samplelist), the descriptor plus the distinct taxonomic
    assignments of its objects.
    """
    self.UpdateProgress(1,"Start XML export")
    Prj=database.Projects.query.filter_by(projid=self.param.ProjectId).first()
    self.param.OutFile= "export_{0:d}_{1:s}.xml".format(Prj.projid, datetime.datetime.now().strftime("%Y%m%d_%H%M"))
    fichier=os.path.join(self.GetWorkingDir(),self.param.OutFile)
    logging.info("Creating file %s"%(fichier,))
    root = ET.Element('projects',{ "xmlns":"http://typo.oceanomics.abims.sbr.fr/ecotaxa-export" ,"xmlns:xsi":"http://www.w3.org/2001/XMLSchema-instance" ,"xsi:schemaLocation":"http://typo.oceanomics.abims.sbr.fr/ecotaxa-export platform:/resource/typo-shared/src/main/resources/ecotaxa-export-1.1.xsd"})
    Project=ET.SubElement(root, 'project',{'id':"ecotaxa:%d"%Prj.projid})
    projectdescription=ET.SubElement(Project, 'projectdescription')
    projectdescriptionname=ET.SubElement(projectdescription, 'name')
    projectdescriptionname.text=Prj.title
    ET.SubElement(projectdescription, 'link',{"url":"%sprj/%d"%(app.config['SERVERURL'],Prj.projid)})
    projectdescriptionmanagers=ET.SubElement(projectdescription, 'managers')
    projectdescriptionanno=ET.SubElement(projectdescription, 'contributors')
    # Project members with Manage/Annotate privilege become <manager> /
    # <contributor> entries; other privileges are skipped.
    for pm in Prj.projmembers:
        if pm.privilege=="Manage":
            m=ET.SubElement(projectdescriptionmanagers, 'manager')
        elif pm.privilege=="Annotate":
            m=ET.SubElement(projectdescriptionanno, 'contributor')
        else:
            continue
        ET.SubElement(m, 'name').text=pm.memberrel.name
        ET.SubElement(m, 'email').text=pm.memberrel.email
    # Only samples that carry a ready-made dataportal_descriptor are exported.
    sql1="""SELECT s.sampleid,s.orig_id,s.dataportal_descriptor From samples s where projid=%(projid)s and dataportal_descriptor is not null """
    sql3=" "
    params={'projid':int(self.param.ProjectId)}
    if self.param.samplelist!="":
        sql3+=" and s.orig_id= any(%(samplelist)s) "
        params['samplelist']=self.param.samplelist.split(",")
    sql=sql1+" "+sql3
    logging.info("Execute SQL : %s"%(sql,))
    logging.info("Params : %s"%(params,))
    self.pgcur.execute(sql,params)
    samples=ET.SubElement(Project, 'samples')
    for r in self.pgcur:
        sel=ET.SubElement(samples, 'sample')
        # The descriptor is stored as an XML fragment; grafted into the tree as-is.
        dtpdesc=ET.fromstring(r['dataportal_descriptor'])
        sel.append(dtpdesc)
        ET.SubElement(sel, 'sampleregistryreference',barcode=r['orig_id'])
        sql= """SELECT distinct to1.name FROM objects o JOIN taxonomy to1 on o.classif_id=to1.id where o.sampleid={0:d} """.format(r['sampleid'], )
        taxo=GetAll(sql)
        taxoel=ET.SubElement(sel, 'taxonomicassignments')
        # NOTE(review): the inner loop rebinds 'r', shadowing the outer cursor
        # row; harmless because 'r' is not read again afterwards, but fragile.
        for r in taxo:
            ET.SubElement(taxoel, 'taxonomicassignment',taxon=r[0])
    ET.ElementTree(root).write(fichier,encoding="UTF-8", xml_declaration=True)
def dbadmin_viewsizes():
    """Admin page listing public-schema tables and indexes with size in MB."""
    g.headcenter = "Database objects size (public schema only)<br><a href=/admin/>Back to admin home</a>"
    sql = """SELECT c.relname, c.relkind, CASE WHEN c.relkind='i' THEN c2.tablename ELSE c.relname END fromtable,pg_relation_size(('"' || c.relname || '"')::regclass)/(1024*1024) szMB FROM pg_namespace ns, pg_class c LEFT OUTER JOIN pg_indexes c2 ON c.relname = c2.indexname WHERE c.relnamespace = ns.oid AND ns.nspname = 'public' AND c.relkind IN ('r' ,'i') ORDER BY c.relkind DESC, pg_relation_size(('"' || c.relname || '"')::regclass) DESC """
    res = GetAll(sql)  # ,debug=True
    # BUG FIX: header cells were opened with <th> but closed with </td>
    # (plus a 'witdth' typo); now emits well-formed <th>...</th> cells.
    txt = """<table class='table table-bordered table-condensed table-hover' style="width:500px;">
    <tr><th width=200>Object</th><th width=200>Table</th><th width=100>Size (Mb)</th></tr>"""
    for r in res:
        # r = (relname, relkind, fromtable, szMB); relkind ({1}) is not displayed.
        txt += """<tr><td>{0}</td> <td>{2}</td> <td>{3}</td> </tr>""".format(*r)
    txt += "</table>"
    return PrintInCharte(txt)
def QuestionProcessScreenSelectSourceTaxo(self, Prj):
    """Second configuration screen: choose the taxa used from the source projects.

    Lists every category present (validated objects only) in the selected
    source/learning projects with its object count and percentage, and
    pre-checks the taxa saved in the project's classifsettings (all of them
    when nothing was saved yet).
    """
    # Categories and their occurrence count in the source/learning projects.
    sql = """select n.classif_id,t.display_name as name ,n.nbr from (select o.classif_id,count(*) nbr from obj_head o where projid in ({0}) and classif_qual='V' group by classif_id) n JOIN taxonomy t on n.classif_id=t.id left join taxonomy p1 on t.parent_id=p1.id order by nbr desc,name""".format(database.CSVIntStringToInClause(gvp('src', gvg('src'))))
    g.TaxoList = GetAll(sql, None, cursor_factory=None)
    s = sum([r[2] for r in g.TaxoList])  # total object count over all categories
    d = DecodeEqualList(Prj.classifsettings)
    TaxoCSV = d.get('seltaxo')
    if TaxoCSV:
        TaxoList = {int(x) for x in TaxoCSV.split(',')}
    else:
        # CONSISTENCY FIX: use an empty set (was {}, i.e. an empty dict);
        # the len/membership tests behave identically but the type now
        # matches the set built in the other branch.
        TaxoList = set()
    # Append the percentage per category and the 'checked' flag.
    g.TaxoList = [[r[0], r[1], r[2], round(100 * r[2] / s, 1),
                   'checked' if len(TaxoList) == 0 or r[0] in TaxoList else '']
                  for r in g.TaxoList]
    ExtraHeader = "<input type='hidden' name='src' value='{}'>".format(gvp('src', gvg('src')))
    ExtraHeader += self.GetFilterText()
    return render_template('task/classifauto2_create_lsttaxo.html'
                           , url=request.query_string.decode('utf-8')
                           , ExtraHeader=ExtraHeader, prj=Prj)
def part_prj():
    """Particle projects list page, with optional title/instrument filters.

    Non-administrators only see the projects they own. Filter values go
    through bind parameters; the '%%' in the SQL are literal '%' wildcards
    for the ilike patterns.
    """
    params={}
    sql="""select pprojid,ptitle,up.ownerid,u.name,u.email,rawfolder,instrumtype,ep.title ,(select count(*) from part_samples where pprojid=up.pprojid) samplecount from part_projects up left JOIN projects ep on up.projid=ep.projid LEFT JOIN users u on ownerid=u.id """
    sql += " where 1=1 "
    if not current_user.has_role(database.AdministratorLabel):
        # current_user.id is an integer, formatted with %d
        sql+=" and ownerid=%d"%(current_user.id,)
    if gvg('filt_title','')!='':
        # Title filter matches particle/EcoTaxa titles or numeric project ids.
        sql +=" and ( up.ptitle ilike '%%'||%(title)s ||'%%' or to_char(up.pprojid,'999999') like '%%'||%(title)s or ep.title ilike '%%'||%(title)s ||'%%' or to_char(ep.projid,'999999') like '%%'||%(title)s) "
        params['title']=gvg('filt_title')
    if gvg('filt_instrum','')!='':
        sql +=" and up.instrumtype ilike '%%'||%(filt_instrum)s ||'%%' "
        params['filt_instrum']=gvg('filt_instrum')
    sql+=" order by lower(ep.title),lower(ptitle)"
    res = GetAll(sql,params) #,debug=True
    # app.logger.info("res=%s",res)
    # Only administrators and project creators may create particle projects.
    CanCreate=False
    if current_user.has_role(database.AdministratorLabel) or current_user.has_role(database.ProjectCreatorLabel):
        CanCreate=True
    g.headcenter = "<h4>Particle Projects management</h4><a href='/part/'>Particle Module Home</a>"
    return PrintInCharte(
        render_template('part/list.html', PrjList=res, CanCreate=CanCreate, AppManagerMailto=appli.GetAppManagerMailto()
                        , filt_title=gvg('filt_title'), filt_subset=gvg('filt_subset'), filt_instrum=gvg('filt_instrum')))
def browseinstance():
    """List every known EcoTaxa instance with its last stat-update time."""
    instance_sql = """select id,name,url,to_char(laststatupdate_datetime,'yyyy-mm-dd hh24:mi') laststatupdate_datetime,ecotaxa_version from ecotaxainst order by id """
    instances = GetAll(instance_sql)
    return render_template('browseinstance.html', lst=instances)
def GetColsForTable(schema: str, table: str):
    """Return the ordered column names of *schema*.*table* from the pg catalog.

    SECURITY FIX: schema/table are now passed as bind parameters instead of
    being .format()-ed into the SQL text (was injectable). Dropped columns
    (names containing a dot) and system columns (attnum<=0) are excluded.
    """
    ColList = GetAll("""select a.attname from pg_namespace ns
        join pg_class c on relnamespace=ns.oid
        join pg_attribute a on a.attrelid=c.oid
        where ns.nspname=%s and relkind='r' and a.attname not like '%%.%%' and attnum>0
        and c.relname=%s order by attnum""", (schema, table))
    return [x[0] for x in ColList]
def browseinstanceeditpopup(instanceid):
    """Edit popup for one EcoTaxa instance; 'new' yields a blank creation form."""
    if instanceid != 'new':
        rows = GetAll("""select i.* from ecotaxainst i where i.id = %(id)s """,
                      {'id': instanceid})
        instance = rows[0]
    else:
        instance = {'id': 'new'}
    return render_template('browseinstanceeditpopup.html', instance=instance)
def browsetaxo():
    """Taxonomy browser page: first 200 taxa, total count and admin user list."""
    lst = GetAll(
        """select t.id,t.parent_id,t.display_name as name,t.taxotype,t.taxostatus,t.creator_email,t.id_source ,to_char(t.creation_datetime,'yyyy-mm-dd hh24:mi') creation_datetime,to_char(t.lastupdate_datetime,'yyyy-mm-dd hh24:mi') lastupdate_datetime,{} from taxonomy t {} order by t.id LIMIT 200 """.format(SQLTreeSelect, SQLTreeJoin))
    for lstitem in lst:
        # lstitem['tree']=PackTreeTxt(lstitem['tree'])  # avoids issues with the 'safe' template filter
        if lstitem['parent_id'] is None:
            lstitem['parent_id'] = ""  # the template expects a string, not None
    nbrtaxon = GetAll("select count(*) from taxonomy")[0][0]
    # Active users with a plausible email, offered as contacts in the page.
    g.AdminLists = GetAll(
        "select email,name from users where email like '%@%' and active=TRUE order by 2"
    )
    return render_template('browsetaxo.html', lst=lst, nbrtaxon=nbrtaxon)
def taxotreerootjson():
    """JSON nodes for the jstree taxonomy browser; id '#' means the virtual root."""
    node_id = gvg("id")
    # '#' requests the roots (no parent), otherwise the children of node_id.
    if node_id == '#':
        where_clause = "parent_id is null"
    else:
        where_clause = "parent_id =%d" % (int(node_id))
    query = ("""SELECT id, name,parent_id,coalesce(nbrobj,0)+coalesce(nbrobjcum,0) ,exists(select 1 from taxonomy te where te.parent_id=taxonomy.id) FROM taxonomy WHERE """
             + where_clause + " order by name ")
    rows = GetAll(query)
    nodes = []
    for row in rows:
        label = ("<span class=v>" + row[1] + "</span> (" + str(row[3])
                 + ") <span class='TaxoSel label label-default'><span class='glyphicon glyphicon-ok'></span></span>")
        nodes.append(dict(id=str(row[0]),
                          text=label,
                          parent=row[2] or "#",
                          children=row[4]))
    return json.dumps(nodes)
def searchtaxo():
    """Taxon autocomplete endpoint (select2) returning JSON [{id, text, pr}].

    Behaviour:
      * queries of <=2 chars return the per-user MRU classification list
        (authenticated users only) instead of hitting the database;
      * 'a<b<c' matches 'a' against display_name and 'b<c' against the lineage;
      * '*' and space act as wildcards;
      * when a projid is given and that project has a preset taxon list,
        preset taxa get pr=1 and sort first.
    """
    term=gvg("q")
    if len(term)<=2:
        # Short query: serve the Most Recently Used classifications instead.
        if not current_user.is_authenticated:
            return "[]"
        # current_user.id
        with app.MRUClassif_lock:
            # Example of MRU content, kept for reference:
            # app.MRUClassif[current_user.id]=[{"id": 2904, "pr": 0, "text": "Teranympha (Eucomonymphidae-Teranymphidae)"},
            #                                  {"id": 12488, "pr": 0, "text": "Teranympha mirabilis "},
            #                                  {"id": 76677, "pr": 0, "text": "Terasakiella (Methylocystaceae)"},
            #                                  {"id": 82969, "pr": 0, "text": "Terasakiella pusilla "}]
            return json.dumps(app.MRUClassif.get(current_user.id,[]))
    ltfound=term.find('<')>0  # NOTE(review): unused local, kept as-is
    SQLWith=""" """  # NOTE(review): unused local, kept as-is
    # '*' and space both behave as '%' wildcards.
    terms=[x.lower().replace("*","%").replace(" ","%")+R"%" for x in term.split('<')]
    param={'term':terms[0]}  # the first term always applies to display_name via a bind parameter
    ExtraWhere=ExtraFrom=""
    if len(terms)>1:
        # Remaining terms match the '<'-separated lineage expression.
        ExtraFrom = SQLTreeJoin
        terms = ['%%<'+x.replace("%","%%").replace("*","%%").replace(" ","%%") for x in terms[1:]]
        # SQL-injection safe: quoted through psycopg2 QuotedString.
        termsSQL=QuotedString("".join(terms)).getquoted().decode('iso-8859-15','strict')
        ExtraWhere= ' and '+SQLTreeExp+" ilike "+termsSQL
    sql="""SELECT tf.id, tf.display_name as name ,0 FROM taxonomy tf {0} WHERE lower(tf.display_name) LIKE %(term)s {1} order by lower(tf.display_name) limit 200""".format(ExtraFrom,ExtraWhere)
    PrjId=gvg("projid")
    if PrjId!="":
        PrjId=int(PrjId)
        Prj=database.Projects.query.filter_by(projid=PrjId).first()
        if ntcv(Prj.initclassiflist) != "":
            # Turn the CSV preset list into a VALUES clause: "(id1), (id2), ..."
            InitClassif=Prj.initclassiflist
            InitClassif=", ".join(["("+x.strip()+")" for x in InitClassif.split(",") if x.strip()!=""])
            # Preset taxa are flagged inpreset=1 and sorted before the others.
            sql=""" SELECT tf.id ,tf.display_name as name , case when id2 is null then 0 else 1 end inpreset FROM taxonomy tf join (select t.id id1,c.id id2 FROM taxonomy t full JOIN (VALUES """+InitClassif+""") c(id) ON t.id = c.id WHERE lower(display_name) LIKE %(term)s) tl2 on tf.id=coalesce(id1,id2) """+ExtraFrom+""" WHERE lower(tf.display_name) LIKE %(term)s """+ExtraWhere+""" order by inpreset desc,lower(tf.display_name),name limit 200 """
    res = GetAll(sql, param,debug=False)
    return json.dumps([dict(id=r[0],text=r[1],pr=r[2]) for r in res])
def browsetaxoviewpopup(taxoid):
    """Read-only popup for one taxon.

    Shows the taxon with its parent, its rename target rendered as a
    '<'-separated lineage (up to 3 ancestor levels), plus per-instance
    usage statistics.
    """
    sql = """select t.*,i.name inst_name,url inst_url,concat(rt.name,'<'||rt2.name,'<'||rt3.name,'<'||rt4.name) rename_to_name ,p.name parentname,to_char(t.creation_datetime,'YYYY-MM-DD HH24:MI') creationdatetimefmt,{} from taxonomy t {} left join ecotaxainst i on i.id=t.id_instance left join taxonomy rt on t.rename_to=rt.id left join taxonomy p on t.parent_id=p.id left join taxonomy rt2 on rt.parent_id=rt2.id left join taxonomy rt3 on rt2.parent_id=rt3.id left join taxonomy rt4 on rt3.parent_id=rt4.id where t.id = %(id)s """.format(SQLTreeSelect, SQLTreeJoin)
    taxon = GetAll(sql, {'id': taxoid})[0]
    g.TaxoType = database.TaxoType
    g.TaxoStatus = database.TaxoStatus
    # Usage count of this taxon on each known EcoTaxa instance.
    stat = GetAll(
        """select i.name,i.url,s.nbr from ecotaxainststat s join ecotaxainst i on i.id=s.id_instance where id_taxon=%(id)s order by i.name """,
        {'id': taxoid})
    return render_template('browsetaxoviewpopup.html', taxon=taxon, stat=stat)
def AutoClean():
    """Purge stale background tasks and return an HTML report of the cleanup.

    Targets tasks untouched for 30 days, 'Done' tasks older than 7 days, and
    tasks created over a day ago that never got an update.
    """
    stale_tasks = GetAll(
        """SELECT id, owner_id, taskclass, taskstate, taskstep, progresspct, progressmsg, inputparam, creationdate, lastupdate, questiondata, answerdata FROM temp_tasks where lastupdate<current_timestamp - interval '30 days' or ( lastupdate<current_timestamp - interval '7 days' and taskstate='Done' ) or ( creationdate<current_timestamp - interval '1 days' and lastupdate is null)"""
    )
    # Historical message text kept verbatim (including spelling).
    report = "Cleanning process result :<br>"
    for task in stale_tasks:
        report += DoTaskClean(task['id'])
    return report
def Prjpopupeditpreset(PrjId):
    """Popup listing every accessible project with its taxonomy preset.

    For each project, separates the saved preset (initclassiflist) from taxa
    actually used on objects but absent from the preset (objtaxon), resolving
    all ids to display names in one batched query.
    """
    sql = """select p.projid,title,initclassiflist ,(select string_agg(pts.id::varchar,',') objtaxon from projects_taxo_stat pts where pts.projid=p.projid) from projects p """
    if not current_user.has_role(database.AdministratorLabel):
        # Non-admins: restrict to projects they are a member of.
        sql += " Join projectspriv pp on p.projid = pp.projid and pp.member=%d" % (
            current_user.id, )
    sql += " order by upper(title) "
    Prj = GetAll(sql, cursor_factory=psycopg2.extras.RealDictCursor)
    # Prj2=Prj[:]
    # for i in range(200):Prj.extend(Prj2) # for test on bigger list
    # Gather every taxon id referenced by any project; -1 keeps the set non-empty.
    TaxonList = {-1}
    for P in Prj:
        lst = ntcv(P['initclassiflist']) + "," + ntcv(P['objtaxon'])
        for t in lst.split(','):
            if t.isdecimal():
                TaxonList.add(int(t))
    # txt=",".join((str(x) for x in TaxonList))
    # Resolve all ids to display names in a single query.
    TaxoMap = GetAssoc2Col(
        "select id,display_name from taxonomy where id = any (%s)",
        [[x for x in TaxonList]])
    # txt = str(TaxoMap)
    txt = ""
    for P in Prj:
        result = []
        initclassiflist = {
            int(x.strip())
            for x in ntcv(P['initclassiflist']).split(',') if x.isdecimal()
        }
        objtaxon = {
            int(x.strip())
            for x in ntcv(P['objtaxon']).split(',') if x.isdecimal()
        }
        # Taxa already in the preset are not repeated in the 'used' column.
        objtaxon.difference_update(initclassiflist)
        for t in initclassiflist:
            resolved = TaxoMap.get(int(t), None)
            if resolved:
                result.append(resolved)
        P['presetids'] = ",".join((str(x) for x in initclassiflist))
        P['preset'] = ", ".join(sorted(result))
        result = []
        for t in objtaxon:
            resolved = TaxoMap.get(int(t), None)
            if resolved:
                result.append(resolved)
        P['objtaxonnotinpreset'] = ", ".join(sorted(result))
        P['objtaxonids'] = ",".join((str(x) for x in objtaxon))
    return render_template('project/popupeditpreset.html', Prj=Prj, txt=txt)
def browsetaxoeditpopup(taxoid):
    """Edit popup for one taxon: row data plus parent name and lineage."""
    taxon_sql = """select t.*,i.name inst_name,url inst_url,concat(rt.display_name) rename_to_name ,p.display_name parentname,to_char(t.creation_datetime,'YYYY-MM-DD HH24:MI') creationdatetimefmt,{} from taxonomy t {} left join ecotaxainst i on i.id=t.id_instance left join taxonomy rt on t.rename_to=rt.id left join taxonomy p on t.parent_id=p.id where t.id = %(id)s """.format(SQLTreeSelect, SQLTreeJoin)
    rows = GetAll(taxon_sql, {'id': taxoid})
    taxon = rows[0]
    # Enum-like lists used by the template's select widgets.
    g.TaxoType = database.TaxoType
    g.TaxoStatus = database.TaxoStatus
    return render_template('browsetaxoeditpopup.html', taxon=taxon)
def SPStep1(self):
    """Step 1 of the database-export task: build ecotaxadb.zip.

    Dumps the schema with pg_dump, COPYs each table of ``table_list`` (rows
    restricted to self.param.ProjectIds for project-related tables) and adds
    the vault image files of the selected projects.
    """
    logging.info("Input Param = %s"%(self.param.__dict__))
    logging.info("Current directory = %s"%os.getcwd())
    # Catalog query used to establish the table list:
    # select ns.oid oidns, ns.nspname, c.relname,c.oid reloid
    # from pg_namespace ns
    # join pg_class c on relnamespace=ns.oid
    # where ns.nspname='public' and relkind='r'
    # table_list=("users",)  # for tests: export less data
    toolsdir=GetDBToolsDir()
    os.environ["PGPASSWORD"] = app.config['DB_PASSWORD']
    cmd=os.path.join( toolsdir,"pg_dump")
    cmd+=" -h "+app.config['DB_HOST']+" -U "+app.config['DB_USER']+" -p "+app.config.get('DB_PORT','5432')
    # -s/--schema-only, -F format, -f output file; LATIN1 to match the COPY files.
    cmd+=" --schema-only --format=p -f schema.sql -E LATIN1 -n public --no-owner --no-privileges --no-security-labels --no-tablespaces "+app.config['DB_DATABASE']+" >dumpschemaout.txt 2>&1"
    logging.info("Export Schema : %s",cmd)
    os.system(cmd)
    zfile=ZipFile('ecotaxadb.zip', 'w',allowZip64 = True,compression= zipfile.ZIP_DEFLATED)
    zfile.write('schema.sql')
    for t in table_list:
        ColList=GetAll("""select a.attname from pg_namespace ns
        join pg_class c on relnamespace=ns.oid
        join pg_attribute a on a.attrelid=c.oid
        where ns.nspname='public' and relkind='r' and a.attname not like '%.%' and attnum>0
        and c.relname='{0}' order by attnum""".format(t))
        logging.info("Save table %s"%t)
        with open("temp.copy","w",encoding='latin_1') as f:
            query="select %s from %s t"%(",".join(["t."+x[0] for x in ColList]),t)
            # Project-related tables are restricted to the selected projects.
            if t in ('projects','projectspriv',"process","acquisitions","samples","obj_head"):
                query+=" where projid in (%s)"%(self.param.ProjectIds,)
            if t in ('objectsclassifhisto','images'):
                query+=" join obj_head o on o.objid=t.objid where o.projid in (%s)"%(self.param.ProjectIds,)
            # BUG FIX: ("obj_field") is just a parenthesized string, so the
            # test was a substring check; the one-element tuple makes it a
            # real membership test.
            if t in ("obj_field",):
                query+=" join obj_head o on o.objid=t.objfid where o.projid in (%s)"%(self.param.ProjectIds,)
            self.pgcur.copy_to(f,"("+query+")")
        zfile.write("temp.copy",arcname=t+".copy")
    logging.info("Save Images")
    vaultroot=Path("../../vault")
    self.pgcur.execute("select imgid,file_name,thumb_file_name from images i join obj_head o on o.objid=i.objid where o.projid in (%s)"%(self.param.ProjectIds,))
    for r in self.pgcur:
        if r[1]:
            zfile.write(vaultroot.joinpath(r[1]).as_posix(),arcname="images/%s.img"%r[0])
        if r[2]:
            zfile.write(vaultroot.joinpath(r[2]).as_posix(),arcname="images/%s.thumb"%r[0])
    zfile.close()
    self.task.taskstate="Done"
    self.UpdateProgress(100,"Export successfull")
def routetaxobrowse():
    """Taxonomy management page for administrators and project creators.

    Optionally triggers a full stat sync (updatestat=Y), builds a 'back'
    button towards the originating project or import task, and lists this
    instance's taxa (all for admins, only own creations otherwise).
    """
    BackProjectBtn = ''
    if gvp('updatestat') == 'Y':
        # DoSyncStatUpdate()
        DoFullSync()
    if gvg('fromprj'):
        BackProjectBtn = "<a href='/prj/{}' class='btn btn-default btn-primary'>{} Back to project</a> ".format(
            int(gvg('fromprj')), FAIcon('arrow-left'))
    if gvg('fromtask'):
        BackProjectBtn = "<a href='/Task/Question/{}' class='btn btn-default btn-primary'>{} Back to importation task</a> ".format(
            int(gvg('fromtask')), FAIcon('arrow-left'))
    if not (current_user.has_role(database.AdministratorLabel)
            or current_user.has_role(database.ProjectCreatorLabel)):
        # TYPO FIX in the user-facing message: 'tanonomy' -> 'taxonomy'.
        txt = "You cannot create taxonomy category, you must request to your project manager (check project page)"
        if gvg('fromprj'):
            txt += "<br>" + BackProjectBtn
        return PrintInCharte(FormatError(txt, DoNotEscape=True))
    g.taxoserver_url = app.config.get('TAXOSERVER_URL')
    if current_user.has_role(database.AdministratorLabel):
        ExtraWhereClause = ""
    else:
        # Non-admins only see the taxa they created themselves.
        ExtraWhereClause = "and t.creator_email='{}'".format(current_user.email)
    lst = GetAll(
        """select t.id,t.parent_id,t.display_name as name,case t.taxotype when 'M' then 'Morpho' when 'P' then 'Phylo' else t.taxotype end taxotype,t.taxostatus,t.creator_email,t.id_source ,to_char(t.creation_datetime,'yyyy-mm-dd hh24:mi') creation_datetime,to_char(t.lastupdate_datetime,'yyyy-mm-dd hh24:mi') lastupdate_datetime,{} from taxonomy t {} where t.id_instance ={} {} order by case t.taxostatus when 'N' then 1 else 2 end,t.id LIMIT 400 """.format(SQLTreeSelect, SQLTreeJoin,
                           app.config.get('TAXOSERVER_INSTANCE_ID'),
                           ExtraWhereClause))
    for lstitem in lst:
        # lstitem['tree']=PackTreeTxt(lstitem['tree'])  # avoids issues with the 'safe' template filter
        if lstitem['parent_id'] is None:
            lstitem['parent_id'] = ""  # the template expects a string, not None
    # nbrtaxon=GetAll("select count(*) from taxonomy")[0][0]
    # return render_template('browsetaxo.html',lst=lst,nbrtaxon=nbrtaxon)
    return PrintInCharte(
        render_template('taxonomy/browse.html', lst=lst, BackProjectBtn=BackProjectBtn))
def RefreshTaxoStat():
    """Recompute cached object counters (nbrobj/nbrobjcum) on the taxonomy table.

    Clears the counters, refreshes projects_taxo_stat for every project, then
    propagates counts bottom-up through the taxonomy tree until stable
    (capped at 50 levels).
    """
    n = ExecSQL(
        "UPDATE taxonomy SET nbrobj=Null,nbrobjcum=null where nbrobj is NOT NULL or nbrobjcum is not null"
    )
    print("RefreshTaxoStat cleaned %d taxo" % n)
    print("Refresh projects_taxo_stat")
    for r in GetAll('select projid from projects'):
        RecalcProjectTaxoStat(r['projid'])
    # Direct counts: validated objects of visible projects, per category.
    n = ExecSQL("""UPDATE taxonomy SET nbrobj=q.nbr from (select id classif_id, sum(nbr_v) nbr from projects_taxo_stat pts join projects p on pts.projid=p.projid and p.visible=true where nbr_v>0 group by id )q where taxonomy.id=q.classif_id""")
    print("RefreshTaxoStat updated %d 1st level taxo" % (n))
    n = ExecSQL("""UPDATE taxonomy SET nbrobjcum=q.nbr from (select parent_id,sum(nbrobj) nbr from taxonomy where nbrobj is NOT NULL group by parent_id ) q where taxonomy.id=q.parent_id""")
    # TYPO FIX in the progress message: '2st' -> '2nd'.
    print("RefreshTaxoStat updated %d 2nd level taxo" % (n))
    # Propagate cumulative counts upward until a fixed point is reached.
    for i in range(50):
        n = ExecSQL("""UPDATE taxonomy SET nbrobjcum=q.nbr from (select parent_id,sum(nbrobjcum+coalesce(nbrobj,0)) nbr from taxonomy where nbrobjcum is NOT NULL group by parent_id ) q where taxonomy.id=q.parent_id and coalesce(taxonomy.nbrobjcum,0)<>q.nbr""")
        print("RefreshTaxoStat updated %d level %d taxo" % (n, i))
        if n == 0:
            break
    appli.part.prj.GlobalTaxoCompute()
def QuestionProcessScreenSelectSource(self,Prj):
    """First configuration screen: choose the learning-set (source) project(s).

    Offers to reuse the learning set saved in the project settings, then
    lists candidate projects with their validated-object count and the
    number of object features they share with the target project's mapping.
    """
    PreviousTxt = self.GetFilterText()
    d = DecodeEqualList(Prj.classifsettings)
    TargetFeatures = set(DecodeEqualList(Prj.mappingobj).values())
    PreviousLS=d.get("baseproject", "")
    if PreviousLS != "":
        # Previously used learning set, proposed as a one-click shortcut.
        BasePrj = GetAll("select projid,title from projects where projid in ({0})".format(PreviousLS))
        if len(BasePrj):
            PreviousTxt += """<a class='btn btn-primary' href='?{0}&src={1}'> USE previous Learning Set : {2}</a><br><br>OR USE another project<br><br>""".format(
                request.query_string.decode("utf-8"),PreviousLS,
                " + ".join(["#{0} - {1}".format(*r) for r in BasePrj]) )
    from flask_login import current_user
    sql = """select projid,title,round(coalesce(objcount,0)*coalesce(pctvalidated,0)/100) objvalid ,mappingobj,coalesce(cnn_network_id,'') cnn_network_id from projects where 1=1 """
    sqlparam=[]
    if gvp('filt_title'):
        sql += " and title ilike (%s) "
        sqlparam.append(("%"+gvp('filt_title')+"%"))
    if gvp('filt_instrum', '') != '':
        sql += " and projid in (select distinct projid from acquisitions where instrument ilike '%%'||(%s) ||'%%' ) "
        sqlparam.append(gvp('filt_instrum'))
    sql += " order by title"
    ProjList = database.GetAll(sql,sqlparam,doXSSEscape=True)
    TblBody=""
    for r in ProjList:
        # Number of object features this project shares with the target mapping.
        MatchingFeatures = len(set(DecodeEqualList(r['mappingobj']).values()) & TargetFeatures)
        # Skip projects sharing fewer features than the threshold (default 10).
        if MatchingFeatures<int(gvp("filt_featurenbr") if gvp("filt_featurenbr") else 10):
            continue
        TblBody += """<tr><td><input type='checkbox' class='selproj' data-prjid='{projid}'></td> <td>#{projid} - {title}</td><td>{objvalid:0.0f}</td><td>{MatchingFeatures}</td><td>{cnn_network_id}</td> </tr>""".format(MatchingFeatures=MatchingFeatures, **r)
    return render_template('task/classifauto2_create_lstproj.html'
                           ,url=request.query_string.decode('utf-8')
                           ,TblBody=TblBody
                           ,PreviousTxt=PreviousTxt)
def ResolveTaxoFound(TaxoFound, o_NotFoundTaxo):
    """Resolve the taxon names used as TaxoFound keys to taxonomy ids, in place.

    TaxoFound: dict keyed by lower-cased taxon name; on return each value is
    the matched taxonomy id, or None. Names matched zero times or more than
    once (ambiguous) are appended to o_NotFoundTaxo.
    """
    lowertaxonlist = []
    regexsearchparenthese = re.compile('(.+) \((.+)\)$')
    for lowertaxon in TaxoFound.keys():
        TaxoFound[lowertaxon] = {'nbr': 0, 'id': None}
        lowertaxonlist.append(lowertaxon)
        resregex = regexsearchparenthese.match(lowertaxon)
        if resregex:  # a trailing '(parent)' is also tried as 'name<parent'
            lowertaxonLT = resregex.group(1) + '<' + resregex.group(2)
            TaxoFound[lowertaxon]['alterdisplayname'] = lowertaxonLT
            lowertaxonlist.append(lowertaxonLT)
    # One query matching on name, display_name or 'name<parent-name'.
    TaxonsFromDB = GetAll(
        """SELECT t.id,lower(t.name) AS name,lower(t.display_name) display_name,lower(t.name)||'<'||lower(p.name) AS computedchevronname FROM taxonomy t LEFT JOIN taxonomy p on t.parent_id=p.id WHERE lower(t.name) = ANY(%s) OR lower(t.display_name) = ANY(%s) or lower(t.name)||'<'||lower(p.name) = ANY(%s) """,
        [lowertaxonlist, lowertaxonlist, lowertaxonlist])
    # Count how many DB taxa each requested name matches.
    for recTaxon in TaxonsFromDB:
        for FoundK, FoundV in TaxoFound.items():
            if (FoundK == recTaxon['name']) or (FoundK == recTaxon['display_name']) or (FoundK == recTaxon['computedchevronname']) \
                    or (('alterdisplayname' in FoundV) and (FoundV['alterdisplayname'] == recTaxon['display_name'])):
                TaxoFound[FoundK]['nbr'] += 1
                TaxoFound[FoundK]['id'] = recTaxon['id']
    logging.info("Taxo Found = %s", TaxoFound)
    for FoundK, FoundV in TaxoFound.items():
        if FoundV['nbr'] == 0:
            logging.info("Taxo '%s' Not Found", FoundK)
            o_NotFoundTaxo.append(FoundK)
        elif FoundV['nbr'] > 1:
            logging.info("Taxo '%s' Found more than once", FoundK)
            # Ambiguous matches are treated like not found.
            o_NotFoundTaxo.append(FoundK)
            TaxoFound[FoundK]['id'] = None
    # Finally keep only the id; the other fields were temporary.
    for FoundK, FoundV in TaxoFound.items():
        TaxoFound[FoundK] = FoundV[
            'id']
def routetaxoedit(taxoid):
    """Render the taxonomy edit page for the taxon identified by `taxoid`.

    A positive id loads the existing record (with parent display name,
    formatted creation timestamp and the tree columns); id 0 builds a blank
    record pre-filled with the current user's email and the current UTC time.
    """
    query = """select t.* ,p.display_name parentname,to_char(t.creation_datetime,'YYYY-MM-DD HH24:MI:SS') creationdatetimefmt,{} from taxonomy t {} left join taxonomy p on t.parent_id=p.id where t.id = %(id)s """.format(SQLTreeSelect, SQLTreeJoin)
    if taxoid <= 0:
        # New taxon: synthesize an empty record.
        taxon = {
            'id': 0,
            'creator_email': current_user.email,
            'tree': '',
            'creationdatetimefmt': datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
        }
    else:
        taxon = GetAll(query, {'id': taxoid})[0]
    g.TaxoType = database.TaxoType
    g.taxoserver_url = app.config.get('TAXOSERVER_URL')
    return render_template('taxonomy/edit.html', taxon=taxon)
def SPStep1(self):
    """Step 1 of the TSV update task: analyze the input files.

    Sub-step 0 unzips the input (or points at the source folder), sub-step 1
    parses every ecotaxa*.txt/tsv file to build/extend the project field
    mapping and validate row values, sub-step 2 resolves annotation user
    names and taxon names against the database. When everything resolves
    cleanly (and there is no warning) it chains directly into SPStep2();
    otherwise the task goes to the "Question"/"Error" state.

    Bug fixed: the "Invalid Type" error message referenced `LType[champ]`,
    but `champ` is a comprehension variable that does not leak in Python 3,
    so an invalid type column crashed with NameError instead of being
    reported to the user; it now uses `LType[ColName]`.
    """
    logging.info("Input Param = %s" % (self.param.__dict__))
    logging.info("Start Step 1")
    Prj = database.Projects.query.filter_by(projid=self.param.ProjectId).first()
    WarnMessages = []
    if getattr(self.param, 'IntraStep', 0) == 0:
        # Sub-step 0: unzip the archive, or point directly at the source folder.
        if self.param.InData.lower().endswith("zip"):
            logging.info("SubTask0 : Unzip File on temporary folder")
            self.UpdateProgress(1, "Unzip File on temporary folder")
            self.param.SourceDir = os.path.normpath(os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "../../temptask/task%06d/data" % (int(self.task.id))))
            if not os.path.exists(self.param.SourceDir):
                os.mkdir(self.param.SourceDir)
            with zipfile.ZipFile(self.param.InData, 'r') as z:
                z.extractall(self.param.SourceDir)
        else:
            self.param.SourceDir = self.param.InData
        self.param.IntraStep = 1
    if self.param.IntraStep == 1:
        self.param.Mapping = {}  # Reset on every attempt
        # Import the mapping already defined on the project.
        for k, v in DecodeEqualList(Prj.mappingobj).items():
            self.param.Mapping['object_' + v] = {'table': 'obj_field', 'title': v, 'type': k[0], 'field': k}
        for k, v in DecodeEqualList(Prj.mappingsample).items():
            self.param.Mapping['sample_' + v] = {'table': 'sample', 'title': v, 'type': k[0], 'field': k}
        for k, v in DecodeEqualList(Prj.mappingacq).items():
            self.param.Mapping['acq_' + v] = {'table': 'acq', 'title': v, 'type': k[0], 'field': k}
        for k, v in DecodeEqualList(Prj.mappingprocess).items():
            self.param.Mapping['process_' + v] = {'table': 'process', 'title': v, 'type': k[0], 'field': k}
        ProjectWasEmpty = len(self.param.Mapping) == 0
        self.param.TaxoFound = {}  # Reset on every attempt
        self.param.UserFound = {}  # Reset on every attempt
        self.param.steperrors = []  # Reset the errors
        # Version 1.1 normalizes the 'acq_instrument' field: if it was mapped
        # onto a tXX column, rename it so the update targets the instrument field.
        if 'acq_instrument' in self.param.Mapping:
            self.param.Mapping['acq_instrument_old'] = self.param.Mapping['acq_instrument']
            self.param.Mapping['acq_instrument_old']['title'] = 'instrument_old'
            del self.param.Mapping['acq_instrument']
        logging.info("SubTask1 : Analyze TSV Files")
        self.UpdateProgress(2, "Analyze TSV Files")
        self.LastNum = {x: {'n': 0, 't': 0} for x in PredefinedTables}
        # Extract the highest used number per table/type (nXX / tXX columns).
        for m in self.param.Mapping.values():
            v = int(m['field'][1:])
            if v > self.LastNum[m['table']][m['field'][0]]:
                self.LastNum[m['table']][m['field'][0]] = v
        sd = Path(self.param.SourceDir)
        self.param.TotalRowCount = 0
        Seen = set()  # Columns actually present in the files
        ClassifIDSeen = set()
        for filter in ("**/ecotaxa*.txt", "**/ecotaxa*.tsv"):
            for CsvFile in sd.glob(filter):
                relname = CsvFile.relative_to(sd)  # relative name, display only
                logging.info("Analyzing file %s" % (relname.as_posix()))
                with open(CsvFile.as_posix(), encoding='latin_1') as csvfile:
                    # Dict-mode reading based on the first (header) line.
                    rdr = csv.DictReader(csvfile, delimiter='\t', quotechar='"')
                    # Read the type line (2nd line of the file).
                    LType = {champ.strip(" \t").lower(): v for champ, v in rdr.__next__().items()}
                    # Build the mapping from the header columns.
                    ListeChamps = [champ.strip(" \t").lower() for champ in rdr.fieldnames]
                    for ColName in ListeChamps:
                        if ColName in self.param.Mapping or ColName in ('object_annotation_parent_category', 'object_annotation_hierarchy'):
                            continue  # Already known, OR an export-only column banned from import.
                        ColSplitted = ColName.split("_", 1)
                        if len(ColSplitted) != 2:
                            self.LogErrorForUser("Invalid Header '%s' in file %s. Format must be Table_Field. Field ignored" % (ColName, relname.as_posix()))
                            continue
                        Table = ColSplitted[0]  # table part before the first _
                        if ColName in PredefinedFields:
                            Table = PredefinedFields[ColName]['table']
                            self.param.Mapping[ColName] = PredefinedFields[ColName]
                        else:  # non-predefined fields go into nXX or tXX columns
                            if Table == "object":
                                Table = "obj_field"
                            if Table not in PredefinedTables:
                                self.LogErrorForUser("Invalid Header '%s' in file %s. Table Incorrect. Field ignored" % (ColName, relname.as_posix()))
                                continue
                            if Table != 'obj_head' and Table != 'obj_field':
                                # In the other tables types are forced to text.
                                SelType = 't'
                            else:
                                if LType[ColName] not in PredefinedTypes:
                                    # FIX: was LType[champ] — NameError in Python 3.
                                    self.LogErrorForUser("Invalid Type '%s' for Field '%s' in file %s. Incorrect Type. Field ignored" % (LType[ColName], ColName, relname.as_posix()))
                                    continue
                                SelType = PredefinedTypes[LType[ColName]]
                            self.LastNum[Table][SelType] += 1
                            self.param.Mapping[ColName] = {
                                'table': Table,
                                'field': SelType + "%02d" % self.LastNum[Table][SelType],
                                'type': SelType,
                                'title': ColSplitted[1]
                            }
                            logging.info("New field %s found in file %s", ColName, relname.as_posix())
                            if not ProjectWasEmpty:
                                WarnMessages.append("New field %s found in file %s" % (ColName, relname.as_posix()))
                    # Validate the file content row by row.
                    RowCount = 0
                    for lig in rdr:
                        RowCount += 1
                        for champ in rdr.fieldnames:
                            ColName = champ.strip(" \t").lower()
                            m = self.param.Mapping.get(ColName, None)
                            if m is None:
                                continue  # Column not considered
                            v = CleanValue(lig[champ])
                            # V2.0: a present column counts as Seen even without values.
                            Seen.add(ColName)
                            if v != "":  # empty values are not checked
                                if m['type'] == 'n':
                                    vf = ToFloat(v)
                                    if vf is None:
                                        self.LogErrorForUser("Invalid float value '%s' for Field '%s' in file %s." % (v, champ, relname.as_posix()))
                                    elif ColName == 'object_lat':
                                        if vf < -90 or vf > 90:
                                            self.LogErrorForUser("Invalid Lat. value '%s' for Field '%s' in file %s. Incorrect range -90/+90°." % (v, champ, relname.as_posix()))
                                    elif ColName == 'object_long':
                                        if vf < -180 or vf > 180:
                                            self.LogErrorForUser("Invalid Long. value '%s' for Field '%s' in file %s. Incorrect range -180/+180°." % (v, champ, relname.as_posix()))
                                    elif ColName == 'object_annotation_category_id':
                                        if self.param.updateclassif == "Y":
                                            ClassifIDSeen.add(int(v))
                                elif ColName == 'object_date':
                                    try:
                                        datetime.date(int(v[0:4]), int(v[4:6]), int(v[6:8]))
                                    except ValueError:
                                        self.LogErrorForUser("Invalid Date value '%s' for Field '%s' in file %s." % (v, champ, relname.as_posix()))
                                elif ColName == 'object_time':
                                    try:
                                        v = v.zfill(6)
                                        datetime.time(int(v[0:2]), int(v[2:4]), int(v[4:6]))
                                    except ValueError:
                                        self.LogErrorForUser("Invalid Time value '%s' for Field '%s' in file %s." % (v, champ, relname.as_posix()))
                                elif ColName == 'object_annotation_category':
                                    if self.param.updateclassif == "Y":
                                        # Only used when no numeric ID was given.
                                        if CleanValue(lig.get('object_annotation_category_id', '')) == '':
                                            v = self.param.TaxoMap.get(v, v)  # apply the taxo mapping
                                            self.param.TaxoFound[v.lower()] = None  # create a dict entry
                                elif ColName == 'object_annotation_person_name':
                                    if self.param.updateclassif == "Y":
                                        self.param.UserFound[v.lower()] = {'email': CleanValue(lig.get('object_annotation_person_email', ''))}
                                elif ColName == 'object_annotation_status':
                                    if self.param.updateclassif == "Y":
                                        if v != 'noid' and v.lower() not in database.ClassifQualRevert:
                                            self.LogErrorForUser("Invalid Annotation Status '%s' for Field '%s' in file %s." % (v, champ, relname.as_posix()))
                        # Check that the object identifier is present.
                        ObjectId = CleanValue(lig.get('object_id', ''))
                        if ObjectId == '':
                            self.LogErrorForUser("Missing object_id on line '%s' in file %s. " % (RowCount, relname.as_posix()))
                    logging.info("File %s : %d row analysed", relname.as_posix(), RowCount)
                    self.param.TotalRowCount += RowCount
        if self.param.TotalRowCount == 0:
            self.LogErrorForUser("No object found")
        if len(ClassifIDSeen) > 0:
            # Verify every referenced classif_id exists in the taxonomy.
            ClassifIDFoundInDB = GetAll("select id from taxonomy where id = any (%s)", [list(ClassifIDSeen)])
            ClassifIDFoundInDB = {int(r['id']) for r in ClassifIDFoundInDB}
            ClassifIDNotFoundInDB = ClassifIDSeen.difference(ClassifIDFoundInDB)
            if len(ClassifIDNotFoundInDB) > 0:
                msg = "Some specified classif_id doesn't exists, correct them prior to reload %s" % (",".join([str(x) for x in ClassifIDNotFoundInDB]))
                self.param.steperrors.append(msg)
                logging.error(msg)
        self.UpdateProgress(15, "TSV File Parsed" % ())
        logging.info("Taxo Found = %s", self.param.TaxoFound)
        logging.info("Users Found = %s", self.param.UserFound)
        logging.info("For Information Not Seen Fields %s", [k for k in self.param.Mapping if k not in Seen])
        if len(self.param.steperrors) > 0:
            self.task.taskstate = "Error"
            self.task.progressmsg = "Some errors founds during file parsing "
            db.session.commit()
            return
        self.param.IntraStep = 2
    if self.param.IntraStep == 2:
        logging.info("Start Sub Step 1.2")
        # Resolve annotator names via name or email.
        self.pgcur.execute(
            "select id,lower(name),lower(email) from users where lower(name) = any(%s) or email= any(%s) ",
            ([x for x in self.param.UserFound.keys()],
             [x.get('email') for x in self.param.UserFound.values()]))
        for rec in self.pgcur:
            for u in self.param.UserFound:
                if u == rec[1] or ntcv(self.param.UserFound[u].get('email')).lower() == rec[2]:
                    self.param.UserFound[u]['id'] = rec[0]
        logging.info("Users Found = %s", self.param.UserFound)
        NotFoundUser = [k for k, v in self.param.UserFound.items() if v.get("id") == None]
        if len(NotFoundUser) > 0:
            logging.info("Some Users Not Found = %s", NotFoundUser)
        # Resolve the ids of the taxa that were seen.
        NotFoundTaxo = []
        ResolveTaxoFound(self.param.TaxoFound, NotFoundTaxo)
        if len(NotFoundTaxo) > 0:
            logging.info("Some Taxo Not Found = %s", NotFoundTaxo)
        if len(NotFoundUser) == 0 and len(NotFoundTaxo) == 0 and len(WarnMessages) == 0:
            # Everything already resolved: chain directly into phase 2.
            self.SPStep2()
        else:
            self.task.taskstate = "Question"
            if len(WarnMessages) > 0:
                self.UpdateProgress(20, "Taxo automatic resolution Done, <span style='color:red;font-weight:bold;'>Some Warning :\n-%s </span>" % ("\n-".join(WarnMessages)))
            else:
                self.UpdateProgress(20, "Taxo automatic resolution Done" % ())
def SPStep2(self):
    """Step 2 of the TSV update task: effective data import.

    Persists the field mapping on the project, then re-reads every
    ecotaxa*.txt/tsv file and applies the values row by row: updates (or
    creates) the sample/acquisition/process rows, recomputes the sun
    position, and updates the existing object header + free fields.
    Rows whose object_id is unknown in the project are skipped.
    Finally refreshes image counters, sample lat/long, and project stats.
    """
    logging.info("Start Step 2 : Effective data import")
    logging.info("Taxo Mapping = %s", self.param.TaxoFound)
    logging.info("Users Mapping = %s", self.param.UserFound)
    # Cache of the last astral (sun position) computation, keyed by
    # date/time/long/lat, to avoid recomputing for identical rows.
    AstralCache = {'date': None, 'time': None, 'long': None, 'lat': None, 'r': ''}
    # Persist the (possibly extended) mapping on the project record.
    Prj = database.Projects.query.filter_by(projid=self.param.ProjectId).first()
    Prj.mappingobj = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'obj_field' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    Prj.mappingsample = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'sample' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    Prj.mappingacq = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'acq' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    Prj.mappingprocess = EncodeEqualList({
        v['field']: v.get('title') for k, v in self.param.Mapping.items()
        if v['table'] == 'process' and v['field'][0] in ('t', 'n') and v.get('title') != None})
    db.session.commit()
    Ids = {
        "acq": {"tbl": "acquisitions", "pk": "acquisid"},
        "sample": {"tbl": "samples", "pk": "sampleid"},
        "process": {"tbl": "process", "pk": "processid"}
    }
    # Fetch the orig_id -> pk maps of the acq/sample/process tables.
    for i in Ids:
        sql = "select orig_id," + Ids[i]['pk'] + " from " + Ids[i]['tbl'] + " where projid=" + str(self.param.ProjectId)
        Ids[i]["ID"] = {}
        for r in GetAll(sql):
            Ids[i]["ID"][r[0]] = int(r[1])
    # Fetch the orig_id -> objid map of the project's objects.
    self.ExistingObject = {}
    self.pgcur.execute("SELECT o.orig_id,o.objid from objects o where o.projid=" + str(self.param.ProjectId))
    for rec in self.pgcur:
        self.ExistingObject[rec[0]] = rec[1]
    # logging.info("Ids = %s",Ids)
    random.seed()
    RealTableName = {
        "acq": 'acquisitions', "sample": 'samples', "process": 'process',
        "obj_head": "obj_head", "obj_field": "obj_field"
    }
    sd = Path(self.param.SourceDir)
    TotalRowCount = 0
    for filter in ("**/ecotaxa*.txt", "**/ecotaxa*.tsv"):
        for CsvFile in sd.glob(filter):
            relname = CsvFile.relative_to(sd)  # relative name, display only
            logging.info("Analyzing file %s" % (relname.as_posix()))
            with open(CsvFile.as_posix(), encoding='latin_1') as csvfile:
                # Dict-mode reading based on the first (header) line.
                rdr = csv.DictReader(csvfile, delimiter='\t', quotechar='"')
                # Read the type line (2nd line of the file).
                LType = {champ.strip(" \t").lower(): v for champ, v in rdr.__next__().items()}
                ListeChamps = [champ.strip(" \t").lower() for champ in rdr.fieldnames]
                # Load the file content.
                RowCount = 0
                for rawlig in rdr:
                    lig = {champ.strip(" \t").lower(): v for champ, v in rawlig.items()}
                    # Temporary per-row value holders, one per target table.
                    Objs = {
                        "acq": self.EmptyObject(), "sample": self.EmptyObject(),
                        "process": self.EmptyObject(), "obj_head": self.EmptyObject(),
                        "obj_field": self.EmptyObject()
                    }
                    # Fresh ORM rows, used only when the auxiliary record must be created.
                    ObjsNew = {
                        "acq": database.Acquisitions(), "sample": database.Samples(),
                        "process": database.Process()
                    }
                    RowCount += 1
                    TotalRowCount += 1
                    if 'object_annotation_category_id' in ListeChamps and 'object_annotation_category' in ListeChamps:
                        if CleanValue(lig.get('object_annotation_category_id', '')) != '':
                            del lig['object_annotation_category']  # when an ID is given, ignore the text
                    for champ in ListeChamps:
                        ColName = champ.strip(" \t").lower()
                        m = self.param.Mapping.get(ColName, None)
                        if m is None:
                            continue  # Column not considered
                        FieldName = m.get("field", None)
                        FieldTable = m.get("table", None)
                        FieldValue = None
                        v = CleanValue(lig.get(champ))
                        if v != "":  # no value -> leave the field null
                            if m['type'] == 'n':
                                if champ == 'object_annotation_category_id':
                                    if self.param.updateclassif != "Y":
                                        continue
                                FieldValue = ToFloat(v)
                            elif champ == 'object_date':
                                FieldValue = datetime.date(int(v[0:4]), int(v[4:6]), int(v[6:8]))
                            elif champ == 'object_time':
                                v = v.zfill(6)
                                FieldValue = datetime.time(int(v[0:2]), int(v[2:4]), int(v[4:6]))
                            elif FieldName == 'classif_when':
                                if self.param.updateclassif != "Y":
                                    continue
                                v2 = CleanValue(lig.get('object_annotation_time', '000000')).zfill(6)
                                FieldValue = datetime.datetime(int(v[0:4]), int(v[4:6]), int(v[6:8]),
                                                               int(v2[0:2]), int(v2[2:4]), int(v2[4:6]))
                            elif FieldName == 'classif_id':
                                if self.param.updateclassif != "Y":
                                    continue
                                # Numeric ids are handled by the type=='n' branch above.
                                FieldValue = self.param.TaxoFound[ntcv(v).lower()]
                            elif FieldName == 'classif_who':
                                if self.param.updateclassif != "Y":
                                    continue
                                FieldValue = self.param.UserFound[ntcv(v).lower()].get('id', None)
                            elif FieldName == 'classif_qual':
                                if self.param.updateclassif != "Y":
                                    continue
                                FieldValue = database.ClassifQualRevert.get(v.lower())
                            else:  # plain text field, nothing special
                                FieldValue = v
                        # if FieldTable in ('image', 'sample'):
                        if FieldTable in ('image', ):
                            pass  # image fields are ignored during an update
                        elif FieldTable in Objs:
                            if FieldName in db.metadata.tables[RealTableName[FieldTable]].columns:
                                setattr(Objs[FieldTable], FieldName, FieldValue)
                                # logging.info("setattr %s %s %s",FieldTable,FieldName,FieldValue)
                            # else:
                            #     logging.info("skip F %s %s %s",FieldTable,FieldName,FieldValue)
                        else:
                            logging.info("skip T %s %s %s", FieldTable, FieldName, FieldValue)
                    # Sun position computation (cached on identical date/time/position).
                    if (hasattr(Objs["obj_head"], 'objdate') and hasattr(Objs["obj_head"], 'objtime')
                            and hasattr(Objs["obj_head"], 'longitude') and hasattr(Objs["obj_head"], 'latitude')):
                        if not (AstralCache['date']==Objs["obj_head"].objdate and AstralCache['time']==Objs["obj_head"].objtime \
                                and AstralCache['long']==Objs["obj_head"].longitude and AstralCache['lat']==Objs["obj_head"].latitude) :
                            AstralCache = {
                                'date': Objs["obj_head"].objdate, 'time': Objs["obj_head"].objtime,
                                'long': Objs["obj_head"].longitude, 'lat': Objs["obj_head"].latitude, 'r': ''
                            }
                            from astral import AstralError
                            try:
                                AstralCache['r'] = appli.CalcAstralDayTime(
                                    AstralCache['date'], AstralCache['time'],
                                    AstralCache['lat'], AstralCache['long'])
                            except AstralError as e:
                                # In some places on the globe there is no day/night
                                # transition on some days, which raises an error.
                                app.logger.error("Astral error : %s for %s", e, AstralCache)
                            except Exception as e:
                                # Other errors, e.g. when the time is invalid.
                                app.logger.error("Astral error : %s for %s", e, AstralCache)
                        Objs["obj_head"].sunpos = AstralCache['r']
                    # Assign the Sample, Acq & Process ids; create the records when needed.
                    for t in Ids:
                        if hasattr(Objs[t], 'orig_id') and Objs[t].orig_id is not None:
                            # Some columns are defined for this auxiliary table.
                            if Objs[t].orig_id in Ids[t]["ID"]:
                                # References an existing auxiliary record: fetch it.
                                if t == 'sample':
                                    ObjAux = database.Samples.query.filter_by(
                                        sampleid=Ids[t]["ID"][Objs[t].orig_id]).first()
                                elif t == 'acq':
                                    ObjAux = database.Acquisitions.query.filter_by(
                                        acquisid=Ids[t]["ID"][Objs[t].orig_id]).first()
                                elif t == 'process':
                                    ObjAux = database.Process.query.filter_by(
                                        processid=Ids[t]["ID"][Objs[t].orig_id]).first()
                                else:
                                    ObjAux = None
                                if ObjAux is not None:
                                    # Copy every TSV-provided value that changed
                                    # (committed together with the object below).
                                    for attr, value in Objs[t].__dict__.items():
                                        if hasattr(ObjAux, attr) and getattr(ObjAux, attr) != value:
                                            setattr(ObjAux, attr, value)
                                setattr(Objs["obj_head"], Ids[t]["pk"], Ids[t]["ID"][Objs[t].orig_id])
                            else:
                                # Copy the row data into a new auxiliary record.
                                for attr, value in Objs[t].__dict__.items():
                                    if hasattr(ObjsNew[t], attr):
                                        setattr(ObjsNew[t], attr, value)
                                Objs[t] = ObjsNew[t]
                                Objs[t].projid = self.param.ProjectId
                                db.session.add(Objs[t])
                                db.session.commit()
                                Ids[t]["ID"][Objs[t].orig_id] = getattr(Objs[t], Ids[t]["pk"])
                                setattr(Objs["obj_head"], Ids[t]["pk"], Ids[t]["ID"][Objs[t].orig_id])
                                logging.info("IDS %s %s", t, Ids[t])
                    # Look the object up; only existing objects can be updated.
                    if Objs["obj_field"].orig_id in self.ExistingObject:
                        # Update the free fields.
                        ObjField = database.ObjectsFields.query.filter_by(
                            objfid=self.ExistingObject[Objs["obj_field"].orig_id]).first()
                        for attr, value in Objs["obj_field"].__dict__.items():
                            if hasattr(ObjField, attr):
                                setattr(ObjField, attr, value)
                        # Update the header.
                        ObjHead = database.Objects.query.filter_by(
                            objid=self.ExistingObject[Objs["obj_field"].orig_id]).first()
                        for attr, value in Objs["obj_head"].__dict__.items():
                            if hasattr(ObjHead, attr):
                                setattr(ObjHead, attr, value)
                        db.session.commit()
                    else:
                        logging.warning("Object '%s' not found, can't be updated, row skipped",
                                        Objs["obj_field"].orig_id)
                        continue
                        # Object creation path, disabled in this update task:
                        # Objs["obj_head"].projid=self.param.ProjectId
                        # Objs["obj_head"].random_value=random.randint(1,99999999)
                        # Objs["obj_head"].img0id=Objs["image"].imgid
                        # db.session.add(Objs["obj_head"])
                        # db.session.commit()
                        # Objs["obj_field"].objfid=Objs["obj_head"].objid
                        # db.session.add(Objs["obj_field"])
                        # db.session.commit()
                        # self.ExistingObject[Objs["obj_field"].orig_id]=Objs["obj_head"].objid
                        # Triggers a select object unless 'expire_on_commit':False
                    if (TotalRowCount % 100) == 0:
                        self.UpdateProgress(100 * TotalRowCount / self.param.TotalRowCount,
                                            "Processing files %d/%d" % (TotalRowCount, self.param.TotalRowCount))
                logging.info("File %s : %d row Processed", relname.as_posix(), RowCount)
    db.session.commit()
    # Refresh per-object image counters and first image.
    self.pgcur.execute("""update obj_head o set imgcount=(select count(*) from images where objid=o.objid) ,img0id=(select imgid from images where objid=o.objid order by imgrank asc limit 1 ) where projid=""" + str(self.param.ProjectId))
    self.pgcur.connection.commit()
    # Refresh sample-level latitude/longitude from their objects.
    self.pgcur.execute(
        """update samples s set latitude=sll.latitude,longitude=sll.longitude from (select o.sampleid,min(o.latitude) latitude,min(o.longitude) longitude from obj_head o where projid=%(projid)s and o.latitude is not null and o.longitude is not null group by o.sampleid) sll where s.sampleid=sll.sampleid and projid=%(projid)s """,
        {'projid': self.param.ProjectId})
    self.pgcur.connection.commit()
    appli.project.main.RecalcProjectTaxoStat(Prj.projid)
    appli.project.main.UpdateProjectStat(Prj.projid)
    self.task.taskstate = "Done"
    self.UpdateProgress(100, "Processing done")
def PrjMerge(PrjId):
    """Merge (fuse) another project into project `PrjId`.

    Three-phase flow driven by query-string arguments:
      - no 'src': show the list of candidate source projects;
      - 'src' set: show mapping-mismatch warnings and ask for confirmation;
      - 'merge=Y': move all child rows and privileges to the target project,
        then delete the source project.
    Requires Admin right (level 2) on both projects.
    """
    Prj = database.Projects.query.filter_by(projid=PrjId).first()
    if Prj is None:
        flash("Project doesn't exists", 'error')
        return PrintInCharte("<a href=/prj/>Select another project</a>")
    if not Prj.CheckRight(2):  # Level 0 = Read, 1 = Annotate, 2 = Admin
        flash('You cannot edit settings for this project', 'error')
        return PrintInCharte("<a href=/prj/>Select another project</a>")
    g.headcenter = "<h4><a href='/prj/{0}'>{1}</a></h4>".format(Prj.projid, XSSEscape(Prj.title))
    txt = "<h3>Project Merge / Fusion </h3>"
    if not gvg('src'):
        # Phase 1: no source selected yet — list the candidate projects.
        txt += """<ul><li>You are allowed to merge projects that you are allowed to manage
<li>User privileges from both projects will be added
<li>This tool allow to merge two projects in a single projet (called Current project). The added project will then be automatically deleted. If object data are not consistent between both projects :
<ul><li>New data fields are added to the Current project
<li>The resulting project will thus contain partially documented datafields.
</ul><li>Note : Next screen will indicate compatibility issues (if exists) and allow you to Confirm the merging operation.
</ul>
"""
        sql = "select p.projid,title,status,coalesce(objcount,0) objcount,coalesce(pctvalidated,0) pctvalidated,coalesce(pctclassified,0) pctclassified from projects p"
        if not current_user.has_role(database.AdministratorLabel):
            # Non-admins only see projects they are a member of.
            sql += " Join projectspriv pp on p.projid = pp.projid and pp.member=%d" % (current_user.id, )
        sql += " where p.projid!=%d order by title" % Prj.projid
        res = GetAll(sql, doXSSEscape=True)  # ,debug=True
        txt += """<table class='table table-bordered table-hover table-verycondensed'>
<tr><th width=120>ID</td><th>Title</td><th width=100>Status</th><th width=100>Nbr Obj</th>
<th width=100>% Validated</th><th width=100>% Classified</th></tr>"""
        for r in res:
            txt += """<tr><td><a class="btn btn-primary" href='/prj/merge/{activeproject}?src={projid}'>Select</a> {projid}</td>
<td>{title}</td>
<td>{status}</td>
<td>{objcount:0.0f}</td>
<td>{pctvalidated:0.2f}</td>
<td>{pctclassified:0.2f}</td>
</tr>""".format(activeproject=Prj.projid, **r)
        txt += "</table>"
        return PrintInCharte(txt)
    PrjSrc = database.Projects.query.filter_by(projid=int(gvg('src'))).first()
    if PrjSrc is None:
        flash("Source project doesn't exists", 'error')
        return PrintInCharte("<a href=/prj/>Select another project</a>")
    if not PrjSrc.CheckRight(2):  # Level 0 = Read, 1 = Annotate, 2 = Admin
        flash('You cannot merge for this project', 'error')
        return PrintInCharte("<a href=/prj/>Select another project</a>")
    txt += """<h4>Source Project : {0} - {1} (This project will be destroyed)</h4>
""".format(PrjSrc.projid, XSSEscape(PrjSrc.title))
    if not gvg('merge'):
        # Phase 2: source chosen and checked — warn on mapping mismatches, ask to confirm.
        if PrjSrc.mappingobj != Prj.mappingobj:
            flash("Object mapping differ With source project ", "warning")
        if PrjSrc.mappingsample != Prj.mappingsample:
            flash("Sample mapping differ With source project ", "warning")
        if PrjSrc.mappingacq != Prj.mappingacq:
            flash("Acquisition mapping differ With source project ", "warning")
        if PrjSrc.mappingprocess != Prj.mappingprocess:
            flash("Process mapping differ With source project ", "warning")
        txt += FormatError(""" <span class='glyphicon glyphicon-warning-sign'></span>
Warning project {1} - {2}<br>
Will be destroyed, its content will be transfered in the target project.<br>
This operation is irreversible</p>
<br><a class='btn btn-lg btn-warning' href='/prj/merge/{0}?src={1}&merge=Y'>Start Project Fusion</a>
""", Prj.projid, PrjSrc.projid, XSSEscape(PrjSrc.title), DoNotEscape=True)
        return PrintInCharte(txt)
    if gvg('merge') == 'Y':
        # Phase 3: actually move every child row to the target project.
        ExecSQL("update acquisitions set projid={0} where projid={1}".format(Prj.projid, PrjSrc.projid))
        ExecSQL("update process set projid={0} where projid={1}".format(Prj.projid, PrjSrc.projid))
        ExecSQL("update samples set projid={0} where projid={1}".format(Prj.projid, PrjSrc.projid))
        ExecSQL("update obj_head set projid={0} where projid={1}".format(Prj.projid, PrjSrc.projid))
        ExecSQL("update part_projects set projid={0} where projid={1}".format(Prj.projid, PrjSrc.projid))
        # Keep the highest privilege of the two projects for common members.
        ExecSQL("""UPDATE projectspriv ppdst set privilege=case when 'Manage' in (ppsrc.privilege,ppdst.privilege) then 'Manage'
when 'Annotate' in (ppsrc.privilege,ppdst.privilege) then 'Annotate' else 'View' end
from projectspriv ppsrc where ppsrc.projid={1} and ppdst.projid={0} and ppsrc.member=ppdst.member"""
                .format(Prj.projid, PrjSrc.projid), debug=True)
        # Transfer privileges from the source project.
        ExecSQL("""update projectspriv set projid={0} where projid={1} and member not in (select member from projectspriv where projid={0})"""
                .format(Prj.projid, PrjSrc.projid))
        # Delete those that existed on both sides.
        ExecSQL("delete from projectspriv where projid={0}".format(PrjSrc.projid))
        ExecSQL("delete from projects where projid={0}".format(PrjSrc.projid))
        appli.project.main.RecalcProjectTaxoStat(Prj.projid)
        appli.project.main.UpdateProjectStat(Prj.projid)
        txt += "<div class='alert alert-success' role='alert'>Fusion Done successfully</div>"
        txt += "<br><a class='btn btn-lg btn-primary' href='/prj/%s'>Back to target project</a>" % Prj.projid
        return PrintInCharte(txt)
def doimporttext():
    """Import a taxonomy export TSV into the taxonomy table.

    The uploaded file (idparent, idtaxo, name, status, typetaxo columns,
    tab-separated) is bulk-loaded into temp_taxo, then synchronized with
    the taxonomy table: existing nodes are matched by id_source or name,
    new root nodes then child nodes are inserted level by level, and
    names/parents of existing nodes are updated. Display names are
    recomputed at the end.
    """
    # test with D:\temp\Downloads\taxoexport_20181228_101007.tsv
    txt = ""
    uploadfile = request.files.get("uploadfile")
    if uploadfile is None:
        return PrintInCharte(FormatError("You must send a file"))
    Desctxt = "{0} on {1:%Y-%m-%d %H:%M:%S} ".format(uploadfile.filename, datetime.datetime.now())
    app.logger.info('Load file {} by {}'.format(uploadfile.filename, current_user))
    # Create a temporary file that deletes itself automatically.
    tmpfile = tempfile.TemporaryFile(mode='w+b')
    # app.logger.info('TMP file is {}'.format(tmpfile.name))
    uploadfile.save(tmpfile)  # copy the upload content into it
    tmpfile.seek(0)  # back to the start of the file
    # Convert from binary to text mode.
    fichier = TextIOWrapper(tmpfile, encoding='latin_1', errors='replace')
    app.logger.info("Analyzing file %s" % (fichier))
    rdr = csv.reader(fichier, delimiter='\t', quotechar='"', )
    # Read the title line.
    LType = rdr.__next__()
    # Load the file content into the temp_taxo staging table.
    RowCount = 0
    ExecSQL("truncate table temp_taxo")
    sqlinsert = "INSERT INTO temp_taxo(idparent,idtaxo,name,status,typetaxo) values(%s,%s,%s,%s,%s)"
    for lig in rdr:
        if lig[0].strip() == '':  # empty line
            continue
        database.ExecSQL(sqlinsert, (
            lig[0].strip(), lig[1].strip(),
            lig[2].replace('+', ' ').replace('_', ' ').strip(),
            lig[3].strip(), lig[4].strip()))
        if RowCount > 0 and RowCount % 1000 == 0:
            app.logger.info("Inserted %s lines" % RowCount)
        RowCount += 1
    app.logger.info("count=%d" % RowCount)
    # app.logger.info(str(r))
    # if len(UpdatedTaxon)>0:
    #     ComputeDisplayName(UpdatedTaxon)
    txt += "<p style='color: green'> %s taxon loaded </p> " % (RowCount)
    # Set idfinal in the staging table for every node that already exists.
    n = ExecSQL("""UPDATE temp_taxo tt set idfinal=tf.id from taxonomy tf where tf.id_source=tt.idtaxo or (lower(tf.name)=lower(tt.name) and tf.id_source is null)""")
    app.logger.info("%d Nodes already exists " % n)
    TSVal = "to_timestamp('{}','YYYY-MM-DD HH24:MI:SS')".format(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
    TSUpdate = "lastupdate_datetime=" + TSVal
    # Insert the new root nodes.
    n = ExecSQL("""INSERT INTO taxonomy (id, parent_id, name, id_source,lastupdate_datetime,source_desc) select nextval('seq_taxonomy'),NULL,t.name,t.idtaxo,{} ,%s from temp_taxo t where idparent='-1' and idfinal is null and status='1'""".format(TSVal), ["Created by " + Desctxt])
    app.logger.info("Inserted %d Root Nodes" % n)
    # Reflect the freshly inserted roots back into the staging table.
    n = ExecSQL("""UPDATE temp_taxo tt set idfinal=tf.id from taxonomy tf where tf.id_source=tt.idtaxo and tt.idfinal is null and idparent='-1'""")
    app.logger.info("Updated %d inserted Root Nodes" % n)
    while True:
        # Insert new child nodes whose parents are already inserted,
        # one level per iteration, until a fixpoint is reached.
        # n=ExecSQL("""INSERT INTO taxonomy (id, parent_id, name, id_source)
        # select nextval('seq_taxonomy'),ttp.idfinal,tt.name,tt.idtaxo from temp_taxo tt join temp_taxo ttp on tt.idparent=ttp.idtaxo
        # where tt.idfinal is null and ttp.idfinal is not null and status='1'""")
        n = ExecSQL("""INSERT INTO taxonomy (id, parent_id, name, id_source,taxotype,lastupdate_datetime,source_desc) select nextval('seq_taxonomy'),ttp.id,tt.name,tt.idtaxo,case when lower(tt.typetaxo)='taxa' then 'P' else 'M' end,{},%s from temp_taxo tt join taxonomy ttp on tt.idparent=ttp.id_source where tt.idfinal is null and status='1'""".format(TSVal), ["Created by " + Desctxt])
        if n == 0:
            app.logger.info("No more data to import")
            break
        else:
            app.logger.info("Inserted %d Child Nodes" % n)
        # Reflect the inserted children back into the staging table.
        n = ExecSQL("""UPDATE temp_taxo tt set idfinal=tf.id from taxonomy tf where tf.id_source=tt.idtaxo and tt.idfinal is null """)
        app.logger.info("Updated %d inserted Child Nodes" % n)
    # Update names/types of existing nodes that changed.
    n = ExecSQL("""UPDATE taxonomy tf set name=tt.name,{},taxotype=case when lower(tt.typetaxo)='taxa' then 'P' else 'M' end ,source_desc=%s from temp_taxo tt where tf.id_source=tt.idtaxo and tt.status='1' and (tf.name!=tt.name or tf.taxotype!=case when lower(tt.typetaxo)='taxa' then 'P' else 'M' end ) """.format(TSUpdate), ["Updated by " + Desctxt])
    app.logger.info("Updated %d Nodes names" % n)
    # Update parents of existing nodes that moved in the tree.
    n = ExecSQL("""UPDATE taxonomy tfu set parent_id=sq.idfinal,{},source_desc=%s from (select tf.id, ttp.idfinal from taxonomy tf ,temp_taxo tt LEFT JOIN temp_taxo ttp on tt.idparent=ttp.idtaxo where tf.id_source=tt.idtaxo and tt.status='1' and coalesce(tf.parent_id,-1)!=coalesce(ttp.idfinal,-1) and (ttp.idfinal is not null or tt.idparent='-1' )) sq where tfu.id=sq.id""".format(TSUpdate), ["Updated by " + Desctxt])
    app.logger.info("Updated %d Nodes Parents" % n)
    # Deletion of status='0' nodes, intentionally disabled:
    # while True:
    #     n = ExecSQL("""delete from taxonomy t
    #     using temp_taxo tt
    #     where t.id=tt.idfinal and tt.status='0'
    #     and not exists (select 1 from taxonomy where parent_id=t.id )
    #     and not exists (select 1 from objects where classif_id=t.id or classif_auto_id=t.id)""")
    #     if n == 0:
    #         app.logger.info("No more data to delete")
    #         break
    #     else:
    #         app.logger.info("Deleted %d Nodes" % n)
    # Lst = GetAll("""select t.name from taxonomy t,temp_taxo tt
    #     where t.id=tt.idfinal and tt.status='0'
    #     and (exists (select 1 from taxonomy where parent_id=t.id )
    #     or exists (select 1 from objects where classif_id=t.id or classif_auto_id=t.id))""")
    # for r in Lst:
    #     app.logger.info("Can't Delete '%s' because it's used " % r[0])
    txt += "<br><a href='/browsetaxo/' class='btn btn-primary'><i class='fas fa-arrow-left'></i> Back to taxonomy</a>"
    # Recompute display names for every touched node.
    LstId = [x['idfinal'] for x in GetAll("select idfinal from temp_taxo where idfinal is not null")]
    ComputeDisplayName(LstId)
    app.logger.info("Updated Display name")
    # if len(Errors):
    #     txt += "<p style='color: red'> %s errors <ul> " % (len(Errors))
    #     txt += "\n".join("<li>%s</li>"%x for x in Errors)
    #     txt += "</ul></p> "
    g.bodydivmargin = "10px"
    return PrintInCharte(txt)