def files(request):
    """Serve the file management view for logged-in users with file permission.

    Returns a JSON error payload when the user is not authenticated or lacks
    the 'DB.dateien' permission; otherwise delegates to view_dateien.
    """
    user = request.user
    # Guard: user must be logged in.
    if not user.is_authenticated():
        payload = json.dumps({'error': 'login'})
        return httpOutput(payload, 'application/json')
    # Guard: user must hold the file permission.
    if not user.has_perm('DB.dateien'):
        payload = json.dumps({'error': 'no file permission'})
        return httpOutput(payload, 'application/json')
    from DB.funktionenDateien import view_dateien
    return view_dateien(request, True)
def transcriptCreate(request):
    """Create or update a transcript (and its tiers) from a JSON request body.

    Expects a JSON body with 'id_einzelerhebung', 'name', 'default_tier',
    optionally 'pk' (existing transcript) and 'aTiers' (tier dict keyed by pk;
    keys < 1 mean "insert"). Returns JSON with the transcript pk or an error.
    """
    # Is the user logged in?
    if not request.user.is_authenticated():
        return httpOutput(json.dumps({'error': 'login'}), 'application/json')
    try:
        nId = -1
        sData = json.loads(request.body.decode('utf-8'))
        aV_id_einzelerhebung = sData[
            'id_einzelerhebung'] if 'id_einzelerhebung' in sData else 0
        aV_name = sData['name']
        aV_default_tier = sData['default_tier']
        if aV_id_einzelerhebung:
            # NOTE(review): .get() raises DoesNotExist rather than returning
            # None, so the 'nicht gefunden' branch below is only reached via
            # the outer except — presumably intentional; verify.
            aErhebung = dbmodels.EinzelErhebung.objects.get(
                pk=aV_id_einzelerhebung)
            if aErhebung:
                # Load the transcript to update, or create a fresh one.
                try:
                    aElement = adbmodels.transcript.objects.get(
                        pk=int(sData['pk']) if 'pk' in sData else 0)
                except adbmodels.transcript.DoesNotExist:
                    aElement = adbmodels.transcript()
                aElement.name = aV_name
                aElement.default_tier = aV_default_tier
                aElement.save()
                nId = aElement.pk
                # Link the survey (Erhebung) to this transcript.
                aErhebung.id_transcript = aElement
                aErhebung.save()
                if 'aTiers' in sData:
                    # Insert (pk < 1) or update tiers; report new pks back.
                    for aTierPk, aTierData in sData['aTiers'].items():
                        if int(aTierPk) < 1:
                            aTier = adbmodels.tbl_tier()
                        else:
                            aTier = adbmodels.tbl_tier.objects.get(
                                pk=int(aTierPk))
                        aTier.transcript_id_id = nId
                        aTier.tier_name = aTierData['tier_name']
                        aTier.save()
                        if int(aTierPk) < 1:
                            sData['aTiers'][aTierPk]['newPk'] = aTier.pk
            else:
                return httpOutput(
                    json.dumps({
                        'error':
                        'Erhebung mit ID "' + str(aV_id_einzelerhebung) +
                        '" nicht gefunden!'
                    }), 'application/json')
        else:
            return httpOutput(
                json.dumps({'error': '"id_einzelerhebung" fehlt!'}),
                'application/json')
    except Exception as e:
        # Any failure (bad JSON, missing keys, DB errors) becomes a JSON error.
        return httpOutput(json.dumps({'error': str(type(e)) + ' - ' + str(e)}),
                          'application/json')
    return httpOutput(json.dumps({
        'transcript_id': str(nId),
        'error': None
    }), 'application/json')
def auth(request):
    """Report the authentication state of the current session as JSON."""
    user = request.user
    if not user.is_authenticated():
        body = json.dumps({'error': 'not authenticated'})
        return httpOutput(body, 'application/json')
    # Authenticated: echo back the user's id and name.
    body = json.dumps({
        'ok': True,
        'user': {
            'id': user.id,
            'name': user.username
        }
    })
    return httpOutput(body, 'application/json')
def resetidseq(request, app_name, tabelle_name):
    """Reset the PostgreSQL id sequence of a table to MAX(id)+1.

    Looks up the model for app_name/tabelle_name and issues a setval() on its
    '<db_table>_id_seq' sequence. Returns a JSON success or error payload.
    The table name comes from Django model metadata, not user input.
    """
    # Is the user logged in?
    if not request.user.is_authenticated():
        return redirect('dissdb_login')
    # Does the table exist?
    try:
        amodel = apps.get_model(app_name, tabelle_name)
    except LookupError:
        return HttpResponseNotFound('<h1>Tabelle "' + tabelle_name +
                                    '" nicht gefunden!</h1>')
    # Reset id sequence.
    try:
        # BUGFIX: use a context manager so the cursor is always closed
        # (it was previously left open); matches the other helpers here.
        with connection.cursor() as cursor:
            cursor.execute("SELECT setval('\"" + amodel._meta.db_table +
                           "_id_seq\"', (SELECT MAX(id) FROM \"" +
                           amodel._meta.db_table + "\")+1, FALSE)")
        success = json.dumps({
            'success': 'success',
            'db_table': str(amodel._meta.db_table),
        })
    except Exception as e:
        success = json.dumps({
            'error': str(type(e)) + ' - ' + str(e),
            'db_table': str(amodel._meta.db_table),
        })
    return httpOutput(success, mimetype='application/json')
def annoSaveTokenSet(aTokensIds, aTokenSetId, adbmodels):
    """Save a token set. (annoSent and annoCheck).

    Determines the first/last token of aTokensIds by token_reihung. If the ids
    form a contiguous range of tokens (same informant/transcript) the set is
    stored as von/bis range; otherwise as an explicit token list. Returns JSON
    with the saved tokenset id.
    """
    if len(aTokensIds) < 1:
        # BUGFIX: the mimetype was passed as json.dumps' second positional
        # argument (skipkeys) instead of to httpOutput.
        return httpOutput(json.dumps({'error': 'Keine Tokens übergeben!'}),
                          'application/json')
    # Fetch the token with the lowest and the highest token_reihung in one query.
    with connection.cursor() as cursor:
        cursor.execute('''
            ( SELECT "token"."id", "token"."token_reihung" FROM "token"
              WHERE ("token"."id" IN %s)
              ORDER BY "token"."token_reihung" ASC LIMIT 1 )
            UNION ALL
            ( SELECT "token"."id", "token"."token_reihung" FROM "token"
              WHERE ("token"."id" IN %s)
              ORDER BY "token"."token_reihung" DESC LIMIT 1 )
            ''', [tuple(aTokensIds), tuple(aTokensIds)])
        vTokenId, vTokenReihung = cursor.fetchone()
        bTokenId, bTokenReihung = cursor.fetchone()
    vTokenObj = adbmodels.token.objects.get(pk=vTokenId)
    # Count the tokens actually lying between first and last token.
    vbTokenCount = adbmodels.token.objects.filter(
        ID_Inf_id=vTokenObj.ID_Inf_id,
        transcript_id_id=vTokenObj.transcript_id_id,
        token_reihung__gte=vTokenReihung,
        token_reihung__lte=bTokenReihung).order_by('token_reihung').count()
    try:
        aTokenSet = adbmodels.tbl_tokenset.objects.get(id=aTokenSetId)
    except adbmodels.tbl_tokenset.DoesNotExist:
        aTokenSet = adbmodels.tbl_tokenset()
        aTokenSet.save()
    if len(aTokensIds) == vbTokenCount:
        # The selection is a contiguous token-set range.
        adbmodels.tbl_tokentoset.objects.filter(id_tokenset=aTokenSet).delete()
        aTokenSet.id_von_token = vTokenObj
        aTokenSet.id_bis_token_id = bTokenId
    else:
        # The selection is a token-set list.
        aTokenSet.id_von_token_id = None
        aTokenSet.id_bis_token_id = None
        adbmodels.tbl_tokentoset.objects.filter(
            id_tokenset=aTokenSet).exclude(id_token__in=aTokensIds).delete()
        for aTokenId in aTokensIds:
            obj, created = adbmodels.tbl_tokentoset.objects.update_or_create(
                id_tokenset_id=aTokenSet.id,
                id_token_id=aTokenId,
                defaults={
                    'id_tokenset_id': aTokenSet.id,
                    'id_token_id': aTokenId
                })
    aTokenSet.save()
    # BUGFIX: same misplaced-mimetype issue as above.
    return httpOutput(
        json.dumps({'OK': True, 'tokenset_id': aTokenSet.id}),
        'application/json')
def search(request):
    """Search places (tbl_orte) by OpenStreetMap id/type or by primary key.

    POST 'sucheorte': annotate each submitted place dict with its 'ort_pk'
    when a matching row exists. POST 'getort': return one place as JSON.
    Responses are 'OK' + JSON, or an error string.
    """
    # Is the user logged in?
    if not request.user.is_authenticated():
        return redirect('dissdb_login')
    # Search for OpenStreetMap places in tbl_orte ...
    if 'sucheorte' in request.POST:
        suchorte = json.loads(request.POST.get('suchorte'))
        ortModel = apps.get_model('PersonenDB', 'tbl_orte')
        for suchort in suchorte:
            # NOTE(review): assumes osm_id/osm_type are strings — '+' would
            # raise TypeError for ints; confirm against the client payload.
            print(suchort['osm_id'] + ' - ' + suchort['osm_type'])
            try:
                ortObjekt = ortModel.objects.filter(
                    osm_id=suchort['osm_id'],
                    osm_type=suchort['osm_type']).order_by('pk').first()
                suchort['ort_pk'] = ortObjekt.pk
            except Exception:
                # BUGFIX: was a bare 'except:' (also swallowed SystemExit/
                # KeyboardInterrupt). Best-effort: no match -> no 'ort_pk'.
                pass
        return httpOutput('OK' + json.dumps(suchorte))
    # Look up one place in tbl_orte and return it as JSON.
    if 'getort' in request.POST:
        ortData = {}
        ortModel = apps.get_model('PersonenDB', 'tbl_orte')
        try:
            ortObjekt = ortModel.objects.get(pk=request.POST.get('getort'))
            ortData['pk'] = ortObjekt.pk
            ortData['ort_namelang'] = ortObjekt.ort_namelang
            ortData['lat'] = ortObjekt.lat
            ortData['lon'] = ortObjekt.lon
            ortData['osm_id'] = ortObjekt.osm_id
            ortData['osm_type'] = ortObjekt.osm_type
        except Exception:
            # BUGFIX: narrowed from bare 'except:'; unknown pk -> empty dict.
            pass
        return httpOutput('OK' + json.dumps(ortData))
    return httpOutput('Error: Keine kompatible Suche!')
def transcripts(request):
    """List all transcripts as JSON (pk, name, update time, default tier)."""
    if not request.user.is_authenticated():
        return httpOutput(json.dumps({'error': 'login'}), 'application/json')
    rows = []
    try:
        # Serialize every transcript into a compact dict.
        rows = [{
            'pk': entry.pk,
            'n': entry.name,
            'ut': entry.update_time.strftime("%d.%m.%Y- %H:%M"),
            'default_tier': entry.default_tier
        } for entry in adbmodels.transcript.objects.all()]
    except Exception as e:
        message = str(type(e)) + ' - ' + str(e)
        return httpOutput(json.dumps({'error': message}), 'application/json')
    return httpOutput(
        json.dumps({
            'transcripts': rows,
            'error': None
        }), 'application/json')
def getTokenSetsSatz(aTokenSetsIds, adbmodels):
    """getTokenSetsSatz. (annoSent and annoCheck).

    For each token-set id, fetch the set's tokens plus up to 10 tokens of
    context on either side (same informant/transcript, by token_reihung),
    as a JSON row array per set. Returns a JSON response keyed by set id.
    """
    aTokenSetSatz = {}
    for aTokenSetId in aTokenSetsIds:
        aTokenSet = adbmodels.tbl_tokenset.objects.get(pk=aTokenSetId)
        if aTokenSet.id_von_token and aTokenSet.id_bis_token:
            # Range-style set: boundaries are stored directly.
            startToken = aTokenSet.id_von_token
            endToken = aTokenSet.id_bis_token
        else:
            # List-style set: derive boundaries from the member tokens.
            startToken = adbmodels.tbl_tokentoset.objects.filter(
                id_tokenset=aTokenSet).order_by(
                    'id_token__token_reihung')[0].id_token
            endToken = adbmodels.tbl_tokentoset.objects.filter(
                id_tokenset=aTokenSet).order_by(
                    '-id_token__token_reihung')[0].id_token
        with connection.cursor() as cursor:
            # tb marks the part: 0 = before, 1 = the set itself, 2 = after.
            cursor.execute('''
                SELECT array_to_json(array_agg(row_to_json(atok))) FROM (
                    ( SELECT "token".*, 0 AS tb FROM "token"
                      WHERE ("token"."ID_Inf_id" = %s
                             AND "token"."transcript_id_id" = %s
                             AND "token"."token_reihung" < %s)
                      ORDER BY "token"."token_reihung" DESC LIMIT 10 )
                    UNION ALL
                    ( SELECT "token".*, 1 AS tb FROM "token"
                      WHERE ("token"."ID_Inf_id" = %s
                             AND "token"."transcript_id_id" = %s
                             AND "token"."token_reihung" >= %s
                             AND "token"."token_reihung" <= %s)
                      ORDER BY "token"."token_reihung" ASC )
                    UNION ALL
                    ( SELECT "token".*, 2 AS tb FROM "token"
                      WHERE ("token"."ID_Inf_id" = %s
                             AND "token"."transcript_id_id" = %s
                             AND "token"."token_reihung" > %s)
                      ORDER BY "token"."token_reihung" ASC LIMIT 10 )
                ) AS atok
                ''', [
                startToken.ID_Inf_id, startToken.transcript_id_id,
                startToken.token_reihung, startToken.ID_Inf_id,
                startToken.transcript_id_id, startToken.token_reihung,
                endToken.token_reihung, startToken.ID_Inf_id,
                startToken.transcript_id_id, endToken.token_reihung
            ])
            aTokenSetSatz[aTokenSetId] = cursor.fetchone()[0]
    # BUGFIX: the mimetype was passed as json.dumps' second positional
    # argument (skipkeys) instead of to httpOutput.
    return httpOutput(
        json.dumps({'OK': True, 'aTokenSetSatz': aTokenSetSatz}),
        'application/json')
def getDuration(request, app_name, tabelle_name):
    """Call the model's getDuration() and report the outcome as JSON."""
    # Is the user logged in?
    if not request.user.is_authenticated():
        return redirect('dissdb_login')
    # Does the table exist?
    try:
        amodel = apps.get_model(app_name, tabelle_name)
    except LookupError:
        return HttpResponseNotFound('<h1>Tabelle "' + tabelle_name +
                                    '" nicht gefunden!</h1>')
    table_name = str(amodel._meta.db_table)
    try:
        # Serialization stays inside the try so non-JSON-safe results
        # also fall through to the error payload.
        success = json.dumps({
            'success': 'success',
            'db_table': table_name,
            'refreshCache': amodel.getDuration(),
        })
    except Exception as e:
        success = json.dumps({
            'error': str(type(e)) + ' - ' + str(e),
            'db_table': table_name,
        })
    return httpOutput(success, mimetype='application/json')
def getTokenSatz(aTokenId, adbmodels):
    """getTokenSatz. (annoSent and annoCheck).

    Fetch up to 10 tokens before and 11 tokens from aTokenId onward (same
    informant/transcript, ordered by token_reihung) as one JSON row array.
    """
    aToken = adbmodels.token.objects.get(pk=aTokenId)
    with connection.cursor() as cursor:
        cursor.execute('''
            SELECT array_to_json(array_agg(row_to_json(atok))) FROM (
                ( SELECT "token".* FROM "token"
                  WHERE ("token"."ID_Inf_id" = %s
                         AND "token"."transcript_id_id" = %s
                         AND "token"."token_reihung" < %s)
                  ORDER BY "token"."token_reihung" DESC LIMIT 10 )
                UNION ALL
                ( SELECT "token".* FROM "token"
                  WHERE ("token"."ID_Inf_id" = %s
                         AND "token"."transcript_id_id" = %s
                         AND "token"."token_reihung" >= %s)
                  ORDER BY "token"."token_reihung" ASC LIMIT 11 )
            ) AS atok
            ''', [
            aToken.ID_Inf_id, aToken.transcript_id_id, aToken.token_reihung,
            aToken.ID_Inf_id, aToken.transcript_id_id, aToken.token_reihung
        ])
        aTokenSatz = cursor.fetchone()[0]
    # BUGFIX: the mimetype was passed as json.dumps' second positional
    # argument (skipkeys) instead of to httpOutput.
    return httpOutput(json.dumps({'OK': True, 'aTokenSatz': aTokenSatz}),
                      'application/json')
def view_diagramm(request):
    """Standard display for model diagrams.

    POST 'speichere' == 'positionen': persists per-model diagram box
    positions (requires '<app>.edit' permission) and answers plain 'OK'.
    Otherwise renders the diagram page with field metadata for every
    non-'sys_' model of every app in settings.DIOEDB_APPLIST the user
    may edit.
    """
    from .models import sys_diagramm_tabellenpositionen
    info = ''
    error = ''
    # Save model positions
    if 'speichere' in request.POST:
        if request.POST.get('speichere') == 'positionen':
            positionen = json.loads(request.POST.get('positionen'))
            for position in positionen:
                if request.user.has_perm(position['app'] + '.edit'):
                    try:
                        amodel = sys_diagramm_tabellenpositionen.objects.get(
                            zu_app=position['app'],
                            zu_model=position['model'])
                    except:
                        # No stored position yet -> create a new row.
                        amodel = sys_diagramm_tabellenpositionen()
                    amodel.zu_app = position['app']
                    amodel.zu_model = position['model']
                    amodel.xt = position['xt']
                    amodel.yt = position['yt']
                    amodel.save()
            return httpOutput('OK')
    # Read out the models
    tabellen = []
    applist = settings.DIOEDB_APPLIST
    for aapp in applist:
        if request.user.has_perm(aapp + '.edit'):
            for model in apps.get_app_config(aapp).models.items():
                # Skip internal bookkeeping models ('sys_' prefix).
                if str(model[0])[:4] != 'sys_':
                    amodel = apps.get_model(aapp, model[0])
                    aFields = []
                    xt = 0
                    yt = 0
                    try:
                        # Use the stored diagram position if one exists.
                        asdtp = sys_diagramm_tabellenpositionen.objects.get(
                            zu_app=aapp, zu_model=str(model[0]))
                        xt = asdtp.xt
                        yt = asdtp.yt
                    except:
                        pass
                    for f in amodel._meta.get_fields():
                        # Keep declared fields plus the (auto-created) pk.
                        if not f.auto_created or amodel._meta.pk.name == f.name:
                            aField = {
                                'field_name': f.name,
                                'verbose_name': f._verbose_name,
                                'internal_type': f.get_internal_type(),
                                'unique': f.unique,
                                'blank': f.blank,
                                'null': f.null,
                            }
                            if amodel._meta.pk.name == f.name:
                                aField['pk'] = True
                            if f.is_relation:
                                aField[
                                    'related_db_table'] = f.related_model._meta.db_table
                            aFields.append(aField)
                    tabellen.append({
                        'model': model[0],
                        'app': aapp,
                        'verbose_name': amodel._meta.verbose_name,
                        'verbose_name_plural': amodel._meta.verbose_name_plural,
                        'count': amodel.objects.count(),
                        'db_table': amodel._meta.db_table,
                        'get_fields': aFields,
                        'xt': xt,
                        'yt': yt,
                    })
    tabellen = json.dumps(tabellen)
    # Render the page
    return render_to_response(
        'DB/diagramm.html',
        RequestContext(request, {
            'tabellen': tabellen,
            'error': error,
            'info': info
        }),
    )
def views_annocheck(request):
    """Anno-Check tool view/data dispatcher.

    Dispatches on POST keys: token-set delete/save, answer save, context
    sentence lookups, filter metadata ('getFilterData') and paged entry
    listing ('getEntries'). Without a matching key renders the annocheck page.
    """
    # Delete token set
    if 'delTokenSet' in request.POST:
        from .funktionenAnno import annoDelTokenSet
        return annoDelTokenSet(int(request.POST.get('tokenSetId')), adbmodels)
    # Save token set
    if 'saveTokenSet' in request.POST:
        from .funktionenAnno import annoSaveTokenSet
        return annoSaveTokenSet(json.loads(request.POST.get('tokens')),
                                int(request.POST.get('tokenSetId')), adbmodels)
    # Save/update/delete answers with tags
    # NOTE(review): deliberately no 'return' here — processing falls through
    # to the remaining branches; verify this is intended.
    if 'saveAntworten' in request.POST:
        from .funktionenAnno import annoSaveAntworten
        annoSaveAntworten(json.loads(request.POST.get('antworten')), adbmodels,
                          dbmodels)
    # getTokenSetsSatz
    if 'getTokenSetsSatz' in request.POST:
        from .funktionenAnno import getTokenSetsSatz
        return getTokenSetsSatz(request.POST.getlist('tokenSetsIds[]'),
                                adbmodels)
    # getTokenSatz
    if 'getTokenSatz' in request.POST:
        from .funktionenAnno import getTokenSatz
        return getTokenSatz(request.POST.get('tokenId'), adbmodels)
    # Load base data for the filters
    if 'getBaseData' in request.POST:
        # BUGFIX: the mimetype was passed as json.dumps' second positional
        # argument (skipkeys) instead of to httpOutput.
        return httpOutput(json.dumps({'OK': True}), 'application/json')
    # Output filter data
    if 'getFilterData' in request.POST:
        aFilter = json.loads(request.POST.get('filter'))
        aAntwortenElement = dbmodels.Antworten.objects.all()
        aShowCount = True if request.POST.get('showCount') == "true" else False
        showCountTrans = True if aShowCount and request.POST.get(
            'showCountTrans') == "true" else False
        # Determine tag levels (each facet re-filters with its own dimension
        # released so the counts reflect the remaining choices).
        aAntwortenElementF = filternSuchen(aAntwortenElement, 0,
                                           aFilter['tag'], aFilter['nichttag'],
                                           int(aFilter['trans']),
                                           int(aFilter['inf']),
                                           int(aFilter['aufgabenset']),
                                           int(aFilter['aufgabe']),
                                           aFilter['antwortenids'])
        nTagEbenen = {}
        aTagEbenen = [{
            'pk': 0,
            'title': 'Alle',
            'count':
            aAntwortenElementF.distinct().count() if aShowCount else -1
        }]
        for aTE in dbmodels.TagEbene.objects.all():
            nTagEbenen[aTE.pk] = str(aTE)
            aTagEbenen.append({
                'pk': aTE.pk,
                'title': str(aTE),
                'count': aAntwortenElementF.filter(
                    antwortentags__id_TagEbene_id=aTE.pk).distinct().count()
                if aShowCount else -1
            })
        # Determine informants
        aAntwortenElementF = filternSuchen(aAntwortenElement,
                                           int(aFilter['ebene']),
                                           aFilter['tag'], aFilter['nichttag'],
                                           int(aFilter['trans']), 0,
                                           int(aFilter['aufgabenset']),
                                           int(aFilter['aufgabe']),
                                           aFilter['antwortenids'])
        aInformanten = [{
            'pk': 0,
            'kuerzelAnonym': 'Alle',
            'count':
            aAntwortenElementF.distinct().count() if aShowCount else -1
        }]
        for aInf in dbmodels.Informanten.objects.all():
            aInformanten.append({
                'pk': aInf.pk,
                'kuerzelAnonym': aInf.Kuerzel_anonym,
                'count': aAntwortenElementF.filter(
                    von_Inf_id=aInf.pk).distinct().count()
                if aShowCount else -1
            })
        # Determine transcripts
        aAntwortenElementF = filternSuchen(aAntwortenElement,
                                           int(aFilter['ebene']),
                                           aFilter['tag'], aFilter['nichttag'],
                                           0, int(aFilter['inf']),
                                           int(aFilter['aufgabenset']),
                                           int(aFilter['aufgabe']),
                                           aFilter['antwortenids'])
        aTranskripte = [{
            'pk': 0,
            'name': 'Alle',
            'count':
            aAntwortenElementF.distinct().count() if aShowCount else -1
        }]
        aTranskripte.append({
            'pk': -1,
            'name': 'Keine Transkripte',
            'count': aAntwortenElementF.filter(
                ist_token=None,
                ist_tokenset=None).distinct().count() if aShowCount else -1
        })
        aTranskripte.append({
            'pk': -2,
            'name': 'Nur Transkripte',
            'count': aAntwortenElementF.filter(
                Q(ist_token__gt=0)
                | Q(ist_tokenset__gt=0)).distinct().count()
            if aShowCount else -1
        })
        for aTrans in adbmodels.transcript.objects.all():
            aTranskripte.append({
                'pk': aTrans.pk,
                'name': aTrans.name,
                'count': aAntwortenElementF.filter(
                    Q(ist_token__gt=0) | Q(ist_tokenset__gt=0),
                    Q(ist_token__transcript_id_id=aTrans.pk)
                    | Q(ist_tokenset__id_von_token__transcript_id_id=aTrans.pk)
                    | Q(ist_tokenset__tbl_tokentoset__id_token__transcript_id_id=
                        aTrans.pk)).distinct().count()
                if aShowCount and showCountTrans else -1
            })
        # Determine task sets
        aAntwortenElementF = filternSuchen(aAntwortenElement,
                                           int(aFilter['ebene']),
                                           aFilter['tag'], aFilter['nichttag'],
                                           int(aFilter['trans']),
                                           int(aFilter['inf']), 0, 0,
                                           aFilter['antwortenids'])
        aAufgabensets = [{
            'pk': 0,
            'name': 'Alle',
            'count':
            aAntwortenElementF.distinct().count() if aShowCount else -1
        }]
        for aAufgabenset in dbmodels.Aufgabensets.objects.all():
            aAufgabensets.append({
                'pk': aAufgabenset.pk,
                'name': str(aAufgabenset),
                'count': aAntwortenElementF.filter(
                    zu_Aufgabe__von_ASet_id=aAufgabenset.pk).distinct().count(
                    ) if aShowCount else -1
            })
        # Determine tasks
        aAntwortenElementF = filternSuchen(aAntwortenElement,
                                           int(aFilter['ebene']),
                                           aFilter['tag'], aFilter['nichttag'],
                                           int(aFilter['trans']),
                                           int(aFilter['inf']),
                                           int(aFilter['aufgabenset']), 0,
                                           aFilter['antwortenids'])
        aAufgaben = [{
            'pk': 0,
            'name': 'Alle',
            'count':
            aAntwortenElementF.distinct().count() if aShowCount else -1
        }]
        if int(aFilter['aufgabenset']) > 0:
            for aAufgabe in dbmodels.Aufgaben.objects.filter(
                    von_ASet_id=int(aFilter['aufgabenset'])):
                aAufgaben.append({
                    'pk': aAufgabe.pk,
                    'name': str(aAufgabe),
                    'count': aAntwortenElementF.filter(
                        zu_Aufgabe_id=aAufgabe.pk).distinct().count()
                    if aShowCount else -1
                })
        return httpOutput(
            json.dumps({
                'OK': True,
                'tagEbenen': aTagEbenen,
                'informanten': aInformanten,
                'transcripts': aTranskripte,
                'aufgabensets': aAufgabensets,
                'aufgaben': aAufgaben
            }), 'application/json')
    # Read out entries
    if 'getEntries' in request.POST:
        aSeite = int(
            request.POST.get('seite')) if request.POST.get('seite') else 0
        aEps = int(request.POST.get('eps')) if request.POST.get('eps') else 0
        aFilter = json.loads(request.POST.get('filter'))
        # aSuche = json.loads(request.POST.get('suche')) if request.POST.get('suche') else []
        # Cache tag names
        nTags = {x.pk: x.Tag for x in dbmodels.Tags.objects.all()}
        aSortierung = json.loads(request.POST.get(
            'sortierung')) if request.POST.get('sortierung') else []
        aElemente = dbmodels.Antworten.objects.distinct().all()
        # Search / filter
        aElemente = filternSuchen(aElemente, int(aFilter['ebene']),
                                  aFilter['tag'], aFilter['nichttag'],
                                  int(aFilter['trans']), int(aFilter['inf']),
                                  int(aFilter['aufgabenset']),
                                  int(aFilter['aufgabe']),
                                  aFilter['antwortenids'])
        # Sort
        aElemente = aElemente.order_by(
            ('-' if not aSortierung['asc'] else '') + aSortierung['spalte'])
        # Load the entries for the requested page
        aEintraege = []
        for aEintrag in aElemente[aSeite * aEps:aSeite * aEps + aEps]:
            # Determine sentence/tokens
            [
                aTokens, aTokensText, aTokensOrtho, aAntwortType, transName,
                aTransId, aSaetze, aOrtho, prev_text, vSatz, next_text, nSatz,
                o_f_token_reihung, r_f_token_reihung, o_l_token_reihung,
                r_l_token_reihung, o_l_token_type, transcript_id,
                informanten_id
            ] = getAntwortenSatzUndTokens(aEintrag, adbmodels)
            # Determine tag levels and tags
            aAntTags = []
            for xval in dbmodels.AntwortenTags.objects.filter(
                    id_Antwort=aEintrag.pk).values('id_TagEbene').annotate(
                        total=Count('id_TagEbene')).order_by('id_TagEbene'):
                aEbene = dbmodels.TagEbene.objects.get(id=xval['id_TagEbene'])
                aAntTags.append({
                    'eId': aEbene.id,
                    'e': str(aEbene),
                    't': ', '.join([
                        nTags[x['id_Tag_id']]
                        for x in dbmodels.AntwortenTags.objects.filter(
                            id_Antwort=aEintrag.pk,
                            id_TagEbene=xval['id_TagEbene']).values(
                                'id_Tag_id').order_by('Reihung')
                    ])
                })
            aEintraege.append({
                'id': aEintrag.id,
                'antType': aAntwortType,
                'Reihung': aEintrag.Reihung,
                'Transkript': transName,
                'tId': aTransId,
                'zu_Aufgabe_id': aEintrag.zu_Aufgabe_id,
                'aufBe': aEintrag.zu_Aufgabe.Beschreibung_Aufgabe
                if aEintrag.zu_Aufgabe_id else None,
                'aufVar': aEintrag.zu_Aufgabe.Variante
                if aEintrag.zu_Aufgabe_id else None,
                'aInf': aEintrag.von_Inf.Kuerzel_anonym,
                'von_Inf_id': aEintrag.von_Inf_id,
                'aTokensText': ' '.join(str(x) for x in aTokensText),
                'aTokens': ', '.join(str(x) for x in aTokens),
                'aOrtho': aOrtho,
                'aSaetze': aSaetze,
                'vSatz': vSatz,
                'nSatz': nSatz,
                'Tagebenen': aAntTags,
                'ist_token_id': aEintrag.ist_token_id,
                'ist_tokenset_id': aEintrag.ist_tokenset_id,
                'antwortentags_raw': [{
                    "id": aAT.id,
                    "id_Antwort_id": aAT.id_Antwort_id,
                    "id_Tag_id": aAT.id_Tag_id,
                    "id_TagEbene_id": aAT.id_TagEbene_id,
                    "primaer": aAT.primaer,
                    "Reihung": aAT.Reihung
                } for aAT in dbmodels.AntwortenTags.objects.filter(
                    id_Antwort_id=aEintrag.id).order_by(
                        'id_TagEbene_id', 'Reihung')]
            })
        # Output the entries
        return httpOutput(
            json.dumps({
                'OK': True,
                'seite': aSeite,
                'eps': aEps,
                'eintraege': aEintraege,
                'zaehler': aElemente.count()
            }), 'application/json')
    return render_to_response('AnnotationsDB/annocheck.html',
                              RequestContext(request))
def annoDelTokenSet(aTokenSetId, adbmodels):
    """Delete a token set and its member links. (annoSent and annoCheck).

    Removes all tbl_tokentoset rows of the set first, then the set itself.
    Raises tbl_tokenset.DoesNotExist for an unknown id.
    """
    aTokenSet = adbmodels.tbl_tokenset.objects.get(id=aTokenSetId)
    adbmodels.tbl_tokentoset.objects.filter(id_tokenset=aTokenSet).delete()
    aTokenSet.delete()
    # BUGFIX: the mimetype was passed as json.dumps' second positional
    # argument (skipkeys) instead of to httpOutput.
    return httpOutput(json.dumps({'OK': True}), 'application/json')
def annoSaveAntworten(sAntworten, adbmodels, dbmodels):
    """Save/update/delete answers with tags. (annoSent and annoCheck).

    Each entry in sAntworten is a dict: 'deleteIt' deletes the answer with
    'id'; otherwise 'id' > 0 updates and 'id' < 1 inserts an answer, followed
    by syncing its AntwortenTags from the 'tags' list (per tag level 'e':
    level > 0 upserts the listed tags in order, level <= 0 deletes them).
    """
    for sAntwort in sAntworten:
        print(json.dumps(sAntwort))  # debug trace of the incoming payload
        if 'deleteIt' in sAntwort:
            if sAntwort['id'] > 0:
                aElement = dbmodels.Antworten.objects.get(id=sAntwort['id'])
                aElement.delete()
        else:
            if sAntwort['id'] > 0:
                aElement = dbmodels.Antworten.objects.get(id=sAntwort['id'])
            else:
                aElement = dbmodels.Antworten()
            # Base fields; start/stop are zeroed (annotations carry no times).
            setattr(aElement, 'start_Antwort',
                    datetime.timedelta(microseconds=0))
            setattr(aElement, 'stop_Antwort',
                    datetime.timedelta(microseconds=0))
            setattr(aElement, 'von_Inf_id',
                    (sAntwort['von_Inf_id']
                     if 'von_Inf_id' in sAntwort else None))
            if 'ist_nat' in sAntwort:
                setattr(aElement, 'ist_nat', sAntwort['ist_nat'])
            if 'ist_Satz_id' in sAntwort:
                setattr(aElement, 'ist_Satz_id', sAntwort['ist_Satz_id'])
            if 'ist_bfl' in sAntwort:
                setattr(aElement, 'ist_bfl', sAntwort['ist_bfl'])
            # BUGFIX: this key was checked and assigned twice in a row;
            # the duplicate was removed.
            if 'ist_token_id' in sAntwort:
                setattr(aElement, 'ist_token_id', sAntwort['ist_token_id'])
            if 'ist_tokenset_id' in sAntwort:
                setattr(aElement, 'ist_tokenset_id',
                        sAntwort['ist_tokenset_id'])
            if 'bfl_durch_S' in sAntwort:
                setattr(aElement, 'bfl_durch_S', sAntwort['bfl_durch_S'])
            if 'Kommentar' in sAntwort:
                setattr(aElement, 'Kommentar', sAntwort['Kommentar'])
            aElement.save()
            sAntwort['nId'] = aElement.pk
            # Save AntwortenTags (removed a dead 'pass' statement here)
            if 'tags' in sAntwort:
                for eValue in sAntwort['tags']:
                    aEbene = eValue['e']
                    if aEbene > 0:
                        # Drop stored tags of this level no longer present.
                        for antwortenTag in dbmodels.AntwortenTags.objects.filter(
                                id_Antwort=sAntwort['nId'],
                                id_TagEbene=aEbene):
                            delIt = True
                            for tValue in eValue['t']:
                                if int(tValue['i']) == antwortenTag.pk:
                                    delIt = False
                            if delIt:
                                antwortenTag.delete()
                    reihung = 0
                    if aEbene > 0:
                        # Upsert the submitted tags, keeping their order.
                        for tValue in eValue['t']:
                            tagId = int(tValue['i'])
                            if tagId > 0:
                                aElement = dbmodels.AntwortenTags.objects.get(
                                    id=tagId)
                            else:
                                aElement = dbmodels.AntwortenTags()
                            setattr(aElement, 'id_Antwort_id', sAntwort['nId'])
                            setattr(aElement, 'id_Tag_id', tValue['t'])
                            setattr(aElement, 'id_TagEbene_id', aEbene)
                            setattr(aElement, 'Reihung', reihung)
                            reihung += 1
                            aElement.save()
                    else:
                        # Level <= 0 means: delete the listed stored tags.
                        for tValue in eValue['t']:
                            tagId = int(tValue['i'])
                            if tagId > 0:
                                aElement = dbmodels.AntwortenTags.objects.get(
                                    id=tagId)
                                aElement.delete()
    # BUGFIX: the mimetype was passed as json.dumps' second positional
    # argument (skipkeys) instead of to httpOutput.
    return httpOutput(json.dumps({'OK': True}), 'application/json')
def transcript(request, aPk, aNr):
    """Return one page (aNr) of transcript aPk as JSON.

    Page 0 additionally bundles the base data (transcript header, tiers,
    survey record, token types, informants, sentences and the last page
    number aTmNr). Every page carries up to 250 events with their tokens,
    event tiers, the token sets touching those tokens, and the answers
    attached to those tokens/token sets.
    """
    if not request.user.is_authenticated():
        return httpOutput(json.dumps({'error': 'login'}), 'application/json')
    tpk = int(aPk)
    aNr = int(aNr)
    if tpk > 0:
        maxQuerys = 250  # events per page
        dataout = {'aPk': aPk, 'aNr': aNr, 'error': None}
        # Load start information: (transcript, EinzelErhebung, Informanten, Saetze)
        if aNr == 0:
            aTranskriptData = adbmodels.transcript.objects.get(pk=tpk)
            aTranskript = {
                'pk': aTranskriptData.pk,
                'ut': aTranskriptData.update_time.strftime("%d.%m.%Y- %H:%M"),
                'n': aTranskriptData.name,
                'default_tier': aTranskriptData.default_tier
            }
            aTiersData = adbmodels.tbl_tier.objects.filter(
                transcript_id=aTranskriptData)
            aTiers = {
                aTier.pk: {
                    "tier_name": aTier.tier_name
                }
                for aTier in aTiersData
            }
            aEinzelErhebung = {}
            aEinzelErhebungData = dbmodels.EinzelErhebung.objects.filter(
                id_transcript_id=tpk)
            if aEinzelErhebungData:
                # Only the first linked survey record is serialized.
                aEinzelErhebungData = aEinzelErhebungData[0]
                aEinzelErhebung = {
                    'pk': aEinzelErhebungData.pk,
                    'trId': aEinzelErhebungData.id_transcript_id,
                    'd': aEinzelErhebungData.Datum.strftime("%d.%m.%Y- %H:%M"),
                    'e': aEinzelErhebungData.Explorator,
                    'k': aEinzelErhebungData.Kommentar,
                    'dp': aEinzelErhebungData.Dateipfad,
                    'af': aEinzelErhebungData.Audiofile,
                    'lf': aEinzelErhebungData.Logfile,
                    'o': aEinzelErhebungData.Ort,
                    'b': aEinzelErhebungData.Besonderheiten
                }
            aTokenTypes = {}
            for aTokenType in adbmodels.token_type.objects.filter(
                    token__transcript_id_id=tpk):
                aTokenTypes[aTokenType.pk] = {'n': aTokenType.token_type_name}
            aInformanten = {}
            # Informants are derived from the distinct token owners.
            for aInf in adbmodels.token.objects.filter(
                    transcript_id_id=tpk).values('ID_Inf').annotate(
                        total=Count('ID_Inf')).order_by('ID_Inf'):
                aInfM = dbmodels.Informanten.objects.get(id=aInf['ID_Inf'])
                aInformanten[aInfM.pk] = {
                    'k': aInfM.Kuerzel,
                    'ka': aInfM.Kuerzel_anonym
                }
            aSaetze = {}
            for aSatz in dbmodels.Saetze.objects.filter(
                    token__transcript_id_id=tpk):
                aSaetze[aSatz.pk] = {
                    't': aSatz.Transkript,
                    's': aSatz.Standardorth,
                    'k': aSatz.Kommentar
                }
            # Index of the last event page.
            aTmNr = int(
                adbmodels.event.objects.prefetch_related('rn_token_event_id').
                filter(rn_token_event_id__transcript_id_id=tpk).distinct(
                ).order_by('start_time').count() / maxQuerys)
            dataout.update({
                'aTranskript': aTranskript,
                'aTiers': aTiers,
                'aEinzelErhebung': aEinzelErhebung,
                'aTokenTypes': aTokenTypes,
                'aInformanten': aInformanten,
                'aSaetze': aSaetze,
                'aTmNr': aTmNr
            })
        # Load events:
        aEvents = []
        aTokens = {}
        nNr = aNr
        startQuery = aNr * maxQuerys
        endQuery = startQuery + maxQuerys
        for aEvent in adbmodels.event.objects.prefetch_related(
                'rn_token_event_id').filter(
                    rn_token_event_id__transcript_id_id=tpk).distinct(
                ).order_by('start_time')[startQuery:endQuery]:
            # Group token ids per informant, sorted by token_reihung.
            aEITokens = {}
            for aEIToken in sorted(list(aEvent.rn_token_event_id.all()),
                                   key=operator.attrgetter("token_reihung")):
                if aEIToken.ID_Inf_id not in aEITokens:
                    aEITokens[aEIToken.ID_Inf_id] = []
                aEITokens[aEIToken.ID_Inf_id].append(aEIToken.id)
                # Compact token dict; optional fields only when set.
                aTokenData = {
                    't': aEIToken.text,
                    'tt': aEIToken.token_type_id_id,
                    'tr': aEIToken.token_reihung,
                    'e': aEIToken.event_id_id,
                    'to': aEIToken.text_in_ortho,
                    'i': aEIToken.ID_Inf_id,
                }
                if aEIToken.ortho:
                    aTokenData['o'] = aEIToken.ortho
                if aEIToken.phon:
                    aTokenData['p'] = aEIToken.phon
                if aEIToken.sentence_id_id:
                    aTokenData['s'] = aEIToken.sentence_id_id
                if aEIToken.sequence_in_sentence:
                    aTokenData['sr'] = aEIToken.sequence_in_sentence
                if aEIToken.fragment_of_id:
                    aTokenData['fo'] = aEIToken.fragment_of_id
                if aEIToken.likely_error:
                    aTokenData['le'] = 1
                aTokens[aEIToken.pk] = aTokenData
            aEventsTiers = {}
            for aEventTier in aEvent.tbl_event_tier_set.all():
                if aEventTier.ID_Inf_id not in aEventsTiers:
                    aEventsTiers[aEventTier.ID_Inf_id] = {}
                aEventsTiers[aEventTier.ID_Inf_id][aEventTier.pk] = {
                    't': aEventTier.text,
                    'ti': aEventTier.tier_id_id
                }
            aEvents.append({
                'pk': aEvent.pk,
                's': str(aEvent.start_time),
                'e': str(aEvent.end_time),
                'l': str(aEvent.layer if aEvent.layer else 0),
                'tid': aEITokens,
                'event_tiers': aEventsTiers
            })
        # Full page -> another page follows.
        if len(aEvents) == maxQuerys:
            nNr += 1
        aTokenIds = [aTokenId for aTokenId in aTokens]
        maxVars = 500  # chunk size to keep SQL parameter lists bounded
        aTokenSets = {}
        nTokenSets = []
        aTokenIdsTemp = deepcopy(aTokenIds)
        # Load token sets for the events:
        while len(aTokenIdsTemp) > 0:
            nTokenSets += adbmodels.tbl_tokenset.objects.distinct().filter(
                id_von_token_id__in=aTokenIdsTemp[:maxVars])
            nTokenSets += adbmodels.tbl_tokenset.objects.distinct().filter(
                tbl_tokentoset__id_token__in=aTokenIdsTemp[:maxVars])
            aTokenIdsTemp = aTokenIdsTemp[maxVars:]
        for nTokenSet in nTokenSets:
            if nTokenSet.pk not in aTokenSets:
                aTokenSet = {}
                if nTokenSet.id_von_token:
                    aTokenSet['ivt'] = nTokenSet.id_von_token_id
                if nTokenSet.id_bis_token:
                    aTokenSet['ibt'] = nTokenSet.id_bis_token_id
                nTokenToSets = []
                for nTokenToSet in nTokenSet.tbl_tokentoset_set.all():
                    nTokenToSets.append(nTokenToSet.id_token_id)
                if nTokenToSets:
                    aTokenSet['t'] = nTokenToSets
                aTokenSets[nTokenSet.pk] = (aTokenSet)
        # Load answers for tokens and token sets:
        aTokenSetIds = [aTokenSetId for aTokenSetId in aTokenSets]
        maxVars = 500
        aAntworten = {}
        nAntworten = []
        aTokenIdsTemp = deepcopy(aTokenIds)
        aTokenSetIdsTemp = deepcopy(aTokenSetIds)
        while len(aTokenIdsTemp) > 0:
            nAntworten += dbmodels.Antworten.objects.distinct().filter(
                ist_token_id__in=aTokenIdsTemp[:maxVars])
            aTokenIdsTemp = aTokenIdsTemp[maxVars:]
        while len(aTokenSetIdsTemp) > 0:
            nAntworten += dbmodels.Antworten.objects.distinct().filter(
                ist_tokenset_id__in=aTokenSetIdsTemp[:maxVars])
            aTokenSetIdsTemp = aTokenSetIdsTemp[maxVars:]
        for nAntwort in nAntworten:
            if nAntwort.pk not in aAntworten:
                aAntwort = {'vi': nAntwort.von_Inf_id}
                aAntwort['inat'] = nAntwort.ist_nat
                if nAntwort.ist_Satz:
                    aAntwort['is'] = nAntwort.ist_Satz_id
                aAntwort['ibfl'] = nAntwort.ist_bfl
                if nAntwort.ist_token:
                    aAntwort['it'] = nAntwort.ist_token_id
                if nAntwort.ist_tokenset:
                    aAntwort['its'] = nAntwort.ist_tokenset_id
                aAntwort['bds'] = nAntwort.bfl_durch_S
                if nAntwort.start_Antwort:
                    aAntwort['sa'] = str(nAntwort.start_Antwort)
                if nAntwort.stop_Antwort:
                    aAntwort['ea'] = str(nAntwort.stop_Antwort)
                aAntwort['k'] = nAntwort.Kommentar
                # Load AntwortenTags:
                nAntTags = []
                for xval in dbmodels.AntwortenTags.objects.filter(
                        id_Antwort=nAntwort.pk).values('id_TagEbene').annotate(
                            total=Count('id_TagEbene')).order_by(
                                'id_TagEbene'):
                    nAntTags.append({
                        'e': xval['id_TagEbene'],
                        't': getTagFamilie(
                            dbmodels.AntwortenTags.objects.filter(
                                id_Antwort=nAntwort.pk,
                                id_TagEbene=xval['id_TagEbene']).order_by(
                                    'Reihung'))
                    })
                if nAntTags:
                    aAntwort['pt'] = nAntTags
                aAntworten[nAntwort.pk] = (aAntwort)
        dataout.update({
            'nNr': nNr,
            'aEvents': aEvents,
            'aTokens': aTokens,
            'aTokenSets': aTokenSets,
            'aAntworten': aAntworten
        })
        return httpOutput(json.dumps(dataout), 'application/json')
    # return httpOutput(json.dumps({'aPk': aPk, 'aNr': aNr, 'error': None}), 'application/json')
    return httpOutput(json.dumps({'error': 'Fehlerhafte PK'}),
                      'application/json')
def transcriptSave(request, aPk):
    """Persist transcript edits (tiers, events, tokens) sent as JSON.

    The request body mirrors the structure produced by the transcript view:
    'aTiers' (dict keyed by pk; < 1 inserts), 'aEvents' and 'aTokens' with a
    per-item 'status' of 'delete'/'insert'/'update'. Deletions and inserts
    run first, then updates inside a transaction. Per-item errors are written
    back into the payload ('newStatus'/'error'), timings into 'sys_timer',
    and the annotated payload is returned as JSON.
    """
    # NOTE(review): the login check is commented out — this endpoint is
    # currently unauthenticated; confirm this is intended.
    # if not request.user.is_authenticated():
    #     return httpOutput(json.dumps({'error': 'login'}), 'application/json')
    tpk = int(aPk)
    # Manual test: $.post( "/routes/transcript/save/1/", '{"aTokens": {"6061": {"e": -3,"i": 2,"s": 15010,"sr": 2,"t": "tich","to": "","tr": 6061,"tt": 1,"fo": 6060,"status": "update"},"-5": {"e": 1479,"i": 2,"s": -1,"sr": -1,"t": ",","to": "","tr": 6069,"tt": 2,"status": "insert"}}}').always(function(x) { console.log(x); });
    if tpk > 0:
        sData = json.loads(request.body.decode('utf-8'))
        eventPkChanges = {}
        aEventKey = {}
        starttime = time.time()
        sData['sys_timer'] = {}
        if 'aTiers' in sData:
            # Insert (pk < 1) or update tiers; report new pks back.
            for aTierPk, aTierData in sData['aTiers'].items():
                if int(aTierPk) < 1:
                    aTier = adbmodels.tbl_tier()
                else:
                    aTier = adbmodels.tbl_tier.objects.get(pk=int(aTierPk))
                aTier.transcript_id_id = tpk
                aTier.tier_name = aTierData['tier_name']
                aTier.save()
                if int(aTierPk) < 1:
                    sData['aTiers'][aTierPk]['newPk'] = aTier.pk
        sData['sys_timer']['aTiers'] = time.time() - starttime
        starttime = time.time()
        if 'aEvents' in sData:
            # Pass 1: deletes and inserts (pk < 1), outside the transaction.
            for key, aEvent in enumerate(sData['aEvents']):
                try:
                    aEventKey[sData['aEvents'][key]['pk']] = key
                    if aEvent['status'] == 'delete':
                        aElement = adbmodels.event.objects.get(
                            id=sData['aEvents'][key]['pk'])
                        aElement.delete()
                        sData['aEvents'][key]['newStatus'] = 'deleted'
                        # print('event', key, 'deleted')
                    elif aEvent['pk'] < 1:
                        eventUpdateAndInsert(sData, key, aEvent, aEventKey,
                                             eventPkChanges)
                except Exception as e:
                    # Record the failure on the item instead of aborting.
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    sData['aEvents'][key]['newStatus'] = 'error'
                    sData['aEvents'][key]['error'] = str(
                        exc_tb.tb_lineno) + ' | ' + str(
                            type(e)) + ' - ' + str(e)
                    # print('event', key, 'error', sData['aEvents'][key]['error'])
            # Pass 2: updates of existing events, atomically.
            with transaction.atomic():
                for key, aEvent in enumerate(sData['aEvents']):
                    try:
                        if aEvent['status'] != 'delete' and aEvent['pk'] > 0:
                            eventUpdateAndInsert(sData, key, aEvent, aEventKey,
                                                 eventPkChanges)
                    except Exception as e:
                        exc_type, exc_obj, exc_tb = sys.exc_info()
                        sData['aEvents'][key]['newStatus'] = 'error'
                        sData['aEvents'][key]['error'] = str(
                            exc_tb.tb_lineno) + ' | ' + str(
                                type(e)) + ' - ' + str(e)
                        # print('event', key, 'error', sData['aEvents'][key]['error'])
        sData['sys_timer']['aEvents'] = time.time() - starttime
        # print('aEvents', sData['sys_timer']['aEvents'], 'sec.')
        starttime = time.time()
        if 'aTokens' in sData:
            # Pass 1: token deletes and inserts (id < 1).
            for key, aToken in sData['aTokens'].items():
                aId = int(key)
                try:
                    if aToken['status'] == 'delete':
                        aElement = adbmodels.token.objects.get(id=aId)
                        aElement.delete()
                        sData['aTokens'][key]['newStatus'] = 'deleted'
                        # print('token', key, 'deleted')
                    elif aId < 1:
                        tokenUpdateAndInsert(sData, key, aToken, aEventKey,
                                             aId, tpk)
                except Exception as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    sData['aTokens'][key]['newStatus'] = 'error'
                    sData['aTokens'][key]['error'] = str(
                        exc_tb.tb_lineno) + ' | ' + str(
                            type(e)) + ' - ' + str(e)
                    # print('token:', key, 'error:', sData['aTokens'][key]['error'], sData['aTokens'][key])
            # Pass 2: token updates, atomically.
            with transaction.atomic():
                for key, aToken in sData['aTokens'].items():
                    aId = int(key)
                    try:
                        if aToken['status'] != 'delete' and aId > 0:
                            tokenUpdateAndInsert(sData, key, aToken, aEventKey,
                                                 aId, tpk)
                    except Exception as e:
                        exc_type, exc_obj, exc_tb = sys.exc_info()
                        sData['aTokens'][key]['newStatus'] = 'error'
                        sData['aTokens'][key]['error'] = str(
                            exc_tb.tb_lineno) + ' | ' + str(
                                type(e)) + ' - ' + str(e)
                        # print('token:', key, 'error:', sData['aTokens'][key]['error'], sData['aTokens'][key])
        sData['sys_timer']['aTokens'] = time.time() - starttime
        # print('aTokens', sData['sys_timer']['aTokens'], 'sec.')
        starttime = time.time()
        if 'aEvents' in sData:
            # Update tid: refresh each event's token-id map from the DB.
            for key, aEvent in enumerate(sData['aEvents']):
                ePk = aEvent['newPk'] if 'newPk' in aEvent else aEvent['pk']
                sData['aEvents'][key]['tid'] = {}
                for av in adbmodels.token.objects.filter(event_id_id=ePk):
                    if str(av.ID_Inf_id) not in sData['aEvents'][key]['tid']:
                        sData['aEvents'][key]['tid'][str(av.ID_Inf_id)] = []
                    sData['aEvents'][key]['tid'][str(av.ID_Inf_id)].append(
                        av.pk)
        sData['sys_timer']['aEventsTid'] = time.time() - starttime
        # print('aEventsTid', sData['sys_timer']['aEventsTid'], 'sec.')
        return httpOutput(json.dumps(sData), 'application/json')
    return httpOutput(json.dumps({'error': 'Fehlerhafte PK'}),
                      'application/json')
def views_annosent(request):
    """AJAX dispatcher for the sentence-annotation view (mat_adhocsentences).

    Dispatches on keys present in ``request.POST``:
      * ``delTokenSet`` / ``saveTokenSet``   -- delete / save a token set
      * ``saveAntworten``                    -- save/update/delete answers with
                                                tags (deliberately no return:
                                                falls through to the page render)
      * ``getMatViewData``                   -- refresh-log stats for the
                                                materialized view, optional refresh
      * ``getTokenSetsSatz`` / ``getTokenSatz`` -- sentence lookups
      * ``getBaseData``                      -- base data for the filter UI
      * ``getEntries`` / ``getXLS``          -- filtered/paged entries as JSON
                                                or as an XLS download
    With no recognized key the annosent.html page is rendered.

    BUGFIX: in the ``getMatViewData`` and ``getBaseData`` branches the string
    ``'application/json'`` used to be passed as the *second positional argument
    of json.dumps()* (where it landed in the ``skipkeys`` parameter) instead of
    as the content-type argument of ``httpOutput()``. Also removed leftover
    debug ``print()`` calls and guarded the XLS export against an empty result
    page (``IN ()`` is a PostgreSQL syntax error).
    """
    # Delete a token set
    if 'delTokenSet' in request.POST:
        from .funktionenAnno import annoDelTokenSet
        return annoDelTokenSet(int(request.POST.get('tokenSetId')), adbmodels)
    # Save a token set
    if 'saveTokenSet' in request.POST:
        from .funktionenAnno import annoSaveTokenSet
        return annoSaveTokenSet(json.loads(request.POST.get('tokens')),
                                int(request.POST.get('tokenSetId')), adbmodels)
    # Save/update/delete answers with their tags
    if 'saveAntworten' in request.POST:
        from .funktionenAnno import annoSaveAntworten
        annoSaveAntworten(json.loads(request.POST.get('antworten')),
                          adbmodels, dbmodels)
    # Materialized-view information and optional refresh
    if 'getMatViewData' in request.POST:
        if 'refresh' in request.POST and request.POST.get('refresh') == 'true':
            adbmodels.tbl_refreshlog_mat_adhocsentences.refresh()
        # Average duration of the last 5 refresh runs
        adavg = datetime.timedelta()
        adavgdg = 0
        for aRl in (adbmodels.tbl_refreshlog_mat_adhocsentences.objects
                    .all().order_by('-created_at')[:5]):
            adavg += aRl.duration
            adavgdg += 1
        if adavgdg > 0:
            adavg = adavg / adavgdg
        return httpOutput(
            json.dumps({
                'OK': True,
                'mvDurchschnitt': adavg.total_seconds(),
                'mvLastUpdate': str(
                    adbmodels.tbl_refreshlog_mat_adhocsentences.objects
                    .all().order_by('-created_at')[0]
                    .created_at.strftime("%d.%m.%Y %H:%M:%S"))
            }), 'application/json')
    # getTokenSetsSatz
    if 'getTokenSetsSatz' in request.POST:
        from .funktionenAnno import getTokenSetsSatz
        return getTokenSetsSatz(request.POST.getlist('tokenSetsIds[]'),
                                adbmodels)
    # getTokenSatz
    if 'getTokenSatz' in request.POST:
        from .funktionenAnno import getTokenSatz
        return getTokenSatz(request.POST.get('tokenId'), adbmodels)
    # Base data for the filters
    if 'getBaseData' in request.POST:
        return httpOutput(json.dumps({'OK': True}), 'application/json')
    # Read entries (JSON page or XLS export)
    if 'getEntries' in request.POST or 'getXLS' in request.POST:
        if 'getXLS' in request.POST:
            # XLS export: one huge "page" containing everything
            aSeite = 0
            aEps = 999999999
        else:
            aSeite = int(request.POST.get('seite'))
            aEps = int(request.POST.get('eps'))
        aFilter = json.loads(request.POST.get('filter'))
        aSuche = json.loads(request.POST.get('suche'))
        aSortierung = json.loads(request.POST.get('sortierung'))
        aElemente = adbmodels.mat_adhocsentences.objects.all()
        # Searching / filtering: build AND ("muss"/"nicht") and OR ("kann") sets
        aSucheMuss = []
        aSucheKann = []
        if int(aFilter['trans']) > 0:
            aSucheMuss.append(Q(transid=aFilter['trans']))
        if int(aFilter['inf']) > 0:
            aSucheMuss.append(Q(infid=aFilter['inf']))
        for aSuchFeld in aSuche:
            if aSuchFeld['value'].strip():
                aSuchValue = aSuchFeld['value'].strip()
                if ('fx' in aSuchFeld and aSuchFeld['fx']
                        and aSuchFeld['name'] == 'adhoc_sentence'):
                    # Direct id list search, e.g. "1, 2, 3"
                    aTyp = 'in'
                    aSuchValue = [
                        int(aSV.strip()) for aSV in aSuchValue.split(',')
                    ]
                else:
                    if 'regex' in aSuchFeld['methode']:
                        # 'regex' / 'iregex' lookups pass the value through
                        aTyp = aSuchFeld['methode']
                        aSuchValue = r"{0}".format(aSuchValue)
                    else:
                        # 'ci' -> case-insensitive contains
                        aTyp = ('icontains' if aSuchFeld['methode'] == 'ci'
                                else 'contains')
                if aSuchFeld['kannmuss'] == 'muss':
                    aSucheMuss.append(
                        Q(**{aSuchFeld['name'] + '__' + aTyp: aSuchValue}))
                if aSuchFeld['kannmuss'] == 'nicht':
                    aSucheMuss.append(~Q(
                        **{aSuchFeld['name'] + '__' + aTyp: aSuchValue}))
                if aSuchFeld['kannmuss'] == 'kann':
                    aSucheKann.append(
                        Q(**{aSuchFeld['name'] + '__' + aTyp: aSuchValue}))
        # Fold the Q lists: AND over "muss", OR over "kann"
        import operator
        if aSucheMuss:
            aSucheMussX = aSucheMuss[0]
            for aMuss in aSucheMuss[1:]:
                aSucheMussX = operator.and_(aSucheMussX, aMuss)
        if aSucheKann:
            aSucheKannX = aSucheKann[0]
            for aKann in aSucheKann[1:]:
                aSucheKannX = operator.or_(aSucheKannX, aKann)
        if aSucheMuss and aSucheKann:
            aElemente = aElemente.filter(aSucheMussX, aSucheKannX)
        elif aSucheMuss:
            aElemente = aElemente.filter(aSucheMussX)
        elif aSucheKann:
            aElemente = aElemente.filter(aSucheKannX)
        # Sorting
        aElemente = aElemente.order_by(
            ('-' if not aSortierung['asc'] else '') + aSortierung['spalte'])
        # Ids of the requested page
        aMatIds = [
            aEintrag['id']
            for aEintrag in aElemente.values('id')[aSeite * aEps:aSeite * aEps
                                                   + aEps]
        ]
        if 'getXLS' in request.POST:
            import xlwt
            response = HttpResponse(content_type='text/ms-excel')
            response['Content-Disposition'] = (
                'attachment; filename="as_'
                + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
                + '.xls"')
            aInfs = {
                aInf.id: aInf.Kuerzel
                for aInf in dbmodels.Informanten.objects.all()
            }
            aTranscripts = {
                aTranscript.id: aTranscript.name
                for aTranscript in adbmodels.transcript.objects.all()
            }
            aEintraege = []
            # Guard: "IN ()" would be an SQL syntax error on an empty result
            if aMatIds:
                aQuery = adbmodels.mat_adhocsentences.objects.raw(
                    '''
                    SELECT "mat_adhocsentences".*
                    FROM "mat_adhocsentences"
                    WHERE "mat_adhocsentences"."id" IN %s
                    ORDER BY "mat_adhocsentences"."adhoc_sentence" ASC
                    ''', [tuple(aMatIds)])
                for aEintrag in aQuery:
                    aEintraege.append({
                        'adhoc_sentence': aEintrag.adhoc_sentence,
                        'tokenids':
                        ', '.join(str(v) for v in aEintrag.tokenids)
                        if aEintrag.tokenids else aEintrag.tokenids,
                        'inf': aInfs[aEintrag.infid],
                        'infid': aEintrag.infid,
                        'trans': aTranscripts[aEintrag.transid],
                        'transid': aEintrag.transid,
                        'tokreih':
                        ', '.join(str(v) for v in aEintrag.tokreih)
                        if aEintrag.tokreih else aEintrag.tokreih,
                        'seqsent':
                        ', '.join(str(v) for v in aEintrag.seqsent)
                        if aEintrag.seqsent else aEintrag.seqsent,
                        'sentorig': aEintrag.sentorig,
                        'sentorth': aEintrag.sentorth,
                        'left_context': aEintrag.left_context,
                        'senttext': aEintrag.senttext,
                        'right_context': aEintrag.right_context,
                        'sentttlemma': aEintrag.sentttlemma,
                        'sentttpos': aEintrag.sentttpos,
                        'sentsplemma': aEintrag.sentsplemma,
                        'sentsppos': aEintrag.sentsppos,
                        'sentsptag': aEintrag.sentsptag,
                        'sentspdep': aEintrag.sentspdep,
                        'sentspenttype': aEintrag.sentspenttype
                    })
            aColTitel = [
                'adhoc_sentence', 'inf', 'trans', 'sentorig', 'sentorth',
                'left_context', 'senttext', 'right_context', 'sentttlemma',
                'sentttpos', 'sentsplemma', 'sentsppos', 'sentsptag',
                'sentspdep', 'sentspenttype', 'tokreih', 'seqsent', 'infid',
                'transid', 'tokenids'
            ]
            wb = xlwt.Workbook(encoding='utf-8')
            ws = wb.add_sheet('Anno-sent')
            row_num = 0
            columns = [(ct, 2000) for ct in aColTitel]
            # Bold header row
            font_style = xlwt.XFStyle()
            font_style.font.bold = True
            for col_num in range(len(columns)):
                ws.write(row_num, col_num, columns[col_num][0], font_style)
            font_style = xlwt.XFStyle()
            for aEintrag in aEintraege:
                row_num += 1
                for cti in range(len(aColTitel)):
                    ws.write(row_num, cti, aEintrag[aColTitel[cti]],
                             font_style)
            wb.save(response)
            return response
        if not aMatIds:
            aEintraege = []
        else:
            # One raw query that nests tokens, answers, tags and token sets
            # (direct and cached membership) as JSON per sentence.
            aEintraege = [{
                'adhoc_sentence': aEintrag.adhoc_sentence,
                'tokenids': aEintrag.tokenids,
                'tokens': aEintrag.tokens,
                'infid': aEintrag.infid,
                'transid': aEintrag.transid,
                'tokreih': aEintrag.tokreih,
                'seqsent': aEintrag.seqsent,
                'sentorig': aEintrag.sentorig,
                'sentorth': aEintrag.sentorth,
                'left_context': aEintrag.left_context,
                'senttext': aEintrag.senttext,
                'right_context': aEintrag.right_context,
                'sentttlemma': aEintrag.sentttlemma,
                'sentttpos': aEintrag.sentttpos,
                'sentsplemma': aEintrag.sentsplemma,
                'sentsppos': aEintrag.sentsppos,
                'sentsptag': aEintrag.sentsptag,
                'sentspdep': aEintrag.sentspdep,
                'sentspenttype': aEintrag.sentspenttype
            } for aEintrag in adbmodels.mat_adhocsentences.objects.raw(
                '''
                SELECT "mat_adhocsentences".*, (
                    SELECT array_to_json(array_agg(row_to_json(atok))) FROM (
                        SELECT "token".*, (
                            SELECT array_to_json(array_agg(row_to_json(aantwort))) FROM (
                                SELECT "Antworten".*, (
                                    SELECT array_to_json(array_agg(row_to_json(aAntwortenTags))) FROM (
                                        SELECT "AntwortenTags".* FROM "AntwortenTags"
                                        WHERE "AntwortenTags"."id_Antwort_id" = "Antworten"."id"
                                        ORDER BY "AntwortenTags"."id_TagEbene_id" ASC, "AntwortenTags"."Reihung" ASC
                                    ) AS aAntwortenTags
                                ) AS AntwortenTags_raw
                                FROM "Antworten"
                                WHERE "Antworten"."ist_token_id" = "token"."id"
                            ) AS aantwort
                        ) AS antworten, (
                            SELECT array_to_json(array_agg(row_to_json(atokenset))) FROM (
                                SELECT "tokenset".*, (
                                    SELECT array_to_json(array_agg(row_to_json(aantwort))) FROM (
                                        SELECT "Antworten".*, (
                                            SELECT array_to_json(array_agg(row_to_json(aAntwortenTags))) FROM (
                                                SELECT "AntwortenTags".* FROM "AntwortenTags"
                                                WHERE "AntwortenTags"."id_Antwort_id" = "Antworten"."id"
                                                ORDER BY "AntwortenTags"."id_TagEbene_id" ASC, "AntwortenTags"."Reihung" ASC
                                            ) AS aAntwortenTags
                                        ) AS AntwortenTags_raw
                                        FROM "Antworten"
                                        WHERE "Antworten"."ist_tokenset_id" = "tokenset"."id"
                                    ) AS aantwort
                                ) AS antworten, (
                                    SELECT array_to_json(array_agg(row_to_json(atokentoset_cache))) FROM (
                                        SELECT "tokentoset"."id_token_id" FROM "tokentoset"
                                        WHERE "tokentoset"."id_tokenset_id" = "tokenset"."id"
                                    ) AS atokentoset_cache
                                ) AS tokentoset
                                FROM "tokenset"
                                LEFT OUTER JOIN "tokentoset" ON ( "tokenset"."id" = "tokentoset"."id_tokenset_id" )
                                WHERE "tokentoset"."id_token_id" = "token"."id"
                                UNION ALL
                                SELECT "tokenset".*, (
                                    SELECT array_to_json(array_agg(row_to_json(aantwort))) FROM (
                                        SELECT "Antworten".*, (
                                            SELECT array_to_json(array_agg(row_to_json(aAntwortenTags))) FROM (
                                                SELECT "AntwortenTags".* FROM "AntwortenTags"
                                                WHERE "AntwortenTags"."id_Antwort_id" = "Antworten"."id"
                                                ORDER BY "AntwortenTags"."id_TagEbene_id" ASC, "AntwortenTags"."Reihung" ASC
                                            ) AS aAntwortenTags
                                        ) AS AntwortenTags_raw
                                        FROM "Antworten"
                                        WHERE "Antworten"."ist_tokenset_id" = "tokenset"."id"
                                    ) AS aantwort
                                ) AS antworten, (
                                    SELECT array_to_json(array_agg(row_to_json(atokentoset_cache))) FROM (
                                        SELECT "tokentoset_cache"."id_token_id" FROM "tokentoset_cache"
                                        WHERE "tokentoset_cache"."id_tokenset_id" = "tokenset"."id"
                                    ) AS atokentoset_cache
                                ) AS tokentoset
                                FROM "tokenset"
                                LEFT OUTER JOIN "tokentoset_cache" ON ( "tokenset"."id" = "tokentoset_cache"."id_tokenset_id" )
                                WHERE "tokentoset_cache"."id_token_id" = "token"."id"
                            ) AS atokenset
                        ) AS tokensets
                        FROM "token"
                        WHERE "token"."id" = ANY("mat_adhocsentences"."tokenids")
                        ORDER BY "token"."token_reihung" ASC
                    ) atok
                ) AS "tokens"
                FROM "mat_adhocsentences"
                WHERE "mat_adhocsentences"."id" IN %s
                ORDER BY "mat_adhocsentences"."adhoc_sentence" ASC
                ''', [tuple(aMatIds)])]
        return httpOutput(
            json.dumps({
                'OK': True,
                'seite': aSeite,
                'eps': aEps,
                'eintraege': aEintraege,
                'zaehler': aElemente.count()
            }), 'application/json')
    return render_to_response('AnnotationsDB/annosent.html',
                              RequestContext(request))
def tagsystemvue(request):
    """AJAX backend for the tag-system Vue frontend.

    Builds an ``output`` dict from the POST flags and returns it as JSON:
      * ``getBase``    -- tag levels ('tagebenen') and phenomena ('phaenomene')
      * ``getTags``    -- all tags with level, parent and child relations,
                          plus their display order ('tagsReihung')
      * ``getPresets`` -- preset tag families
    Redirects to the login page for anonymous users.

    BUGFIX: the response used to be built as
    ``httpOutput(json.dumps(output), mimetype='application/json')`` — every
    other call site in this module passes the content type positionally, so
    the stray ``mimetype=`` keyword is replaced by a positional argument.
    The bare ``except:`` clauses are narrowed to ``except Exception:`` so
    they no longer swallow ``KeyboardInterrupt``/``SystemExit``.
    """
    # Is the user logged in?
    if not request.user.is_authenticated():
        return redirect('dissdb_login')
    import Datenbank.models as dbmodels
    output = {}
    if 'getBase' in request.POST:
        # Tag levels
        tagebenen = []
        for TagEbene in dbmodels.TagEbene.objects.all():
            tagebenen.append({'pk': TagEbene.pk, 't': TagEbene.Name})
        output['tagebenen'] = tagebenen
        # Phenomena
        phaenomene = {}
        for phaenomen in dbmodels.Phaenomene.objects.all():
            phaenomene[phaenomen.pk] = {
                'b': phaenomen.Bez_Phaenomen,
                'bs': phaenomen.Beschr_Phaenomen,
                'zpb': phaenomen.zu_PhaenBer,
                'k': phaenomen.Kommentar
            }
        output['phaenomene'] = phaenomene
    if 'getTags' in request.POST:
        tags = {}
        tagsReihung = []
        for tag in dbmodels.Tags.objects.prefetch_related(
                'tagebenezutag_set', 'id_ParentTag', 'id_ChildTag').all():
            tagsReihung.append(tag.pk)
            tags[tag.pk] = {
                't': tag.Tag,
                'tl': tag.Tag_lang,
                'k': tag.Kommentar,
                'r': tag.AReihung,
                'g': tag.Generation,
            }
            if tag.zu_Phaenomen:
                tags[tag.pk]['zppk'] = tag.zu_Phaenomen_id
            # Tag levels this tag belongs to
            try:
                tmpTezt = []
                for tezt in tag.tagebenezutag_set.all():
                    tmpTezt.append(tezt.id_TagEbene_id)
                if tmpTezt:
                    tags[tag.pk]['tezt'] = tmpTezt
            except Exception:
                pass
            # Child tags, ordered by display order
            try:
                tmpChilds = []
                for aCTags in tag.id_ParentTag.all().order_by(
                        'id_ParentTag__AReihung'):
                    tmpChilds.append(aCTags.id_ChildTag_id)
                if tmpChilds:
                    tags[tag.pk]['c'] = tmpChilds
            except Exception:
                pass
            # Parent tags, ordered by display order
            try:
                tmpParents = []
                for aCTags in tag.id_ChildTag.all().order_by(
                        'id_ChildTag__AReihung'):
                    tmpParents.append(aCTags.id_ParentTag_id)
                if tmpParents:
                    tags[tag.pk]['p'] = tmpParents
            except Exception:
                pass
        output['tags'] = {'tags': tags, 'tagsReihung': tagsReihung}
    if 'getPresets' in request.POST:
        import bearbeiten.models as bmodels
        aPresetTags = []
        for val in bmodels.PresetTags.objects.prefetch_related(
                'id_Tags').all():
            tfVal = getTagFamiliePT(val.id_Tags.all())
            if tfVal:
                aPresetTags.append({'tf': tfVal})
        output['presets'] = aPresetTags
    return httpOutput(json.dumps(output), 'application/json')
def views_tagauswertung(request):
    """Tag-evaluation view: aggregated tag statistics and tag contexts.

    GET parameters:
      * ``get=data``       -- per (tag, tag-level) usage counts with a JSON
                              payload describing each tagged answer
      * ``get=tagKontext`` -- for the tag-id list ``l[]``, the token context
                              (transcript, answers, tags) of every answer
                              carrying those tags; ``s=true`` switches to a
                              subsequence (LIKE) match instead of array
                              containment
    Without a recognized ``get`` the start page is rendered.

    SECURITY FIX: the values of ``l[]`` used to be concatenated verbatim into
    the raw SQL (``IN (...)``, ``LIKE`` and ``ARRAY[...]`` clauses) — a direct
    SQL-injection vector. They are now coerced through ``int()`` first, which
    preserves behavior for the legitimate integer tag ids and raises
    ``ValueError`` for anything else.
    """
    if not request.user.is_authenticated():
        return redirect('dissdb_login')
    with connection.cursor() as cursor:
        if 'get' in request.GET:
            if request.GET.get('get') == 'data':
                cursor.execute('''
                    SELECT
                        anttags."id_Tag_id" as tag_id,
                        anttags."id_TagEbene_id" as tagebene_id,
                        COUNT("id_Tag_id") as count,
                        (
                            SELECT json_agg(
                                json_strip_nulls(json_build_object(
                                    'id', subanttags.id,
                                    'aid', subanttags."id_Antwort_id",
                                    'r', subanttags."Reihung",
                                    't', ant."ist_token_id",
                                    'ts', ant."ist_tokenset_id",
                                    's', ant."ist_Satz_id",
                                    'tr', COALESCE (tok."transcript_id_id", tokts."transcript_id_id", toktst."transcript_id_id"),
                                    'tc', (
                                        json_build_object(
                                            't', CASE WHEN tok.id is NOT NULL THEN 1 ELSE 0 END,
                                            'wt', CASE WHEN tok.token_type_id_id in (1,4,8) THEN 1 ELSE 0 END
                                        )
                                    ),
                                    'tsc', (
                                        SELECT json_build_object(
                                            't', COUNT(xtokts.*),
                                            'wt', COUNT(xtokts.*) FILTER(WHERE xtokts.fragment_of_id is NULL AND xtokts.token_type_id_id in (1,4,8))
                                        )
                                        FROM public."tokentoset_cache" as xtoktosetc
                                        LEFT JOIN public."token" as xtokts ON xtokts.id = xtoktosetc."id_token_id"
                                        WHERE xtoktosetc."id_tokenset_id" = tokset.id
                                    ),
                                    'tstc', (
                                        SELECT json_build_object(
                                            't', COUNT(xtoktst.*),
                                            'wt', COUNT(xtoktst.*) FILTER(WHERE xtoktst.fragment_of_id is NULL AND xtoktst.token_type_id_id in (1,4,8))
                                        )
                                        FROM public."tokentoset" as xtoktoset
                                        LEFT JOIN public."token" as xtoktst ON xtoktst.id = xtoktoset."id_token_id"
                                        WHERE xtoktoset."id_tokenset_id" = tokset.id
                                    )
                                ))
                            )
                            FROM public."AntwortenTags" as subanttags
                            LEFT JOIN public."Antworten" as ant ON ant.id = subanttags."id_Antwort_id"
                            LEFT JOIN public."token" as tok ON tok.id = ant."ist_token_id"
                            LEFT JOIN public."tokenset" as tokset ON tokset.id = ant."ist_tokenset_id"
                            LEFT JOIN public."token" as tokts ON tokts.id = tokset."id_von_token_id"
                            LEFT JOIN public."tokentoset" as toktoset ON toktoset."id_tokenset_id" = tokset.id
                            LEFT JOIN public."token" as toktst ON toktst.id = toktoset."id_token_id"
                            WHERE subanttags."id_Tag_id" = anttags."id_Tag_id"
                              AND subanttags."id_TagEbene_id" = anttags."id_TagEbene_id"
                        ) as antworten
                    FROM public."AntwortenTags" as anttags
                    GROUP BY "id_Tag_id", "id_TagEbene_id"
                    ORDER BY "count" DESC
                    ''')
                allTags = [{
                    'id': x[0],
                    'eId': x[1],
                    'count': x[2],
                    'data': x[3]
                } for x in cursor.fetchall()]
                return httpOutput(json.dumps({'tagList': allTags}),
                                  'application/json')
            if request.GET.get('get') == 'tagKontext':
                # Whitelist the requested tag ids to integers before they are
                # interpolated into the raw SQL below (injection guard).
                rql = [
                    str(int(aL)) for aL in request.GET.getlist('l[]')
                ]
                cursor.execute('''
                    SELECT
                        (
                            SELECT JSON_BUILD_OBJECT(
                                'trId', tat.transcript_id_id,
                                'trTxt', (
                                    SELECT tr.name FROM PUBLIC."transcript" AS tr
                                    WHERE tr.id = tat.transcript_id_id
                                )
                            )
                            FROM PUBLIC."token" AS tat
                            WHERE tat.id = tokendata.tokens[1]
                        ) AS transkript,
                        (
                            SELECT JSON_AGG(ROW_TO_JSON(antw.*)) FROM (
                                SELECT antwt.id, (
                                    SELECT JSON_AGG(json_build_object('r', atag."Reihung", 'tId', atag."id_Tag_id", 'eId', atag."id_TagEbene_id"))
                                    FROM PUBLIC."AntwortenTags" AS atag
                                    WHERE atag."id_Antwort_id" = antwt.id
                                ) AS tags
                                FROM PUBLIC."Antworten" AS antwt
                                WHERE antwt.ist_token_id = ANY(tokendata.tokens)
                                   OR antwt.ist_tokenset_id = ANY(
                                        SELECT tsi."id_tokenset_id" FROM (
                                            SELECT tts2."id_tokenset_id" AS "id_tokenset_id"
                                            FROM PUBLIC."tokentoset" AS tts2
                                            WHERE tts2."id_token_id" = ANY(tokendata.tokens)
                                            UNION ALL
                                            SELECT ttsc2."id_tokenset_id" AS "id_tokenset_id"
                                            FROM PUBLIC."tokentoset_cache" AS ttsc2
                                            WHERE ttsc2."id_token_id" = ANY(tokendata.tokens)
                                        ) AS tsi
                                   )
                            ) AS antw
                        ) AS antworten,
                        tokendata.*
                    FROM (
                        SELECT
                            CASE WHEN ant."ist_token_id" > 0 THEN ARRAY[ant."ist_token_id"]
                            ELSE (
                                SELECT ARRAY_AGG(tokids.tid) FROM (
                                    SELECT tts."id_token_id" AS tid
                                    FROM PUBLIC."tokentoset" AS tts
                                    WHERE tts."id_tokenset_id" = ant."ist_tokenset_id"
                                    UNION ALL
                                    SELECT ttsc."id_token_id" as tid
                                    FROM PUBLIC."tokentoset_cache" AS ttsc
                                    WHERE ttsc."id_tokenset_id" = ant."ist_tokenset_id"
                                ) AS tokids
                            ) END AS tokens,
                            row_to_json(at.*) AS data
                        FROM (
                            SELECT "id_Antwort_id", "id_TagEbene_id", (
                                SELECT array_agg( "id_Tag_id" ORDER BY "Reihung" ) as d
                                FROM public."AntwortenTags" as sat
                                WHERE sat."id_Antwort_id" = at."id_Antwort_id"
                            ) as t
                            FROM public."AntwortenTags" as at
                            WHERE "id_Tag_id" IN (''' + ','.join(rql) + ''')
                            GROUP BY "id_Antwort_id", "id_TagEbene_id"
                        ) as at
                        LEFT JOIN public."Antworten" as ant ON id = at."id_Antwort_id"
                        WHERE (ant."ist_token_id" > 0 OR ant."ist_tokenset_id" > 0)
                        ''' + (''' AND array_to_string(at.t, ',', '*') LIKE '%''' +
                               ','.join(rql) + '''%' '''
                               if request.GET.get('s') == 'true' else
                               ''' AND at.t @> ARRAY[''' + ', '.join(rql) +
                               ''']''') + '''
                    ) AS tokendata
                    ''')
                antwortenListe = [{
                    'transkript': x[0],
                    'antworten': x[1],
                    'tokens': x[2],
                    'data': x[3]
                } for x in cursor.fetchall()]
                return httpOutput(
                    json.dumps({'antwortenListe': antwortenListe}),
                    'application/json')
    return render_to_response('AnnotationsDB/tagauswertungstart.html',
                              RequestContext(request))
def views_annotool(request, ipk=0, tpk=0):
    """Main AJAX endpoint of the annotation tool.

    Dispatches on POST keys:
      * ``speichern``              -- persist a JSON bundle of edits (token
                                      sets, tokens, answers, answer tags);
                                      per-item errors are collected in
                                      ``sData['errors']`` instead of aborting,
                                      and the mutated bundle is echoed back
      * ``getTranskript``          -- load a transcript in pages of 250 events
                                      (with tokens, token sets and answers)
      * ``getMenue``               -- informant/transcript menu data
      * ``getTranscriptsInfList``  -- informant and transcript lists via raw SQL
    Falls through to rendering the startvue.html page.

    :param request: Django HttpRequest
    :param ipk: informant pk preselection (URL parameter, stringly typed)
    :param tpk: transcript pk preselection (URL parameter, stringly typed)
    """
    ipk = int(ipk)
    tpk = int(tpk)
    # Save:
    if 'speichern' in request.POST:
        sData = json.loads(request.POST.get('speichern'))
        sData['errors'] = []
        print(sData)  # NOTE(review): leftover debug output
        # Delete deletedTokenSets:
        if 'deletedTokenSets' in sData and sData['deletedTokenSets']:
            for key in sData['deletedTokenSets']:
                aId = key
                if aId > 0:
                    try:
                        adbmodels.tbl_tokenset.objects.get(id=aId).delete()
                    except Exception as e:
                        sData['errors'].append({
                            'type': 'deletedTokenSets',
                            'id': aId,
                            'error': str(type(e)) + ' - ' + str(e)
                        })
        # Save changedTokens:
        if 'changedTokens' in sData and sData['changedTokens']:
            for key, value in sData['changedTokens'].items():
                try:
                    aId = int(key)
                    aElement = adbmodels.token.objects.get(id=aId)
                    # Missing keys reset the field to None (likely_error: False)
                    setattr(aElement, 'text',
                            (value['t'] if 't' in value else None))
                    setattr(aElement, 'token_type_id_id',
                            (value['tt'] if 'tt' in value else None))
                    setattr(aElement, 'token_reihung',
                            (value['tr'] if 'tr' in value else None))
                    setattr(aElement, 'event_id_id',
                            (value['e'] if 'e' in value else None))
                    setattr(aElement, 'text_in_ortho',
                            (value['to'] if 'to' in value else None))
                    setattr(aElement, 'ID_Inf_id',
                            (value['i'] if 'i' in value else None))
                    setattr(aElement, 'ortho',
                            (value['o'] if 'o' in value else None))
                    setattr(aElement, 'sentence_id_id',
                            (value['s'] if 's' in value else None))
                    setattr(aElement, 'sequence_in_sentence',
                            (value['sr'] if 'sr' in value else None))
                    setattr(aElement, 'fragment_of_id',
                            (value['fo'] if 'fo' in value else None))
                    setattr(aElement, 'likely_error',
                            (value['le'] if 'le' in value else False))
                    aElement.save()
                    value['saved'] = True
                except Exception as e:
                    sData['errors'].append({
                        'type': 'changedTokens',
                        'id': aId,
                        'error': str(type(e)) + ' - ' + str(e)
                    })
        # Save changedTokenSets:
        if 'changedTokenSets' in sData and sData['changedTokenSets']:
            for key, value in sData['changedTokenSets'].items():
                error = False
                try:
                    aId = int(key)
                    # Negative/zero keys are client-side placeholders -> insert
                    if aId > 0:
                        aElement = adbmodels.tbl_tokenset.objects.get(id=aId)
                    else:
                        aElement = adbmodels.tbl_tokenset()
                    setattr(aElement, 'id_von_token_id',
                            (value['ivt'] if 'ivt' in value else None))
                    setattr(aElement, 'id_bis_token_id',
                            (value['ibt'] if 'ibt' in value else None))
                    aElement.save()
                except Exception as e:
                    error = True
                    sData['errors'].append({
                        'type': 'changedTokenSets',
                        'id': aId,
                        'error': str(type(e)) + ' - ' + str(e)
                    })
                if not error:
                    # Report the (possibly new) pk back to the client
                    value['nId'] = aElement.pk
                    if 't' in value:
                        # Sync the token membership of the set
                        for aTokenId in value['t']:
                            try:
                                try:
                                    aTokenToSet = adbmodels.tbl_tokentoset.objects.get(
                                        id_tokenset=value['nId'],
                                        id_token=aTokenId)
                                except adbmodels.tbl_tokentoset.DoesNotExist:
                                    aTokenToSet = adbmodels.tbl_tokentoset()
                                setattr(aTokenToSet, 'id_tokenset_id',
                                        value['nId'])
                                setattr(aTokenToSet, 'id_token_id', aTokenId)
                                aTokenToSet.save()
                            except Exception as e:
                                error = True
                                sData['errors'].append({
                                    'type': 'changedTokenSets',
                                    'id': aId,
                                    'error': str(type(e)) + ' - ' + str(e)
                                })
                        # Delete tbl_tokentoset rows no longer present:
                        aTokenToSets = adbmodels.tbl_tokentoset.objects.filter(
                            id_tokenset=value['nId'])
                        for aTokenToSet in aTokenToSets:
                            if aTokenToSet.id_token_id not in value['t']:
                                aTokenToSet.delete()
                    if not error:
                        value['saved'] = True
                    else:
                        # Roll back the set itself if membership sync failed
                        aElement.delete()
        # Delete deletedAntworten:
        if 'deletedAntworten' in sData and sData['deletedAntworten']:
            for key in sData['deletedAntworten']:
                aId = key
                if aId > 0:
                    try:
                        dbmodels.Antworten.objects.get(id=aId).delete()
                    except Exception as e:
                        sData['errors'].append({
                            'type': 'deletedAntworten',
                            'id': aId,
                            'error': str(type(e)) + ' - ' + str(e)
                        })
        # Save changedAntworten:
        if 'changedAntworten' in sData and sData['changedAntworten']:
            for key, value in sData['changedAntworten'].items():
                error = False
                aId = int(key)
                try:
                    if aId > 0:
                        aElement = dbmodels.Antworten.objects.get(id=aId)
                    else:
                        aElement = dbmodels.Antworten()
                    setattr(aElement, 'von_Inf_id',
                            (value['vi'] if 'vi' in value else None))
                    if 'inat' in value:
                        setattr(aElement, 'ist_nat', value['inat'])
                    if 'is' in value:
                        setattr(aElement, 'ist_Satz_id', value['is'])
                    if 'ibfl' in value:
                        setattr(aElement, 'ist_bfl', value['ibfl'])
                    if 'it' in value:
                        setattr(aElement, 'ist_token_id', value['it'])
                    if 'its' in value:
                        # Remap placeholder token-set ids to the pk assigned
                        # while saving changedTokenSets above
                        if ('changedTokenSets' in sData
                                and str(value['its']) in sData['changedTokenSets']
                                and 'nId' in sData['changedTokenSets'][str(
                                    value['its'])]):
                            setattr(
                                aElement, 'ist_tokenset_id',
                                sData['changedTokenSets'][str(
                                    value['its'])]['nId'])
                            sData['changedAntworten'][key]['its'] = sData[
                                'changedTokenSets'][str(value['its'])]['nId']
                        else:
                            setattr(aElement, 'ist_tokenset_id', value['its'])
                    if 'bds' in value:
                        setattr(aElement, 'bfl_durch_S', value['bds'])
                    # Start/stop come in as microsecond counts
                    setattr(
                        aElement, 'start_Antwort',
                        datetime.timedelta(
                            microseconds=int(value['sa'] if 'sa' in
                                             value else 0)))
                    setattr(
                        aElement, 'stop_Antwort',
                        datetime.timedelta(
                            microseconds=int(value['ea'] if 'ea' in
                                             value else 0)))
                    if 'k' in value:
                        setattr(aElement, 'Kommentar', value['k'])
                    aElement.save()
                    value['nId'] = aElement.pk
                    value['saved'] = True
                except Exception as e:
                    error = True
                    sData['errors'].append({
                        'type': 'changedAntworten',
                        'id': aId,
                        'error': str(type(e)) + ' - ' + str(e)
                    })
                # Save AntwortenTags
                if not error and 'tags' in value:
                    try:
                        if value['tags']:
                            for eValue in value['tags']:
                                aEbene = eValue['e']
                                if aEbene > 0:
                                    # Remove tags of this level that the client
                                    # no longer lists
                                    for antwortenTag in dbmodels.AntwortenTags.objects.filter(
                                            id_Antwort=value['nId'],
                                            id_TagEbene=aEbene):
                                        delIt = True
                                        for tValue in eValue['t']:
                                            if int(tValue['i']) == antwortenTag.pk:
                                                delIt = False
                                        if delIt:
                                            antwortenTag.delete()
                                reihung = 0
                                if aEbene > 0:
                                    # Insert/update tags, renumbering Reihung
                                    for tValue in eValue['t']:
                                        tagId = int(tValue['i'])
                                        if tagId > 0:
                                            aElement = dbmodels.AntwortenTags.objects.get(
                                                id=tagId)
                                        else:
                                            aElement = dbmodels.AntwortenTags()
                                        setattr(aElement, 'id_Antwort_id',
                                                value['nId'])
                                        setattr(aElement, 'id_Tag_id',
                                                tValue['t'])
                                        setattr(aElement, 'id_TagEbene_id',
                                                aEbene)
                                        setattr(aElement, 'Reihung', reihung)
                                        reihung += 1
                                        aElement.save()
                                else:
                                    # Level <= 0 means: delete the listed tags
                                    for tValue in eValue['t']:
                                        tagId = int(tValue['i'])
                                        if tagId > 0:
                                            aElement = dbmodels.AntwortenTags.objects.get(
                                                id=tagId)
                                            aElement.delete()
                    except Exception as e:
                        error = True
                        sData['errors'].append({
                            'type': 'changedAntwortenTags',
                            'id': aId,
                            'error': str(type(e)) + ' - ' + str(e)
                        })
                    # Load the current AntwortenTags
                    nAntTags = []
                    for xval in dbmodels.AntwortenTags.objects.filter(
                            id_Antwort=value['nId']).values(
                                'id_TagEbene').annotate(
                                    total=Count('id_TagEbene')).order_by(
                                        'id_TagEbene'):
                        nAntTags.append({
                            'e':
                            xval['id_TagEbene'],
                            't':
                            getTagFamilie(
                                dbmodels.AntwortenTags.objects.filter(
                                    id_Antwort=value['nId'],
                                    id_TagEbene=xval['id_TagEbene']).order_by(
                                        'Reihung'))
                        })
                    del sData['changedAntworten'][key]['tags']
                    sData['changedAntworten'][key]['pt'] = nAntTags
        return httpOutput(json.dumps({
            'OK': True,
            'gespeichert': sData
        }), 'application/json')
    # Transcript:
    if 'getTranskript' in request.POST:
        tpk = int(request.POST.get('getTranskript'))
        if tpk > 0:
            # Page size for the event pagination
            maxQuerys = 250
            dataout = {}
            # Load start info: (transcript, EinzelErhebung, Informanten, Saetze)
            if 'aType' in request.POST and request.POST.get('aType') == 'start':
                aTranskriptData = adbmodels.transcript.objects.get(pk=tpk)
                aTranskript = {
                    'pk': aTranskriptData.pk,
                    'ut': aTranskriptData.update_time.strftime(
                        "%d.%m.%Y- %H:%M"),
                    'n': aTranskriptData.name
                }
                aEinzelErhebung = {}
                aEinzelErhebungData = dbmodels.EinzelErhebung.objects.filter(
                    id_transcript_id=tpk)
                if aEinzelErhebungData:
                    aEinzelErhebungData = aEinzelErhebungData[0]
                    aEinzelErhebung = {
                        'pk': aEinzelErhebungData.pk,
                        'trId': aEinzelErhebungData.id_transcript_id,
                        'd': aEinzelErhebungData.Datum.strftime(
                            "%d.%m.%Y- %H:%M"),
                        'e': aEinzelErhebungData.Explorator,
                        'k': aEinzelErhebungData.Kommentar,
                        'dp': aEinzelErhebungData.Dateipfad,
                        'af': aEinzelErhebungData.Audiofile,
                        'lf': aEinzelErhebungData.Logfile,
                        'o': aEinzelErhebungData.Ort,
                        'b': aEinzelErhebungData.Besonderheiten
                    }
                aTokenTypes = {}
                for aTokenType in adbmodels.token_type.objects.filter(
                        token__transcript_id_id=tpk):
                    aTokenTypes[aTokenType.pk] = {
                        'n': aTokenType.token_type_name
                    }
                aInformanten = {}
                for aInf in adbmodels.token.objects.filter(
                        transcript_id_id=tpk).values('ID_Inf').annotate(
                            total=Count('ID_Inf')).order_by('ID_Inf'):
                    aInfM = dbmodels.Informanten.objects.get(
                        id=aInf['ID_Inf'])
                    aInformanten[aInfM.pk] = {
                        'k': aInfM.Kuerzel,
                        'ka': aInfM.Kuerzel_anonym
                    }
                aSaetze = {}
                for aSatz in dbmodels.Saetze.objects.filter(
                        token__transcript_id_id=tpk):
                    aSaetze[aSatz.pk] = {
                        't': aSatz.Transkript,
                        's': aSatz.Standardorth,
                        'k': aSatz.Kommentar
                    }
                # Number of full event pages
                aTmNr = int(
                    adbmodels.event.objects.prefetch_related(
                        'rn_token_event_id').filter(
                            rn_token_event_id__transcript_id_id=tpk).distinct(
                            ).order_by('start_time').count() / maxQuerys)
                dataout.update({
                    'aTranskript': aTranskript,
                    'aEinzelErhebung': aEinzelErhebung,
                    'aTokenTypes': aTokenTypes,
                    'aInformanten': aInformanten,
                    'aSaetze': aSaetze,
                    'aTmNr': aTmNr
                })
            # Load events:
            aNr = 0
            aEvents = []
            aTokens = {}
            if 'aNr' in request.POST:
                aNr = int(request.POST.get('aNr'))
            nNr = aNr
            startQuery = aNr * maxQuerys
            endQuery = startQuery + maxQuerys
            for aEvent in adbmodels.event.objects.prefetch_related(
                    'rn_token_event_id').filter(
                        rn_token_event_id__transcript_id_id=tpk).distinct(
                        ).order_by('start_time')[startQuery:endQuery]:
                aEITokens = {}
                for aEIToken in sorted(
                        list(aEvent.rn_token_event_id.all()),
                        key=operator.attrgetter("token_reihung")):
                    # Group token ids per informant for this event
                    if aEIToken.ID_Inf_id not in aEITokens:
                        aEITokens[aEIToken.ID_Inf_id] = []
                    aEITokens[aEIToken.ID_Inf_id].append(aEIToken.id)
                    aTokenData = {
                        't': aEIToken.text,
                        'tt': aEIToken.token_type_id_id,
                        'tr': aEIToken.token_reihung,
                        'e': aEIToken.event_id_id,
                        'to': aEIToken.text_in_ortho,
                        'i': aEIToken.ID_Inf_id,
                    }
                    # Optional fields only serialized when set
                    if aEIToken.ortho:
                        aTokenData['o'] = aEIToken.ortho
                    if aEIToken.sentence_id_id:
                        aTokenData['s'] = aEIToken.sentence_id_id
                    if aEIToken.sequence_in_sentence:
                        aTokenData['sr'] = aEIToken.sequence_in_sentence
                    if aEIToken.fragment_of_id:
                        aTokenData['fo'] = aEIToken.fragment_of_id
                    if aEIToken.likely_error:
                        aTokenData['le'] = 1
                    aTokens[aEIToken.pk] = aTokenData
                aEvents.append({
                    'pk': aEvent.pk,
                    's': str(aEvent.start_time),
                    'e': str(aEvent.end_time),
                    'l': str(aEvent.layer if aEvent.layer else 0),
                    'tid': aEITokens
                })
            if len(aEvents) == maxQuerys:
                # Full page -> tell the client there may be another one
                nNr += 1
            aTokenIds = [aTokenId for aTokenId in aTokens]
            # Chunk size to stay below the SQL variable limit
            maxVars = 500
            aTokenSets = {}
            nTokenSets = []
            aTokenIdsTemp = deepcopy(aTokenIds)
            # Load token sets for the events:
            while len(aTokenIdsTemp) > 0:
                nTokenSets += adbmodels.tbl_tokenset.objects.distinct().filter(
                    id_von_token_id__in=aTokenIdsTemp[:maxVars])
                nTokenSets += adbmodels.tbl_tokenset.objects.distinct().filter(
                    tbl_tokentoset__id_token__in=aTokenIdsTemp[:maxVars])
                aTokenIdsTemp = aTokenIdsTemp[maxVars:]
            for nTokenSet in nTokenSets:
                if nTokenSet.pk not in aTokenSets:
                    aTokenSet = {}
                    if nTokenSet.id_von_token:
                        aTokenSet['ivt'] = nTokenSet.id_von_token_id
                    if nTokenSet.id_bis_token:
                        aTokenSet['ibt'] = nTokenSet.id_bis_token_id
                    nTokenToSets = []
                    for nTokenToSet in nTokenSet.tbl_tokentoset_set.all():
                        nTokenToSets.append(nTokenToSet.id_token_id)
                    if nTokenToSets:
                        aTokenSet['t'] = nTokenToSets
                    aTokenSets[nTokenSet.pk] = (aTokenSet)
            # Load answers for tokens and token sets:
            aTokenSetIds = [aTokenSetId for aTokenSetId in aTokenSets]
            maxVars = 500
            aAntworten = {}
            nAntworten = []
            aTokenIdsTemp = deepcopy(aTokenIds)
            aTokenSetIdsTemp = deepcopy(aTokenSetIds)
            while len(aTokenIdsTemp) > 0:
                nAntworten += dbmodels.Antworten.objects.distinct().filter(
                    ist_token_id__in=aTokenIdsTemp[:maxVars])
                aTokenIdsTemp = aTokenIdsTemp[maxVars:]
            while len(aTokenSetIdsTemp) > 0:
                nAntworten += dbmodels.Antworten.objects.distinct().filter(
                    ist_tokenset_id__in=aTokenSetIdsTemp[:maxVars])
                aTokenSetIdsTemp = aTokenSetIdsTemp[maxVars:]
            for nAntwort in nAntworten:
                if nAntwort.pk not in aAntworten:
                    aAntwort = {'vi': nAntwort.von_Inf_id}
                    aAntwort['inat'] = nAntwort.ist_nat
                    if nAntwort.ist_Satz:
                        aAntwort['is'] = nAntwort.ist_Satz_id
                    aAntwort['ibfl'] = nAntwort.ist_bfl
                    if nAntwort.ist_token:
                        aAntwort['it'] = nAntwort.ist_token_id
                    if nAntwort.ist_tokenset:
                        aAntwort['its'] = nAntwort.ist_tokenset_id
                    aAntwort['bds'] = nAntwort.bfl_durch_S
                    if nAntwort.start_Antwort:
                        aAntwort['sa'] = str(nAntwort.start_Antwort)
                    if nAntwort.stop_Antwort:
                        aAntwort['ea'] = str(nAntwort.stop_Antwort)
                    aAntwort['k'] = nAntwort.Kommentar
                    # Load AntwortenTags:
                    nAntTags = []
                    for xval in dbmodels.AntwortenTags.objects.filter(
                            id_Antwort=nAntwort.pk).values(
                                'id_TagEbene').annotate(
                                    total=Count('id_TagEbene')).order_by(
                                        'id_TagEbene'):
                        nAntTags.append({
                            'e':
                            xval['id_TagEbene'],
                            't':
                            getTagFamilie(
                                dbmodels.AntwortenTags.objects.filter(
                                    id_Antwort=nAntwort.pk,
                                    id_TagEbene=xval['id_TagEbene']).order_by(
                                        'Reihung'))
                        })
                    if nAntTags:
                        aAntwort['pt'] = nAntTags
                    aAntworten[nAntwort.pk] = (aAntwort)
            dataout.update({
                'nNr': nNr,
                'aEvents': aEvents,
                'aTokens': aTokens,
                'aTokenSets': aTokenSets,
                'aAntworten': aAntworten
            })
            return httpOutput(json.dumps(dataout), 'application/json')
    # Load menu:
    if 'getMenue' in request.POST:
        if 'ainformant' in request.POST:
            ipk = int(request.POST.get('ainformant'))
        informantenMitTranskripte = []
        translist = list(
            adbmodels.token.objects.values(
                'ID_Inf', 'transcript_id').distinct().order_by('ID_Inf'))
        for val in dbmodels.Informanten.objects.all():
            # Count distinct transcripts per informant
            atc = 0
            for atl in translist:
                if atl['ID_Inf'] == val.pk:
                    atc += 1
            informantenMitTranskripte.append({
                'model': {
                    'pk': val.pk,
                    'model_str': str(val)
                },
                'Acount': atc
            })
        aTranskripte = []
        if ipk > 0:
            aTranskripte = [{
                'model': {
                    'pk': val.pk,
                    'model_str': str(val),
                    'update_time': val.update_time.strftime(
                        "%d.%m.%Y- %H:%M"),
                    'name': val.name
                },
                'count': val.token_set.count()
            } for val in [
                adbmodels.transcript.objects.get(pk=atid['id'])
                for atid in adbmodels.transcript.objects.filter(
                    token__ID_Inf=ipk).values('id').annotate(
                        total=Count('id'))
            ]]
        return httpOutput(
            json.dumps({
                'informantenMitTranskripte': informantenMitTranskripte,
                'aInformant': ipk,
                'aTranskripte': aTranskripte
            }), 'application/json')
    if 'getTranscriptsInfList' in request.POST:  # TOOL
        # Informants with the pks of their transcripts (aggregated in SQL)
        infList = [{
            'pk': aInf.pk,
            'modelStr': str(aInf),
            'transcriptsPKs': aInf.transcriptsPKs
        } for aInf in dbmodels.Informanten.objects.raw('''
            SELECT "Informanten".*, ARRAY(
                SELECT "token"."transcript_id_id" FROM "token"
                WHERE "token"."ID_Inf_id" = "Informanten"."id"
                GROUP BY "token"."transcript_id_id"
                ORDER BY "token"."transcript_id_id" ASC
            ) AS "transcriptsPKs"
            FROM "Informanten"
            ORDER BY "Informanten"."id" ASC
            ''')]
        # Transcripts with their token counts
        transList = [{
            'pk': aTrans.pk,
            'modelStr': str(aTrans),
            'updateTime': aTrans.update_time.strftime("%d.%m.%Y- %H:%M"),
            'name': aTrans.name,
            'tokenCount': aTrans.tokenCount
        } for aTrans in adbmodels.transcript.objects.raw('''
            SELECT "transcript".*,
                (SELECT COUNT(*) FROM "token" WHERE "token".transcript_id_id = "transcript".id) AS "tokenCount"
            FROM "transcript"
            ORDER BY "transcript"."id" ASC
            ''')]
        return httpOutput(
            json.dumps({
                'informanten': infList,
                'transcripts': transList
            }), 'application/json')
    return render_to_response('AnnotationsDB/startvue.html',
                              RequestContext(request))
def view(request):
    """Converter/maintenance view for ``AnnotationsDB/converter0.json``.

    Renders a paginated HTML table of the JSON work list and, for each line,
    the matching ``token`` rows plus the two preceding word tokens.  When the
    request carries ``?doit=1`` the view is no longer a dry run: it creates
    the missing ``Antworten`` / ``tbl_tokenset`` records and tags them via
    ``addTags``.

    NOTE(review): the source of this block arrived with its line structure
    mangled; statement nesting below was reconstructed from the token stream.
    Verify the deeply nested branches against version control.
    """
    # Is the user logged in?
    if not request.user.is_authenticated():
        return redirect('dissdb_login')
    output = '<!doctype html><meta charset="utf-8"><html><body><div style="max-width:1800px;margin:10px auto;">'
    # Load the converter work list (one dict per row to process).
    with open('AnnotationsDB/converter0.json', 'r', encoding='utf8') as file:
        aData = json.load(file)
    # Column keys of each JSON row shown in the left part of the table.
    rows = [
        'DBresult', 'idtagebene', '1', '2', '3', '4', '5', '6', '7', '8',
        '9', '10'
    ]
    # 'doit' switches from preview (dry run) to actually writing DB records.
    doIt = 'doit' in request.GET
    maxPerSite = 25  # pagination: work-list entries per page
    aSite = int(request.GET.get('site')) if 'site' in request.GET else 0
    maxSites = math.ceil(len(aData) / maxPerSite)
    # Pager header: current page, back/forward links, and the "Do It" link.
    output += '<div>' + str(aSite + 1) + '/' + str(maxSites) + '</div><div>'
    if aSite > 0:
        output += '<a href="/annotationsdb/converter0?site=' + str(
            aSite - 1) + '">Zurück</a>'
        if aSite < maxSites - 1:
            output += ' - '
    if aSite < maxSites - 1:
        output += '<a href="/annotationsdb/converter0?site=' + str(
            aSite + 1) + '">Weiter</a>'
    output += '</div>'
    output += '<div><p><a href="/annotationsdb/converter0?site=' + str(
        aSite) + '&doit=1"><b>Do It</b></a></p></div>'
    # Total number of matching tokens over ALL lines (informant 35 excluded).
    count = 0
    for aLine in aData:
        aTokens = adbmodels.token.objects.filter(
            Q(Q(splemma=aLine['DBresult']) & Q(sptag='NN'))
            | Q(Q(ttlemma=aLine['DBresult']) & Q(ttpos='NN'))
            | Q(ortho=aLine['DBresult'])).exclude(
                ID_Inf_id=35).order_by('token_reihung')
        count += aTokens.count()
    output += '<div>Anzahl: <b>' + str(count) + '</b></div>'
    # Table header: the JSON columns plus one "Result" column for DB matches.
    output += '<table style="white-space:nowrap;"><tr>'
    for row in rows:
        output += '<th align="left">' + row + '</th>'
    output += '<th align="left">Result</th>'
    output += '</tr>'
    alDg = aSite * maxPerSite  # absolute index of the current line (progress)
    for aLine in aData[aSite * maxPerSite:aSite * maxPerSite + maxPerSite]:
        output += '<tr>'
        for row in rows:
            output += '<td valign="top" style="border-bottom: 1px solid #000;">' + str(
                aLine[row]) + '</td>'
        # splemma = MATCH and ttpos = "NN" OR ttlemma = MATCH and ttpos = "NN" OR ortho = match
        aTokens = adbmodels.token.objects.filter(
            Q(Q(splemma=aLine['DBresult']) & Q(sptag='NN'))
            | Q(Q(ttlemma=aLine['DBresult']) & Q(ttpos='NN'))
            | Q(ortho=aLine['DBresult'])).exclude(
                ID_Inf_id=35).order_by('token_reihung')
        # print(aLine['DBresult'], aTokens.count())
        output += '<td valign="top" style="border-bottom: 1px solid #000;">'
        output += '<table style="text-align:right;width:100%;"><tr><th># ' + str(
            len(aTokens)
        ) + '</th><th>Id</th><th>ttpos</th><th>sptag</th><th>splemma</th><th>ttlemma</th><th>ortho</th><th>Reihung</th><th>Tokens ermittelt</th><th>Tokens verwenden</th><th>Typ</th><th>Tokenset</th><th>Antwort</th><th>Tags</th></tr>'
        dg = 1  # NOTE(review): 'dg' is re-used for several counters below
        for aToken in aTokens:
            output += '<tr>'
            output += '<td>' + str(dg) + '</td><td>' + str(
                aToken.id) + '</td><td>' + str(
                    aToken.ttpos) + '</td><td>' + str(
                        aToken.sptag) + '</td><td>' + str(
                            aToken.splemma) + '</td><td>' + str(
                                aToken.ttlemma) + '</td><td>' + str(
                                    aToken.token_reihung) + '</td>' if False else ''
            output += '<td>' + str(dg) + '</td><td>' + str(
                aToken.id) + '</td><td>' + str(
                    aToken.ttpos) + '</td><td>' + str(
                        aToken.sptag) + '</td><td>' + str(
                            aToken.splemma) + '</td><td>' + str(
                                aToken.ttlemma) + '</td><td>' + str(
                                    aToken.ortho) + '</td><td>' + str(
                                        aToken.token_reihung) + '</td>'
            # Determine the two preceding (word) tokens:
            xTokens = None
            with connection.cursor() as cursor:
                # Raw SQL (parameterized): last 5 tokens of this informant /
                # transcript up to the current token, newest first, as JSON.
                cursor.execute(
                    '''
                    SELECT array_to_json(array_agg(row_to_json(atok))) FROM (
                        ( SELECT "token".*, 0 AS tb FROM "token"
                          WHERE ("token"."ID_Inf_id" = %s
                                 AND "token"."transcript_id_id" = %s
                                 AND "token"."token_reihung" <= %s)
                          ORDER BY "token"."token_reihung" DESC LIMIT 5 )
                    ) AS atok
                    ''',
                    [aToken.ID_Inf_id, aToken.transcript_id_id,
                     aToken.token_reihung])
                zTokens = cursor.fetchone()[0]
                xTokens = []
                dg = 0
                # Collect tokens until 3 "real" word tokens (type 1) are seen.
                for zToken in zTokens:
                    if dg < 3:
                        xTokens.append(zToken)
                        if zToken['token_type_id_id'] == 1:
                            dg += 1
                xTokens = reversed(xTokens)  # back into chronological order
            if xTokens:
                # print(alDg, '/', len(aData), '-', dg, '/', len(aTokens))
                output += '<td>'
                uTokens = []  # tokens actually used for Antwort/tokenset
                dg = 0
                for xToken in xTokens:
                    output += (xToken['ortho'] or xToken['text']) + '(' + str(
                        xToken['id']) + ', ' + str(
                            xToken['token_type_id_id']) + ') | '
                    # Skip leading non-word tokens; keep everything after the
                    # first word token.
                    if dg > 0 or xToken['token_type_id_id'] == 1:
                        uTokens.append(xToken)
                    dg += 1
                output += '</td><td>'
                for uToken in uTokens:
                    output += (uToken['ortho'] or uToken['text']) + '(' + str(
                        uToken['id']) + ') | '
                output += '</td><td>'
                if len(uTokens) < 1:
                    output += '<b style="color:#e00;">Error</b></td><td><b style="color:#e00;">Error</b>'
                elif len(uTokens) == 1:
                    # Single token -> Antwort is attached to the token itself.
                    output += 'token'
                    output += '</td>'
                    output += '<td>-</td>'
                    aAntworten = dbmodels.Antworten.objects.filter(
                        ist_token_id=uTokens[0]['id'])
                    if aAntworten.count() > 0:
                        aAntwort = aAntworten[0]
                        output += '<td><b style="color:#d00;">Ja</b> (' + str(
                            aAntwort.pk) + ')</td>'
                    else:
                        if doIt:
                            # Create the missing Antwort for this token.
                            aAntwort = dbmodels.Antworten()
                            aAntwort.von_Inf_id = uTokens[0]['ID_Inf_id']
                            aAntwort.ist_token_id = uTokens[0]['id']
                            aAntwort.start_Antwort = datetime.timedelta(0)
                            aAntwort.stop_Antwort = datetime.timedelta(0)
                            aAntwort.save()
                            output += '<td><b style="color:#0d0;">Nein</b> (' + str(
                                aAntwort.pk) + ')</td>'
                        else:
                            output += '<td><b style="color:#00d;">Nein</b></td>'
                            aAntwort = None
                    if aAntwort:
                        output += addTags(
                            aAntwort, aLine['idtagebene'],
                            [(aLine[str(x)] if str(x) in aLine else None)
                             for x in range(1, 12)], doIt)
                else:
                    # Several tokens -> Antwort is attached to a tokenset.
                    output += 'tokenset'
                    output += '</td>'
                    aTokensets = adbmodels.tbl_tokenset.objects.filter(
                        id_von_token=uTokens[0]['id'],
                        id_bis_token=uTokens[-1]['id']).order_by('created')
                    if aTokensets.count() > 0:
                        aTokenset = aTokensets[0]
                        output += '<td><b style="color:#d00;">Ja</b> (' + str(
                            aTokenset.pk) + ')</td>'
                    else:
                        if doIt:
                            # Create the missing tokenset (from..to token).
                            aTokenset = adbmodels.tbl_tokenset()
                            aTokenset.id_von_token_id = uTokens[0]['id']
                            aTokenset.id_bis_token_id = uTokens[-1]['id']
                            aTokenset.save()
                            output += '<td><b style="color:#0d0;">Nein</b> (' + str(
                                aTokenset.pk) + ')</td>'
                        else:
                            output += '<td><b style="color:#00d;">Nein</b></td>'
                            aTokenset = None
                    if aTokenset:
                        aAntworten = dbmodels.Antworten.objects.filter(
                            ist_tokenset_id=aTokenset.pk)
                        if aAntworten.count() > 0:
                            aAntwort = aAntworten[0]
                            output += '<td><b style="color:#d00;">Ja</b> (' + str(
                                aAntwort.pk) + ')</td>'
                        else:
                            if doIt:
                                # Create the missing Antwort for the tokenset.
                                aAntwort = dbmodels.Antworten()
                                aAntwort.von_Inf_id = uTokens[0]['ID_Inf_id']
                                aAntwort.ist_tokenset_id = aTokenset.pk
                                aAntwort.start_Antwort = datetime.timedelta(0)
                                aAntwort.stop_Antwort = datetime.timedelta(0)
                                aAntwort.save()
                                output += '<td><b style="color:#0d0;">Nein</b> (' + str(
                                    aAntwort.pk) + ')</td>'
                            else:
                                output += '<td><b style="color:#00d;">Nein</b></td>'
                                aAntwort = None
                        if aAntwort:
                            output += addTags(
                                aAntwort, aLine['idtagebene'],
                                [(aLine[str(x)] if str(x) in aLine else None)
                                 for x in range(1, 12)], doIt)
            output += '</tr>'
            dg += 1
        output += '</table>'
        output += '</td>'
        output += '</tr>'
        alDg += 1
    output += '</table>'
    return httpOutput(output + '</div></body></html>', 'text/html')
def einzelerhebungen(request):
    """Return all EinzelErhebung rows (with their informants) as JSON.

    Requires an authenticated user; on any serialization/DB error the
    exception type and message are reported in the ``error`` field.
    """
    if not request.user.is_authenticated():
        return httpOutput(json.dumps({'error': 'login'}), 'application/json')
    erhebungen_out = []
    try:
        for erhebung in dbmodels.EinzelErhebung.objects.all():
            # Serialize every informant linked to this survey session.
            informanten_out = []
            for link in erhebung.inf_zu_erhebung_set.all():
                inf = link.ID_Inf
                informanten_out.append({
                    'pk': link.ID_Inf_id,
                    'Kuerzel': inf.Kuerzel,
                    'Kuerzel_anonym': inf.Kuerzel_anonym,
                    'Name': inf.Name,
                    'Vorname': inf.Vorname,
                    'weiblich': inf.weiblich,
                    'Geburtsdatum': inf.Geburtsdatum.strftime("%d.%m.%Y- %H:%M"),
                    'ErhAlterCa': inf.ErhAlterCa,
                    'Wohnbezirk': inf.Wohnbezirk,
                    'DialKomp': inf.DialKomp,
                    'StandKomp': inf.StandKomp,
                    'ZwischKomp': inf.ZwischKomp,
                    'GWPGruppe': inf.GWPGruppe,
                })
            erhebungen_out.append({
                'pk': erhebung.pk,
                'ID_Erh': erhebung.ID_Erh_id,
                'id_transcript': erhebung.id_transcript_id,
                'Datum': erhebung.Datum.strftime("%d.%m.%Y- %H:%M"),
                'Explorator': erhebung.Explorator,
                'Kommentar': erhebung.Kommentar,
                'Dateipfad': erhebung.Dateipfad,
                'Audiofile': erhebung.Audiofile,
                'Logfile': erhebung.Logfile,
                'Ort': erhebung.Ort,
                'Besonderheiten': erhebung.Besonderheiten,
                'FX_Informanten': informanten_out,
            })
    except Exception as e:
        return httpOutput(json.dumps({'error': str(type(e)) + ' - ' + str(e)}),
                          'application/json')
    return httpOutput(
        json.dumps({
            'einzelerhebungen': erhebungen_out,
            'error': None
        }), 'application/json')
def view_dateien(request, ojson=False):
    """File manager for the tree below ``settings.PRIVATE_STORAGE_ROOT``.

    Dispatches on POST keys: ``upload``, ``delFile``, ``renameFile``,
    ``makeDir``, ``renameDir`` (value ``'löschen'`` deletes), ``getDirContent``
    and ``getTree``; otherwise renders the start page with the directory tree.
    With ``ojson=True`` every result is returned as JSON, otherwise as
    plain-text/HTML responses.  Write actions are audited via admin LogEntry.

    Permission model (inferred from the comparisons below — confirm against
    ``getPermission``): >= 2 allows file writes, >= 3 directory creation /
    rename, > 3 recursive directory deletion.

    NOTE(review): line structure of this block arrived mangled; nesting was
    reconstructed from the token stream — verify against version control,
    especially the 'löschen' branch.
    """
    info = ''
    error = ''
    mDir = getattr(settings, 'PRIVATE_STORAGE_ROOT', None)
    if not mDir:
        if ojson:
            return httpOutput(json.dumps({'error': 'PRIVATE_STORAGE_ROOT wurde nicht gesetzt!'}), 'application/json')
        else:
            return HttpResponseServerError('PRIVATE_STORAGE_ROOT wurde nicht gesetzt!')
    # Upload files:
    if 'upload' in request.POST:
        uplDir = removeLeftSlash(request.POST.get('upload'))
        if getPermission(uplDir, mDir, request) < 2:
            if ojson:
                return httpOutput(json.dumps({'error': 'no directory permission'}), 'application/json')
            else:
                return httpOutput('Fehler! Sie haben nicht die nötigen Rechte für dieses Verzeichnis!')
        uplDir = os.path.join(mDir, uplDir)
        from django.core.files.storage import FileSystemStorage
        fs = FileSystemStorage(location=mDir)
        import unicodedata
        for afile in request.FILES.getlist('dateien'):
            asavename = os.path.join(uplDir, afile.name)
            # Strip non-ASCII (e.g. umlauts) from the stored file name.
            asavename = unicodedata.normalize('NFKD', asavename).encode('ascii', 'ignore').decode("utf-8")
            filename = fs.save(asavename, afile)
            # Audit trail entry for the upload.
            LogEntry.objects.log_action(
                user_id=request.user.pk,
                content_type_id=ContentType.objects.get_for_model(sys_filesystem).pk,
                object_id=0,
                object_repr='Datei',
                action_flag=ADDITION,
                change_message='Datei hinzugefügt: ' + filename
            )
        if ojson:
            return httpOutput(json.dumps({'file': 'uploaded'}), 'application/json')
        else:
            return httpOutput('OK')
    # Delete a file:
    if 'delFile' in request.POST:
        delFile = removeLeftSlash(request.POST.get('delFile'))
        delFile = os.path.join(mDir, delFile)
        if getPermission(delFile, mDir, request) < 2:
            if ojson:
                return httpOutput(json.dumps({'error': 'no directory permission'}), 'application/json')
            else:
                return httpOutput('Fehler! Sie haben nicht die nötigen Rechte für dieses Verzeichnis!')
        if not os.path.isfile(delFile):
            if ojson:
                return httpOutput(json.dumps({'error': 'file dosn\'t exist'}), 'application/json')
            else:
                return httpOutput('Fehler! "' + request.POST.get('delFile') + '" existiert nicht oder ist keine Datei!')
        try:
            os.remove(delFile)
            LogEntry.objects.log_action(
                user_id=request.user.pk,
                content_type_id=ContentType.objects.get_for_model(sys_filesystem).pk,
                object_id=0,
                object_repr='Datei',
                action_flag=DELETION,
                change_message='Datei gelöscht: ' + delFile
            )
            if ojson:
                return httpOutput(json.dumps({'file': 'removed'}), 'application/json')
            else:
                return httpOutput('OK')
        except Exception as e:
            if ojson:
                return httpOutput(json.dumps({'error': 'can\'t delete File'}), 'application/json')
            else:
                return httpOutput('Fehler! Datei "' + delFile + '" konnte nicht gelöscht werden! ' + str(e))
    # Rename a file
    if 'renameFile' in request.POST:
        renameFile = request.POST.get('renameFile')
        # Reject path separators in the new name (no directory traversal).
        if '/' in renameFile or '\\' in renameFile:
            if ojson:
                return httpOutput(json.dumps({'error': 'file has spezial character'}), 'application/json')
            else:
                return httpOutput('Fehler! Dateiname darf keine Sonderzeichen enthalten!')
        filename = request.POST.get('filename')
        fullpath = removeLeftSlash(request.POST.get('fullpath'))
        fullpathABS = os.path.join(mDir, fullpath)
        # Replace the trailing file name of 'fullpath' with the new name.
        newfullpath = fullpath[:-len(filename)] + renameFile
        newfullpathABS = os.path.join(mDir, newfullpath)
        if getPermission(fullpath, mDir, request) < 2:
            if ojson:
                return httpOutput(json.dumps({'error': 'no directory permission'}), 'application/json')
            else:
                return httpOutput('Fehler! Sie haben nicht die nötigen Rechte für dieses Verzeichnis!')
        if not os.path.isfile(fullpathABS):
            if ojson:
                return httpOutput(json.dumps({'error': 'file dosn\'t exists'}), 'application/json')
            else:
                return httpOutput('Fehler! Datei "' + fullpath + '" existiert nicht!')
        if os.path.isfile(newfullpathABS):
            if ojson:
                return httpOutput(json.dumps({'error': 'file already exists'}), 'application/json')
            else:
                return httpOutput('Fehler! Datei "' + newfullpath + '" existiert bereits!')
        try:
            os.rename(fullpathABS, newfullpathABS)
            LogEntry.objects.log_action(
                user_id=request.user.pk,
                content_type_id=ContentType.objects.get_for_model(sys_filesystem).pk,
                object_id=0,
                object_repr='Datei',
                action_flag=CHANGE,
                change_message='Datei umbenannt: ' + fullpathABS + ' -> ' + newfullpathABS
            )
            if ojson:
                return httpOutput(json.dumps({'file': 'renamed'}), 'application/json')
            else:
                return httpOutput('OK')
        except Exception as e:
            if ojson:
                return httpOutput(json.dumps({'error': 'can\'t rename file'}), 'application/json')
            else:
                return httpOutput('Fehler! Datei "' + fullpath + '" konnte nicht umbenannt werden! ' + str(e))
    # Create a directory:
    if 'makeDir' in request.POST:
        makeDir = request.POST.get('makeDir')
        # '.' is also rejected here, which excludes any '..' traversal.
        if '/' in makeDir or '\\' in makeDir or '.' in makeDir:
            if ojson:
                return httpOutput(json.dumps({'error': 'file has spezial character'}), 'application/json')
            else:
                return httpOutput('Fehler! Verzeichnisname darf keine Sonderzeichen enthalten!')
        baseDir = removeLeftSlash(request.POST.get('baseDir'))
        makeDir = os.path.join(mDir, baseDir, makeDir)
        if getPermission(makeDir, mDir, request) < 3:
            if ojson:
                return httpOutput(json.dumps({'error': 'no directory permission'}), 'application/json')
            else:
                return httpOutput('Fehler! Sie haben nicht die nötigen Rechte für dieses Verzeichnis!')
        # Safety net: the target must still lie below the storage root.
        if not makeDir[:len(mDir)] == mDir:
            return httpOutput('Fehler! "' + makeDir[:len(mDir)] + '" != "' + mDir + '"')
        if os.path.isdir(makeDir):
            if ojson:
                return httpOutput(json.dumps({'error': 'directory already exists'}), 'application/json')
            else:
                return httpOutput('Fehler! Verzeichnis "' + makeDir + '" existiert bereits!')
        try:
            os.makedirs(makeDir)
            LogEntry.objects.log_action(
                user_id=request.user.pk,
                content_type_id=ContentType.objects.get_for_model(sys_filesystem).pk,
                object_id=0,
                object_repr='Verzeichnis',
                action_flag=ADDITION,
                change_message='Verzeichnis erstellt: ' + makeDir
            )
            if ojson:
                return httpOutput(json.dumps({'directory': 'created'}), 'application/json')
            else:
                return httpOutput('OK')
        except Exception as e:
            if ojson:
                return httpOutput(json.dumps({'error': 'can\'t create directory'}), 'application/json')
            else:
                return httpOutput('Fehler! Verzeichnis "' + makeDir + '" konnte nicht erstellt werden! ' + str(e))
    # Rename/delete a directory
    if 'renameDir' in request.POST:
        renameDir = request.POST.get('renameDir')
        if '/' in renameDir or '\\' in renameDir or '.' in renameDir:
            if ojson:
                return httpOutput(json.dumps({'error': 'directory has spezial character'}), 'application/json')
            else:
                return httpOutput('Fehler! Verzeichnisname darf keine Sonderzeichen enthalten!')
        subname = request.POST.get('subname')
        fullpath = removeLeftSlash(request.POST.get('fullpath'))
        fullpathABS = os.path.join(mDir, fullpath)
        newfullpath = fullpath[:-len(subname)] + renameDir
        newfullpathABS = os.path.join(mDir, newfullpath)
        if getPermission(fullpath, mDir, request) < 3:
            if ojson:
                return httpOutput(json.dumps({'error': 'no directory permission'}), 'application/json')
            else:
                return httpOutput('Fehler! Sie haben nicht die nötigen Rechte für dieses Verzeichnis!')
        if not os.path.isdir(fullpathABS):
            if ojson:
                return httpOutput(json.dumps({'error': 'directory didn\'t exist'}), 'application/json')
            else:
                return httpOutput('Fehler! Verzeichnis "' + fullpath + '" existiert nicht!')
        # Magic value: new name 'löschen' means "delete this directory".
        if renameDir == 'löschen':
            try:
                # Recursive deletion requires the highest permission level.
                if getPermission(fullpath, mDir, request) > 3:
                    # Bottom-up walk so directories are empty when removed.
                    for root, dirs, files in os.walk(fullpathABS, topdown=False):
                        for name in files:
                            os.remove(os.path.join(root, name))
                        for name in dirs:
                            os.rmdir(os.path.join(root, name))
                    os.rmdir(fullpathABS)
                    LogEntry.objects.log_action(
                        user_id=request.user.pk,
                        content_type_id=ContentType.objects.get_for_model(sys_filesystem).pk,
                        object_id=0,
                        object_repr='Verzeichnis',
                        action_flag=DELETION,
                        change_message='Verzeichnis gelöscht: ' + fullpathABS
                    )
                    # NOTE(review): nesting inferred — the success returns may
                    # originally sit one level further out; confirm via VCS.
                    if ojson:
                        return httpOutput(json.dumps({'directory': 'deleted'}), 'application/json')
                    else:
                        return httpOutput('OK')
            except Exception as e:
                if ojson:
                    return httpOutput(json.dumps({'error': 'can\'t delete directory'}), 'application/json')
                else:
                    return httpOutput('Fehler! Verzeichnis "' + fullpath + '" konnte nicht gelöscht werden! ' + str(e))
        if os.path.isdir(newfullpathABS):
            if ojson:
                return httpOutput(json.dumps({'error': 'directory already exist'}), 'application/json')
            else:
                return httpOutput('Fehler! Verzeichnis "' + newfullpath + '" existiert bereits!')
        try:
            os.rename(fullpathABS, newfullpathABS)
            LogEntry.objects.log_action(
                user_id=request.user.pk,
                content_type_id=ContentType.objects.get_for_model(sys_filesystem).pk,
                object_id=0,
                object_repr='Verzeichnis',
                action_flag=CHANGE,
                change_message='Verzeichnis umbenannt: ' + fullpathABS + ' -> ' + newfullpathABS
            )
            if ojson:
                return httpOutput(json.dumps({'directory': 'renamed'}), 'application/json')
            else:
                return httpOutput('OK')
        except Exception as e:
            if ojson:
                return httpOutput(json.dumps({'error': 'can\'t rename directory'}), 'application/json')
            else:
                return httpOutput('Fehler! Verzeichnis "' + fullpath + '" konnte nicht umbenannt werden! ' + str(e))
    # Directory listing:
    if 'getDirContent' in request.POST:
        dateien = scanFiles(request.POST.get('getDirContent'), mDir, request)
        aPath = removeLeftSlash(request.POST.get('getDirContent'))
        if ojson:
            return httpOutput(json.dumps({'files': dateien, 'directory': request.POST.get('getDirContent'), 'permission': getPermission(aPath, mDir, request)}), 'application/json')
        else:
            return render_to_response(
                'DB/dateien.html',
                RequestContext(request, {'dateien': dateien, 'verzeichnis': request.POST.get('getDirContent'), 'permission': getPermission(aPath, mDir, request), 'info': info, 'error': error}),)
    # Start page with directory "tree":
    tree = scanDir(mDir, None, request)
    if not ojson and 'getTree' in request.POST:
        return render_to_response(
            'DB/dateien_tree.html',
            RequestContext(request, {'sdir': tree}),)
    if ojson:
        return httpOutput(json.dumps({'basetree': tree}), 'application/json')
    else:
        return render_to_response(
            'DB/dateien_start.html',
            RequestContext(request, {'tree': tree, 'info': info, 'error': error}),)