def results(request):
    """Render search results; redirect straight to the patient page when
    the query is a numeric patient id that exists in the database.

    Numeric queries are zero-padded to 6 characters before the lookup.
    """
    template_name = 'emr/results.html'
    search_query = request.GET.get('searchstring')
    daoobject = DAO()
    daoobject.set_tables_config()
    daoobject.setEasyUser(request.user)
    regularsearch = render(
        request, template_name, {
            'searchresults': daoobject.search(search_query, '1'),
            'lastId': daoobject.getLastId('tabla_1', 'campo_1'),
            'easyUser': daoobject.easy_user
        })
    try:
        # str.zfill replaces the former manual zero-padding loop, which
        # relied on the Python-2-only `xrange` builtin.
        padded_id = str(int(search_query)).zfill(6)
    except (TypeError, ValueError):
        # Missing or non-numeric query: fall back to the text search.
        # (TypeError covers search_query being None, which previously
        # escaped the ValueError handler.)
        return regularsearch
    # Look the id up once and reuse the result (it was queried twice before).
    existing_record = daoobject.doesIdExist(padded_id)
    if existing_record:
        return patient(request, existing_record)
    return regularsearch
def deleterecord(request, table_id, record_id):
    """Delete a record when the user's role permits it, then return to
    the index view either way."""
    dao = DAO()
    dao.set_tables_config()
    dao.setEasyUser(request.user)
    may_delete = dao.backEndUserRolesCheck(table_id, 'delete_table')
    if may_delete:
        dao.delete(table_id, record_id)
    return index(request)
def save(request):
    """Create or update a record from the request's query parameters.

    record_id == "0" means "insert a new record"; anything else edits the
    existing record.  Redirects to the patient detail page afterwards.
    """
    # NOTE(review): this mutates data based on GET parameters; presumably
    # it should be a POST — confirm against the calling templates.
    record_id = request.GET.get('record_id')
    table_id = request.GET.get('table_id')
    daoobject = DAO()
    daoobject.set_tables_config()
    daoobject.setEasyUser(request.user)
    fieldstochange = []
    patientId = 0
    # Collect the submitted value (and type) for every field of the table.
    for tablec in daoobject.tables_config:
        if tablec.id != table_id:
            continue
        for fieldc in tablec.fields:
            if fieldc.name == 'MSF ID':
                # Resolve the internal patient id from the external MSF id.
                patientId = daoobject.getPatientIdFromMsfId(
                    request.GET.get(fieldc.field_id))
            fieldstochange.append([
                fieldc.field_id,
                request.GET.get(fieldc.field_id), fieldc.type
            ])
        break  # matching table found; no need to scan the remaining ones
    # Record which user performed the change ('2' is presumably the
    # field-type code for text — confirm against the tables config).
    fieldstochange.append(['user', request.user.username, '2'])
    if record_id != "0":
        if daoobject.backEndUserRolesCheck(table_id, 'edit_table'):
            daoobject.editrecord(table_id, record_id, fieldstochange)
    else:
        if daoobject.backEndUserRolesCheck(table_id, 'add_table'):
            record_id = daoobject.insertrecord(table_id, fieldstochange)
    if table_id == '1':
        # Table 1 is the patient table itself: redirect to the saved record.
        patientId = record_id
    return HttpResponseRedirect(reverse('emr:patient', args=(patientId, )))
def read(self):
    """Fetch random articles and cache their UTF-8-encoded bodies in
    ``self.article_dict``, keyed by title."""
    store = DAO()
    for entry in store.find("random"):
        self.article_dict[entry["title"]] = entry["content"].encode('utf-8')
def __init__(self):
    # French stop-word list (articles, pronouns, auxiliaries, common
    # adverbs...) used to filter words out of the analysis.  The literal
    # contains duplicates (e.g. "moi", "avait", "au", "ces"); the set
    # constructor de-duplicates them, so they are harmless.
    self.stopList = {"lorsque","toute","ah","ainsi","car","chez","non","eux",
                     "venait","celui","au","aux","suis","oui","dont","bien",
                     "faire","y","avez","ai","j","m","cette","on","moi","me",
                     "c","a","plus","qu","dit","de","moi","n","avait","ne",
                     "se","était","s","lui","une","l","à","d","un","avait",
                     "alors", "au", "aucuns", "aussi", "autre", "avant",
                     "avec", "avoir", "bon", "cas", "ce", "cela", "ces",
                     "ces","chaque","ci","comme","comment","dans","des","du",
                     "dedans","dehors","depuis","devrait","doit","donc",
                     "elle","elles","en","encore","est","et","eu","fait",
                     "faites","font","hors","ici","il","ils","je","juste",
                     "la","le","les","leur","là","ma","maintenant","mais",
                     "mes","mine","moins","mon","même","ni","notre","nous",
                     "ou","où","par","parce","pas","peut","peu","plupart",
                     "pour","pourquoi","quand","que","quel","quelle",
                     "quelles","quels","qui","sa","sans","ses","seulement",
                     "si","sien","son","sont","sous","soyez","sur","ta",
                     "tandis","tellement","tels","tes","ton","tous","tout",
                     "trop","très","tu","voient","vont","votre","vous","vu",
                     "ça","étaient","étions","été","être"}
    # Words accumulated from the input texts (presumably in reading order).
    self.tableauDeMots = []
    # Per-word data filled later — see remplirDictionnaire usage by callers.
    self.dictionnaire = {}
    self.bd = dao.DAO();  # database access object (original trailing ';')
    # Snapshot of the words already present in the database.
    self.dictMotsDB = self.bd.creerDictMotsExistants()
    # New words queued for insertion into the DB — presumably consumed by
    # insertionsNouveauxMots; confirm against callers.
    self.listeNouveauxMotsBD = []
    # Words whose DB rows need updating — TODO confirm against callers.
    self.dictMotsMisAJour = {}
def main():
    """Entry point: create the DB tables, launch the MQTT handler on its
    own thread, and instantiate the scheduled tasks object."""
    print("Principal, main{... começou")
    database = DAO.DAO()
    database.criarTabelas()
    mqtt_thread = threading.Thread(target=GerenciarMQTT.GerenciarMQTT)
    mqtt_thread.start()
    tasks = Tarefas.Tarefas()
    print("Principal, main {... chegou até o fim")
def get_query(self):
    """Return a ``{title: utf-8 content}`` query built from a random
    article with a non-empty body; delegate when a detour is active."""
    if self.detour:
        return self.get_detour()
    store = DAO()
    empty = ''.encode('utf-8')
    body = empty
    # Keep popping random articles until one actually has content.
    while body == empty:
        article = store.pop('random')
        heading = article["title"]
        body = article["content"].encode('utf-8')
    return {heading: body}
def index(request):
    """Render the EMR landing page with the lightweight tables config."""
    dao = DAO()
    dao.set_tables_config()
    dao.setEasyUser(request.user)
    context = {
        'edbtables': dao.tables_config_lite,
        # Original note said "Last ID not used anymore" — kept for template
        # compatibility; candidate for removal after confirming.
        'lastId': dao.getLastId('tabla_1', 'campo_1'),
        'easyUser': dao.easy_user,
    }
    return render(request, 'emr/index.html', context)
def patient(request, record_id):
    """Render a patient's detail page: the record itself (table '1')
    plus all records related to it."""
    dao = DAO()
    dao.set_tables_config()
    dao.set_tables_relationships()
    dao.setEasyUser(request.user)
    context = {
        'record': dao.get_record_with_type('1', record_id, True),
        'relatedrecords': dao.get_related_records(record_id),
        'lastId': dao.getLastId('tabla_1', 'campo_1'),
        'easyUser': dao.easy_user,
    }
    return render(request, 'emr/patient.html', context)
def edit(request, table_id, record_id):
    """Render the edit form for a record when the user may edit the
    table; otherwise fall back to the index page."""
    dao = DAO()
    dao.set_tables_config()
    dao.setEasyUser(request.user)
    if not dao.backEndUserRolesCheck(table_id, 'edit_table'):
        return index(request)
    context = {
        'record': dao.get_record_with_type(table_id, record_id, False),
        'lastId': dao.getLastId('tabla_1', 'campo_1'),
        'easyUser': dao.easy_user,
    }
    return render(request, 'emr/edit.html', context)
def downloadexport(request):
    """Stream the generated export archive to members of group 2; send
    everyone else back to the index page.

    Raises Http404 when the archive file does not exist.
    """
    daoobject = DAO()
    daoobject.set_tables_config()
    daoobject.setEasyUser(request.user)
    if not request.user.groups.filter(id=2).exists():
        return index(request)
    # Renamed from `zip`, which shadowed the builtin of the same name.
    zip_path = daoobject.generateExport() + '.zip'
    if os.path.exists(zip_path):
        with open(zip_path, 'rb') as fh:
            response = HttpResponse(fh.read(),
                                    content_type="application/zip")
            response['Content-Disposition'] = (
                'inline; filename=' + os.path.basename(zip_path))
            return response
    raise Http404
def downloadbackup(request):
    """Stream the encrypted database backup to members of group 2; send
    everyone else back to the index page.

    Raises Http404 when the backup file does not exist.
    """
    daoobject = DAO()
    daoobject.set_tables_config()
    daoobject.setEasyUser(request.user)
    if not request.user.groups.filter(id=2).exists():
        return index(request)
    # Renamed from `file`, which shadows the Python 2 builtin.
    backup_path = u'/opt/shared/backup.gz.enc'
    if os.path.exists(backup_path):
        with open(backup_path, 'rb') as fh:
            response = HttpResponse(fh.read(),
                                    content_type="application/zip")
            response['Content-Disposition'] = (
                'inline; filename=' + os.path.basename(backup_path))
            return response
    raise Http404
def __init__(self, matriceCooccurrences, indiceMotParam, nombreMotsAffiches,
             nbCentroides=0, estAnalyseVerbale=False):
    """Set up a clustering run: DB handle, existing-word dictionary, the
    co-occurrence matrix, and the number of centroids (derived from the
    explicit word indices when any are supplied)."""
    self.debut = None
    self.bd = dao.DAO()
    self.dictMotExistant = self.bd.creerDictMotsExistants()
    self.algo = algo.Algorithmie()
    self.matriceCooccurrences = matriceCooccurrences
    self.nbMots = len(matriceCooccurrences[0])
    self.estAnalyseVerbale = estAnalyseVerbale
    self.nombreMotsAAfficher = int(nombreMotsAffiches)
    # Explicit word indices override the requested centroid count.
    self.centroidesEnParam = indiceMotParam != []
    if self.centroidesEnParam:
        self.nbCentroides = len(indiceMotParam)
    else:
        self.nbCentroides = nbCentroides
    if self.estAnalyseVerbale:
        self.genererDictionnaireVerbes()
def rcv_message(client_socket):
    """
    @do : Listen on the socket and turn the received message into a DAO
    @args : Socket client_socket -> client's socket
    @return : DAO -> the received message
    """
    dao = DAO.DAO()
    try:
        msg = client_socket.recv(1024).decode()
        return dao.deserialize(msg)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps only the intended fallback.
        # On error (socket closed) simulate receiving a "sys exit" message.
        dao.type = "sys"
        dao.action = "exit"
        return dao
def addrecord(request, table_id, related_record_entry):
    """Render the blank "add record" form for a table, linked to a
    related record; fall back to the index page when not permitted or
    when no table was selected."""
    dao = DAO()
    dao.set_tables_config()
    dao.set_tables_relationships()
    dao.setEasyUser(request.user)
    if dao.backEndUserRolesCheck(table_id, 'add_table') and table_id != "0":
        # New top-level patient records link through campo_1; everything
        # else links through campo_2.
        if related_record_entry == '0' and table_id == '1':
            related_field = 'campo_1'
        else:
            related_field = 'campo_2'
        return render(
            request, 'emr/addrecord.html', {
                'recordform': dao.getrecordform(table_id),
                'related_record_entry': related_record_entry,
                'related_record_field': related_field,
                'lastId': dao.getLastId('tabla_1', 'campo_1'),
                'easyUser': dao.easy_user,
            })
    return index(request)
def __init__(self):
    """Wire up the persistence layer, the Wikipedia API client and the
    seed link list read from links.txt."""
    self.api = Wikipedia_API()
    self.DAO = DAO()
    self.links = self.read_file("links.txt")
def recognize_people(people_folder, shape):
    """Interactive attendance loop.

    Trains the user-chosen OpenCV face recognizer on the images found
    under ``people_folder`` (one sub-folder per person), then labels
    faces from the camera feed until ESC is pressed, at which point the
    collected predictions are persisted through DAO and the process
    exits.

    ``shape`` is forwarded to get_images() — presumably a face
    normalisation/crop selector; confirm against get_images.
    """
    try:
        people = [person for person in os.listdir(people_folder)]
    except:
        # NOTE(review): bare except hides real errors (permissions, bad
        # path) behind this hint message.
        print("Have you added at least one person to the system?")
        sys.exit()
    print("This are the people in the Recognition System:")
    for person in people:
        print("-" + person)
    print(" POSSIBLE RECOGNIZERS TO USE")
    print("1. EigenFaces")
    print("2. FisherFaces")
    print("3. LBPHFaces")
    choice = check_choice()
    detector = FaceDetector('face_recognition_system/frontal_face.xml')
    # Per-algorithm confidence threshold: predictions whose distance is at
    # or above it are labelled "Unknown".
    if choice == 1:
        recognizer = cv2.face.EigenFaceRecognizer_create()
        threshold = 4000
    elif choice == 2:
        recognizer = cv2.face.FisherFaceRecognizer_create()
        threshold = 300
    elif choice == 3:
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        threshold = 105
    # Build the training set: grayscale images labelled by person index.
    images = []
    labels = []
    labels_people = {}
    for i, person in enumerate(people):
        labels_people[i] = person
        for image in os.listdir(people_folder + person):
            images.append(cv2.imread(people_folder + person + '/' + image, 0))
            labels.append(i)
    try:
        recognizer.train(images, np.array(labels))
    except:
        print(
            "\nOpenCV Error: Do you have at least two people in the database?\n"
        )
        sys.exit()
    video = VideoCamera()
    while True:
        frame = video.get_frame()
        faces_coord = detector.detect(frame, False)
        if len(faces_coord):
            frame, faces_img = get_images(frame, faces_coord, shape)
            for i, face_img in enumerate(faces_img):
                # `students` is a module-level set accumulating every
                # predicted label for the attendance update below.
                global students
                collector = cv2.face.StandardCollector_create()
                pred, conf = recognizer.predict(face_img)
                print("Prediction: " + str(pred))
                students.add(pred)
                print(students)
                print('Confidence: ' + str(round(conf)))
                print('Threshold: ' + str(threshold))
                if conf < threshold:
                    # Confident match: draw the person's name on the frame.
                    cv2.putText(frame, labels_people[pred].capitalize(),
                                (faces_coord[i][0], faces_coord[i][1] - 2),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209),
                                2, cv2.LINE_AA)
                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, (206, 0, 209),
                                2, cv2.LINE_AA)
        cv2.putText(frame, "ESC to exit", (5, frame.shape[0] - 5),
                    cv2.FONT_HERSHEY_PLAIN, 1.2, (206, 0, 209), 2,
                    cv2.LINE_AA)
        cv2.imshow('Video', frame)
        # ESC (keycode 27) ends the loop: persist attendance and exit.
        if cv2.waitKey(100) & 0xFF == 27:
            dao = DAO(students)
            dao.dbOperations()
            print(
                "*******************************Attendence Updated ****************************"
            )
            sys.exit()
def main():
    """Command-line driver for the co-occurrence analysis tool.

    Flags (see the getopt spec below): -e train from text files (with
    --enc encoding, --chemin input path, -t window size), -r interactive
    synonym search, -c clustering (--nc centroid count or --mots explicit
    words, -n words shown per cluster), -v verbal analysis.
    """
    try:
        bd = dao.DAO()
        bd.creerTable()
        lecteur = lect.Lecteur()
        affichage = aff.Affichage()
        # c: clustering / n: number of words shown per cluster /
        # nc: number of centroids
        options, arguments = getopt.getopt(
            sys.argv[1:], 'erct:n:v', ['enc=', 'chemin=', 'nc=', 'mots='])
        entrainement = False
        recherche = False
        clustering = False
        analyseVerbale = False
        for option, argument in options:
            if option == '-e':
                entrainement = True
            elif option == '-r':
                recherche = True
            elif option == '-c':
                clustering = True
            elif option == '-t':
                tailleFenetre = argument
            elif option == '-n':
                nombreMotsAffiches = argument
            elif option == '-v':
                analyseVerbale = True
        if entrainement:
            affichage.entrainement()
            start = time()
            listeChemins = []
            for option, argument in options:
                if option == '--enc':
                    encodage = argument
                elif option == '--chemin':
                    listeChemins.append(argument)
            # Read every input text and accumulate its words in the reader.
            for chemin in listeChemins:
                texte = txt.Texte(chemin, encodage, lecteur)
                lecteur.ajouterAuTableau(texte.tableauContenu)
            lecteur.remplirDictionnaire()
            bd.insertionsNouveauxMots(lecteur.listeNouveauxMotsBD)
            # Build the in-memory co-occurrence dictionary, then split it
            # into inserts vs updates against what the DB already holds.
            dictCooccurrences = dict.Dictionnaire(
                tailleFenetre, lecteur.tableauDeMots, lecteur.dictionnaire)
            dictionnaireCooccurences = dictCooccurrences.construireDictionnaireCooccurences()
            dictionnaireCooccurrencesExistantes = bd.creerDictCoocurrencesExistantes(tailleFenetre)
            tupleListes = lecteur.trierInsertUpdate(
                dictionnaireCooccurrencesExistantes, dictionnaireCooccurences)
            bd.InsertionCooccurrences(tupleListes[0])
            bd.UpdateCooccurrences(tupleListes[1])
            affichage.tempsEcoule("Entrainement", time() - start)
            texte = None
    except FileNotFoundError:
        affichage.erreurChemin()
        sys.exit()
    except LookupError:
        affichage.erreurEncodage()
        sys.exit()
    except ValueError:
        affichage.erreurTailleFenetre()
        sys.exit()
    except getopt.GetoptError:
        affichage.erreurArgumentLigneDeCommande()
        sys.exit()
    if recherche:
        affichage.recherche()
        start = time()
        algo = alg.Algorithmie()
        matrice = matr.Matrice(lecteur.dictMotsDB)
        matriceCooccurrences = bd.creerMatriceCooccurrences(
            tailleFenetre, matrice.matriceVide)
        affichage.tempsEcoule("Initialisation de la recherche ",
                              time() - start)
        reponse = affichage.inputParamsRecherche()
        mot = reponse[0].casefold()
        # 'q' quits the interactive search loop.
        while mot != 'q':
            try:
                indexMot = bd.trouverIndexMot(mot)
                # Word that does not exist in the database
                if indexMot == -1:
                    raise KeyError()
                resultat = algo.calculresultats(reponse[2], indexMot, mot,
                                                matriceCooccurrences,
                                                lecteur.dictMotsDB)
                affichage.afficherResultats(int(reponse[1]) - 1, resultat,
                                            mot, lecteur.motStopList)
            except KeyError:
                affichage.erreurMotInexistant()
            except UnboundLocalError:
                affichage.erreurChoixAlgo()
            except ValueError:
                affichage.erreurNombreSynonymes()
            except IndexError:
                affichage.erreurNombreParametresRecherche()
            reponse = affichage.inputParamsRecherche()
            mot = reponse[0].casefold()
    try:
        if clustering:
            nbCentroides = 0
            listeIndexMots = []
            # Either an explicit centroid count (--nc) or explicit words
            # (--mots) whose indices become the centroids.
            for option, argument in options:
                if option == '--nc':
                    nbCentroides = int(argument)
                elif option == '--mots':
                    listeMots = argument
                    for mot in listeMots.split():
                        indexMot = bd.trouverIndexMot(mot)
                        if indexMot == -1:
                            raise KeyError()
                        else:
                            listeIndexMots.append(indexMot)
            algo = alg.Algorithmie()
            matrice = matr.Matrice(lecteur.dictMotsDB)
            matriceCooccurrences = bd.creerMatriceCooccurrences(
                tailleFenetre, matrice.matriceVide)
            objClustering = cluster.Clustering(matriceCooccurrences,
                                               listeIndexMots,
                                               nombreMotsAffiches,
                                               nbCentroides, analyseVerbale)
            objClustering.bouclePrincipale()
    except ValueError:
        affichage.erreurTailleFenetre()
        sys.exit()
    except KeyError:
        affichage.erreurMotInexistant()
    affichage.finProgramme()
# # This program is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see http://www.gnu.org/licenses/ from __future__ import division import yaml import numpy as np import DAO as dao import stringDuplicates as sd import csv db = dao.DAO() class FuzzyMatch: def __init__(self, yaml_map_file, clusters_csv_file, tag="raaga", similarity_threshold=0.6, log="log.txt"): self.yaml_map = yaml.load(file(yaml_map_file)) self.clusters = {} self.clusters_csv_file = clusters_csv_file self.load_clusters() self.all_terms = []
def __init__(self):
    """Initialise the DAO, the Wikipedia client and the DB record counter."""
    self.db_count = 0
    self.DAO = DAO()
    self.wikipedia = Wikpedia_API()
def __init__(self, DBPath):
    """Remember the database path and open a DAO on it."""
    # Imported lazily — presumably to avoid a module-level import cycle;
    # confirm before hoisting.
    import DAO
    self.dbp = DBPath
    self.dx = DAO.DAO(DBPath)
def __init__(self, **kwargs):
    """Build the widget and preload the DAO's table configuration and
    relationships."""
    super(RecordDetail, self).__init__(**kwargs)
    dao_instance = DAO.DAO()
    dao_instance.set_tables_config()
    dao_instance.set_tables_relationships()
    self.daoobject = dao_instance
def __init__(self, path, db, user, pwd, ftp_user, ftp_pwd, port):
    """Scan `path`, upload new/changed files over FTP and record them in
    the database under a fresh version id.

    The version id (a timestamp) doubles as the DB table id and is also
    published through the module-level global `table_id`.  The previous
    id is read from the local `version` file; "null" marks a first run.
    """
    global table_id
    # If not converted to a string, export errors occur downstream —
    # note preserved from the original.  (`id` renamed: it shadowed the
    # builtin.)
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    table_id = '%s' % stamp
    # md5 -> [filename, server_path] of the files already recorded.
    data = dict()
    version_file = os.getcwd() + "/version"
    # Read the previous version id (if any), then overwrite the file with
    # the new id.  BUG FIX: previously, an IOError on a first run left
    # `last_version` unbound, causing a NameError below; it now defaults
    # to "null".
    last_version = "null"
    if os.path.exists(version_file):
        with open(version_file, 'r') as txt:
            last_version = txt.readline()
    try:
        # `with` guarantees the handle is closed even on write errors
        # (the original left it open on failure).
        with open(version_file, 'w') as fobj:
            fobj.write(table_id)
    except IOError:
        print('*** version file open error:')
    dao = DAO.DAO(db, user, pwd)
    dao.create_table(table_id)
    ftp = FTP.FTP(ftp_user, ftp_pwd, port)
    # Create a new server-side folder for this version.
    version_path = '/home/dataspace/user/%s/' % table_id
    ftp.create_new_folder(version_path)
    dao.execute_sql('Begin')
    # BUG FIX: the original compared `last_version is "null"` — identity,
    # not equality — which only worked by CPython string interning.
    if last_version == "null":
        print('Start scanning')
        file_scanner_initial(path, version_path, dao, ftp)
        print('Scanner completed')
    else:
        res = dao.request_data(last_version)
        if len(res) < 1:
            print('Start scanning')
            file_scanner_initial(path, version_path, dao, ftp)
            print('Scanner completed')
        else:
            for row in res:
                md = row[0]
                data[md] = [row[1], row[2]]
            print('Start scanning')
            file_scanner(path, version_path, data, dao, ftp)
            print('Scanner completed')
    dao.execute_sql('Commit')
    dao.disconnect()
def balancodobanco(self):
    """Run the scheduled bank-balance job against the database."""
    print("Executando tarefa agendada do balanco do banco")
    DAO.DAO().balancoDoBanco()
# devuelve repeticiones # # optimización # # fin de definiciones import psycopg2 import operator import time import DAO dao = DAO.DAO() #dao.db('segmentador:rodatnemges:censo2020:172.26.67.239') dao.db('halpe:halpe:CPHyV2020:172.26.68.174') radios = dao.get_radios(_table) for prov, dpto, frac, radio in radios: if (radio and prov == _prov and dpto == _dpto and frac == _frac and radio == _radio): # las del _table print print("radio: ") print(prov, dpto, frac, radio) conteos_mzas = dao.get_conteos_mzas(_table, prov, dpto, frac, radio) manzanas = [mza for mza, conteo in conteos_mzas] conteos = dao.get_conteos_lados(_table, prov, dpto, frac, radio)