def text_callback(text):
    """Handle a scanned NFC tag: register the student if unknown, then log the scan.

    Relies on module-level globals loaded at startup: ``classroom``,
    ``current_date``, ``current_time`` and ``exam``.
    """
    db = DB()
    # Strip spaces from the raw NFC id as delivered by the reader.
    student_id = text.replace(" ", "")

    # Does this student already exist? (parameterized query — no SQL injection)
    find_student_query = "SELECT COUNT(nfc_id) FROM students WHERE nfc_id = %s"
    # FIX: the original called str.decode('unicode_escape').encode('ascii', 'utf-8'),
    # which is Python-2-only (str has no .decode in Python 3) and passed
    # 'utf-8' where an *errors* handler belongs. student_id is already a str.
    find_student_value = (student_id,)
    if db.get_data(find_student_query, find_student_value) == 0:
        # Unknown tag: retrieve student info via the Artevelde API ...
        student_res_obj = db.get_student_info(student_id)
        # ... and create the new student record.
        put_new_student_query = (
            "INSERT INTO students (firstname, lastname, email, nfc_id) "
            "VALUES (%s,%s,%s,%s)"
        )
        put_new_student_value = (student_res_obj['Voornaam'],
                                 student_res_obj['Naam'],
                                 student_res_obj['Email'],
                                 student_id)
        db.write_data(put_new_student_query, put_new_student_value)

    # Look up the classroom id by title.
    # `classroom` is a module-level global — TODO confirm it is a str here.
    find_class_query = "SELECT id FROM classrooms WHERE title = %s"
    find_class_value = (classroom,)

    # Record the scan itself.
    put_students_query = (
        "INSERT INTO classrooms__students (scan_date, scan_time, classroom_id, student_id, exam) "
        "VALUES (%s,%s,%s,%s,%s)"
    )
    put_students_value = (current_date, current_time,
                          db.get_data(find_class_query, find_class_value),
                          student_id, exam)
    print(student_id)
    db.write_data(put_students_query, put_students_value)
def __init__(self, config: str = 'config/config.xml', output_dir='./report'):
    """Set up configuration, output directory, database handle and CSV file.

    Accepts either a path to an XML config file or a ready Config instance.
    A falsy output_dir falls back to the config's 'output_dir' entry, or to
    '../report' when the config does not define one.
    """
    cfg = config if config else 'config/config.xml'
    if isinstance(cfg, str):
        cfg = Config(cfg)
    self.config = cfg

    if not output_dir:
        output_dir = cfg.data['output_dir'] if 'output_dir' in cfg.data else '../report'
    self.__output_dir = output_dir

    # Start with an empty DB configuration; it may be replaced later.
    self.config_db = dict()
    self.__db = DB(typedb='postgres', **self.config_db)
    self.__file = File(f"{self.get_output_dir()}/file.csv", 'csv')
def main4():
    """Interactively score typed texts against two phrase databases."""
    reports_dir = '../Reports/base_full/testes'

    # One database per phrase size (3-word and 2-word), each restored from
    # its SQL dump into RAM.
    db3 = DB(reports_dir + "/", "database-dump", debug=False,
             run_on_ram=reports_dir + '/basefull_phrases3.sql')
    db3.ram = False
    db2 = DB(reports_dir + "/", "database-dump", debug=False,
             run_on_ram=reports_dir + '/basefull_phrases2.sql')
    db2.ram = False

    # Keep scoring until the user submits an empty line.
    while t := input():
        print("FF3 : ", main3([0.01516190, 0.13934191, 0.34223577, 0.26711407, 0.23614635], t, db=db3, ps=3))
        print("FF2 : ", main3([0.02620581, 0.15741069, 0.36506735, 0.14562509, 0.30569106], t, db=db2, ps=2))
        print("WHALES: ", main3([0.11556000, 0.17738700, 0.01242800, -0.4284020, 0.02998000], t, db=db3, ps=3))
# To extract the NFCID I used a class created by the GitHub user Doronhorowitz
# https://gist.github.com/doronhorwitz/fc5c4234a9db9ed87c53213d79e63b6c
from classes.db import DB
from classes.pn7150 import PN7150
from datetime import datetime, date
import json

now = datetime.now()
today = date.today()
db = DB()
pn7150 = PN7150()

# Current date and time, captured once at startup and reused for every scan.
current_time = now.strftime("%H:%M:%S")
current_date = today.strftime("%Y-%m-%d")

filename = "/home/pi/Desktop/config.txt"
exam = 0

# Read the config.txt file saved on the desktop to retrieve the variables.
if filename:
    # FIX: the `with` statement already closes the file; the original's
    # explicit f.close() inside the block was redundant.
    with open(filename, 'r') as f:
        datastore = json.load(f)
import classes.console as console
from classes.db import DB
from config.config import Config

# Set connection to the origin DB.
config = Config()
db = DB(config.config_origin)
db.setConnection()
# Register the linked tables so generateTables can follow the relations.
db.setLinkedTables(config.linked_tables)

# Set connection to the destination DB and recreate the tables there.
db_dest = DB(config.config_destination)
db_dest.setConnection()
desrination_db = db_dest.getConnection()  # NOTE(review): typo for "destination_db" — left as-is in case it is referenced elsewhere
db.generateTables(desrination_db)
def set_db(self, typedb='postgres', prefix=None):
    """(Re)create the internal database handle.

    When *prefix* is given, self.config_db is first reloaded via
    get_config_db(prefix); otherwise the currently stored config is reused.
    """
    if prefix:
        self.config_db = self.get_config_db(prefix)
    # Build the handle from whatever config is now stored.
    self.__db = DB(typedb=typedb, **self.config_db)
def backup():
    """Create a database backup and return the backup's filename."""
    database = DB()
    backup_filename = database.save_backup()
    del database  # drop the handle as soon as the backup is written
    return backup_filename
def validation_text_comparison(data_validation_source, report_flag,
                               training_file, report_name, error_threshold,
                               alfas, database):
    """Validate calculated text probabilities against ground truth.

    Reads a ';'-delimited CSV (column 0 = expected probability, column 1 =
    text), computes each text's probability from its 3-word phrases weighted
    by the alfa exponents, and accumulates positive/negative errors.

    When report_flag is truthy, every print is redirected into
    ../reports/<report_name>/report.out; otherwise the same computation runs
    silently.

    Returns 1 - (total absolute error / sentence count).
    """
    DB_V = DB(database + "/", "database", debug=False,
              run_on_ram=database + "/database.sql")
    print(database)
    now = datetime.datetime.now()
    positive_error = 0    # accumulated positive error
    negative_error = 0    # accumulated negative error
    positive_error_c = 0  # count of positive errors above the threshold
    negative_error_c = 0  # count of negative errors below -threshold
    sentence_counter = 0  # number of sentences processed
    with open(data_validation_source, encoding='utf-8-sig') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=';')
        if report_flag:  # report flag enabled: log everything to a file
            with open("../reports/" + report_name + "/report.out", "w") as report:
                orig_stdout = sys.stdout  # remember the real stdout
                sys.stdout = report       # redirect all prints into the report
                print("Data: (DD/MM/AAAA)" + str(now.day) + "/" + str(now.month) + "/" + str(now.year))
                print("Hora: (HH:MM:SS)" + str(now.hour) + ":" + str(now.minute) + ":" + str(now.second))
                print("Training_File: " + training_file)
                print("Validation_File: " + data_validation_source)
                for row in csv_reader:
                    sentence_counter += 1
                    t = Text(str.split(str.upper(row[1])), row[0])
                    if t:
                        t.build_phrases(3)  # build 3-word phrases
                        first = 0
                        prob = 0
                        for p in t.phrases:
                            p_prob = DB_V.get_phrase_prob(p)
                            print("Palavra: " + (p.words[0].value.encode('ascii', 'ignore')).decode('utf-8'))
                            print("Palavra: " + (p.words[1].value.encode('ascii', 'ignore')).decode('utf-8'))
                            print("Palavra: " + (p.words[2].value.encode('ascii', 'ignore')).decode('utf-8'))
                            print("Probabilidade: " + str(p_prob))
                            temp_alfa = alfas.getalfa(p_prob)
                            print("Alfa: " + str(temp_alfa))
                            if first != 0:
                                # fold this phrase's probability into the running product
                                prob = prob * (1 - p_prob)**temp_alfa
                            else:
                                # the first phrase seeds the running product
                                prob = (1 - p_prob)**temp_alfa
                                first = 1
                        prob = 1 - prob
                        print(row)  # print the raw text row
                        print("Probabilidade do texto: " + str(t.probability))  # ground-truth probability
                        print("Probabilidade calculada: " + str(prob))          # calculated probability
                        error = float(t.probability) - prob
                        if error > 0:
                            positive_error += error
                            if error > error_threshold:
                                positive_error_c += 1
                        else:
                            negative_error += error
                            if error < error_threshold * -1:
                                negative_error_c += 1
                        del t
                # Summary block, still redirected into the report file.
                print("Numero de frases: " + str(sentence_counter))
                print("Erro positivo: " + str(positive_error))
                print("Erro negativo: " + str(negative_error))
                print("Erro total: " + str(positive_error + abs(negative_error)))
                print("Contagem de Erros Positivos: " + str(positive_error_c))
                print("Contagem de Erros Negativos: " + str(negative_error_c))
                sys.stdout = orig_stdout  # restore the real stdout
                report.close()  # redundant inside `with`; kept as-is
        else:
            # Report flag disabled: same computation without per-phrase logging.
            for row in csv_reader:
                sentence_counter += 1
                t = Text(str.split(str.upper(row[1])), row[0])
                if t:
                    t.build_phrases(3)  # build 3-word phrases
                    first = 0
                    prob = 0
                    for p in t.phrases:
                        p_prob = DB_V.get_phrase_prob(p)
                        temp_alfa = alfas.getalfa(p_prob)
                        if first != 0:
                            prob = prob * (1 - p_prob)**temp_alfa
                        else:
                            prob = (1 - p_prob)**temp_alfa
                            first = 1
                    prob = 1 - prob
                    error = float(t.probability) - prob
                    if error > 0:
                        positive_error += error
                        if error > error_threshold:
                            positive_error_c += 1
                    else:
                        negative_error += error
                        if error < error_threshold * -1:
                            negative_error_c += 1
                    del t
    # NOTE(review): the original comment claimed "+0.000001 to avoid division
    # by zero", but no epsilon is actually added — an empty validation file
    # raises ZeroDivisionError here.
    return 1 - (positive_error + abs(negative_error)) / sentence_counter
import classes.console as console
from classes.db import DB
from config.config import Config

# Set connection to the origin DB.
config = Config()
db = DB(config.config_origin)
db.setConnection()
conn = db.getConnection()
# =======================
# Ask the user what to migrate and resolve that to a list of row ids.
typeMigrate = console.askType()
typeParameters = console.askTypeParameters(typeMigrate)
if typeMigrate != "id":
    migrateIds = console.getMigrationIds(conn, typeMigrate, typeParameters)
else:
    # The user supplied the ids directly.
    migrateIds = typeParameters
# =======================
db.setLinkedTables(config.linked_tables)
db_dest = DB(config.config_destination)
db_dest.setConnection()
desrination_db = db_dest.getConnection()  # NOTE(review): typo for "destination_db" — left as-is in case it is referenced elsewhere
if len(migrateIds) > 0:
    db.migrate(desrination_db, migrateIds)
else:
    print("No appropriate items found!")
def isprimentos(path='../experiments/1', treino='treino.csv', ff='firefly.csv',
                validation='validation.csv', phrase_size=(2, 3)):
    """Run the full experiment pipeline for each phrase size.

    For every phrase size: load the phrase database, build or load the
    DbHandler probabilities (cached as dbh<ps>.txt), obtain the firefly
    alpha powers (cached as ff<ps>.ini), then score the validation set
    into results<ps>.csv.

    FIX: phrase_size default changed from a mutable list to a tuple
    (equivalent for iteration, immune to shared-state bugs).
    NOTE(review): `treino` is currently unused; kept for interface
    compatibility.
    """
    for ps in phrase_size:
        print("Start loop for phrase size: ", ps)
        start = datetime.now()  # NOTE(review): unused — presumably intended for timing

        # --- Database ---
        database_filename = 'database' + str(ps) + '.sql'
        print("Load DB: ", path + '/' + database_filename)
        db = DB(run_on_ram=path + '/' + database_filename)
        print("DB Loaded")
        db.ram = False

        # --- DbHandler (phrase probabilities), cached as dbh<ps>.txt ---
        dbh_filename = 'dbh' + str(ps) + '.txt'
        if os.path.isfile(path + '/' + dbh_filename):
            print("DBH already exists")
            print("Loading DBH: ", dbh_filename)
            dbh = DbHandler()
            dbh.from_file(path, dbh_filename)
            print("DBH Loaded")
        else:
            print("Finding Probability")
            dbh = get_all_phrases_prob(path + '/' + ff, db, ps)
            print("to_file")
            dbh.to_file(path, dbh_filename)

        # --- Firefly alpha powers, cached as ff<ps>.ini ---
        best_ff = []
        config = configparser.ConfigParser()
        ff_filename = 'ff' + str(ps) + '.ini'
        if os.path.isfile(path + '/' + ff_filename):
            print("Firefly already exists")
            print("Loading Firefly: ", ff_filename)
            config.read(path + '/' + ff_filename)
            best_ff = [float(t) for t in config['Firefly']['alpha_powers'].split()]
        else:
            print("Calculating firefly")
            [brightness, best_ff] = firefly(
                dimension=5, number_fireflies=100, max_generation=100,
                data_source=path + '/' + ff, database_path=path,
                processes=16, phrase_size=ps, dbh=dbh)
            config.add_section('Firefly')
            # FIX: keep best_ff as a list of floats for calc_text_prob below;
            # the original overwrote it with its string representation, so the
            # compute branch passed a *string* where the cached branch passed
            # a list.  Also write space-separated values (the original kept
            # the list's commas, so reading back float("0.1,") would raise).
            alpha_str = ' '.join(str(v) for v in best_ff)
            config.set('Firefly', 'alpha_powers', alpha_str)
            # FIX: ConfigParser.set requires string values; brightness is numeric.
            config.set('Firefly', 'brightness', str(brightness))
            # FIX: ConfigParser.write takes an open file object, not a path.
            with open(path + '/' + ff_filename, 'w') as cfg_file:
                config.write(cfg_file)
        print('BEST FIREFLY: ', best_ff)
        print("")

        # --- Score the validation set ---
        print("Testing")
        row_number = 0
        # FIX: context manager guarantees the results file is closed on error.
        with open(path + '/' + 'results' + str(ps) + '.csv', 'w') as output:
            output.write('text;grand_truth;calculated;dif;found phrases;out of\n')
            with open(path + '/' + validation, newline='', encoding='utf-8-sig') as csvfile:
                reader = csv.reader(csvfile, delimiter=";", quoting=csv.QUOTE_NONE)
                for row in reader:
                    line = row[1] + ';' + str(row[0]) + ';'
                    [text_prob, found, out_of] = calc_text_prob(row[1], db, best_ff, ps)
                    difference = float(row[0]) - text_prob
                    line += str(text_prob) + ';' + str(difference) + ';' + str(found) + ';' + str(out_of)
                    output.write(line + '\n')
                    row_number += 1
                    print(row_number, " lines tested")