class User():
    """Thin wrapper around the project Database for login-time registration."""

    def __init__(self):
        # Handle to the project-local data store.
        self.db = Database()

    def login(self, uid, username):
        """Register the uid on first login; always report success."""
        existing = self.db.find_user({"userid": uid})
        if existing is None:
            self.add_user(uid, username)
        return "OKAY"

    def add_user(self, uid, username):
        """Insert a new user record keyed by userid."""
        self.db.add_user({"userid": uid, "username": username})
        return "OKAY"
class User():
    """User lookup/registration backed by Database (variant that returns the username)."""

    def __init__(self):
        # Handle to the project-local data store.
        self.db = Database()

    def login(self, uid):
        """Return the stored username for uid, or a not-registered notice."""
        response = self.db.find_user({"userid": uid})
        if response is None:
            return "User is not yet registered"
        return response["username"]

    def add_user(self, uid, username):
        """Insert a new user record and confirm.

        BUG FIX: original used the Python 2 `print uid` statement, a syntax
        error under Python 3; converted to the print() function.
        """
        print(uid)
        self.db.add_user({"userid": uid, "username": username})
        return "user added"
def getChild(self, name, request):
    """Resolve a child resource: flipnote files under a creator ID.

    Returns the per-creator file resource when the flipnote exists,
    self for an empty name, and NotFound otherwise.
    """
    creator_id = request.path.split("/")[-2]
    stem = ".".join(name.split(".")[:-1])  # filename with extension stripped
    if Database.FlipnoteExists(creator_id, stem):  # html, ppm and info
        return self.CreatorIDFile
    if name == "":
        return self
    return NotFound
def create():
    """Interactively create an account (username, email, password) and persist it.

    NOTE(review): the original source was corrupted by secret scrubbing
    ("******" fragments inside string calls); the print/getpass calls below
    are a best-effort reconstruction — confirm against version control.
    """
    var.done = True
    while not var.user_bool:
        print(var.color("Enter a username: "), end="")
        new_user = input()
        if db.find_data("user", new_user):
            var.error("This username has already been taken.\n")
        else:
            var.user_bool = True
    while not var.email_bool:
        print(var.color("\nEnter a valid email: "), end="")
        new_email = input()
        if not validate_email(new_email):
            var.error("This email isn't valid.")
        else:
            if db.find_data("email", new_email):
                var.error(
                    "This email has already been used in another account.")
            else:
                var.email_bool = True
    while not var.pass_bool:
        new_pass = getpass(var.color("\nEnter a strong password: "))
        # TODO(review): the strength-check call was lost in the corruption;
        # restore the project's real validator here.
        if not check_password_strength(new_pass):
            var.error("Your password isn't strong enough")
            continue
        same_pass = getpass(var.color("\nEnter the password again: "))
        if same_pass != new_pass:
            var.error("Your password doesn't match.")
        else:
            var.pass_bool = True
    global acc
    acc = Accounts(new_user, new_email, new_pass, 0, 0)
    for attr in var.attribute_keys:
        var.attribute_values.append(getattr(acc, attr))
    accdata = dict(zip(var.attribute_keys, var.attribute_values))
    db.insert(accdata)
    print(var.color(f"\nSuccessfully made an account for {acc.user}!"))
    time.sleep(3.5)
    os.system("cls")
def user_sudy():
    """Render the user's court-cases page, with optional client/date search on POST.

    Requires a `user_id` cookie; redirects to /login otherwise.
    """
    if request.cookies.get('user_id') is None:
        return redirect('/login')
    user = request.cookies.get('user_id')
    get_db()
    db = Database(g._database)
    user_info = db.find_user_by_id(user)
    role = user_info[0]
    name = ' '.join(user_info[1:])
    if request.method == 'GET':
        d_table = db.get_courts_u_id(u_id=user)
    else:
        client = request.form.get('client')
        date = request.form.get('date')
        if client == '' and date == '':
            d_table = db.get_courts_u_id(u_id=user)
        elif client == '' and date != '':
            d_table = db.get_courts_search_u_id(date=date, u_id=user)
        elif client != '' and date == '':
            d_table = db.get_courts_search_u_id(client=client, u_id=user)
        elif client != '' and date != '':
            d_table = db.get_courts_search_u_id(client=client, date=date,
                                                u_id=user)
        else:
            # BUG FIX: the original discarded the redirect() return value,
            # falling through with d_table unbound; return it instead.
            return redirect('/user/render/sudy')
    if d_table == []:
        return render_template("/user/sudy.html", data=[], role=role,
                               name=name, delite_href='')
    # Prepend a 1-based row number and move the second column to the end.
    for item in d_table:
        item.insert(0, d_table.index(item) + 1)
        item.append(item.pop(1))
        item.pop(1)
    serch_clients = []
    for cl in db.get_courts_clients_u_id(u_id=user):
        serch_clients.append(cl[0])
    return render_template("/user/sudy.html", data=d_table, role=role,
                           serch_clients=serch_clients, name=name)
def delo(template_name, t_id):
    """Render the details page for one court case (delo).

    Args:
        template_name: key used by settings_by_template to pick table/config.
        t_id: row identifier of the case.

    NOTE(review): the column slicing below (delo_data[0][2:7], [7:9], [9:11])
    encodes the table schema implicitly — confirm against the DB layout.
    """
    get_db()
    db = Database(g._database)
    user = request.cookies.get('user_id')
    user_info = db.find_user_by_id(user)
    role = user_info[0]
    name = ' '.join(user_info[1:])
    table_name = settings_by_template(template_name, 'table_name')
    delo_data = db.get_delo(table_name, t_id)
    rez_table = []
    # Main attribute columns; two of them are JSON-encoded lists.
    af_table = delo_data[0][2:7]
    af_table.pop(1)
    af_table[1] = ' ,'.join(json.loads(af_table[1]))
    af_table[3] = ' ,'.join(json.loads(af_table[3]))
    t_count = settings_by_template(template_name, 'delo_count')
    j_table = delo_data[0][-t_count + 1:]
    # File columns: rewrite Windows paths into download URLs, unless
    # the sentinel 'Нет файла' ("no file") is stored.
    files_t = delo_data[0][7:9]
    for f in files_t:
        if f != 'Нет файла':
            files_t[files_t.index(f)] = '/download_files/' + '/'.join(
                f.split('\\')[-3:])
        else:
            continue
    in_table = delo_data[0][9:11]
    rez_table = af_table + j_table + files_t + in_table
    # Bankruptcy cases drop one extra column.
    if template_name == 'bankr_dela':
        rez_table.pop(1)
    Type = settings_by_template(template_name, 'Type')
    return render_template('delo.html', type=Type, role=role, name=name,
                           delo=[rez_table], t_id=t_id,
                           template_name=template_name)
def user_add_sudy():
    """Add a new court case (POST) or show the add-case form (GET).

    POST builds the row in the column order reported by tables_sets('Sud').
    """
    if request.method == 'POST':
        new_c_id = str(uuid.uuid4())
        get_db()
        db = Database(g._database)
        user = request.cookies.get('user_id')
        user_info = db.find_user_by_id(user)
        adding_dict = request.form.to_dict(flat=False)
        list_to_Courts = []
        for margin in tables_sets(table_name='Sud', mode='fields'):
            if margin == 'c_id':
                list_to_Courts.append(new_c_id)
            elif margin == 'lawyer':
                list_to_Courts.append(' '.join(user_info[1:]))
            elif margin == 'u_id':
                # Idiom fix: reuse the cookie value fetched above instead of
                # re-reading request.cookies.
                list_to_Courts.append(user)
            else:
                list_to_Courts.append(adding_dict[margin][0])
        db.insert_tables('Sud', tuple(list_to_Courts))
        return redirect('/user/render/sudy')
    if request.method == 'GET':
        # Idiom fix: `is None` instead of `== None`.
        if request.cookies.get('user_id') is None:
            return redirect('/login')
        user = request.cookies.get('user_id')
        get_db()
        db = Database(g._database)
        user_info = db.find_user_by_id(user)
        role = user_info[0]
        name = ' '.join(user_info[1:])
        urists = db.get_urists()
        ur_up = [' '.join(i) for i in urists]
        clients = db.get_clients_u_id(user)
        return render_template('user/add/add_sudy.html', clients=clients,
                               role=role, name=name, urists=ur_up)
def register():
    """Create a user account from POSTed form fields; any other method -> 401.

    The stored hash format is "<sha256(salt+password)>:<salt hex>", with the
    same uuid4 doubling as the user id.
    NOTE(review): a plain salted SHA-256 is weak for password storage; a KDF
    (bcrypt/scrypt/argon2) would be preferable.
    """
    if request.method != 'POST':
        return abort(401)
    email = request.form.get('email')
    password = request.form.get('password')
    Access_level = request.form.get('Access_level')
    name = request.form.get('name')
    surname = request.form.get('surname')
    lastname = request.form.get('lastname')

    def hash_password(password):
        # Fresh uuid4 serves both as salt and as the new user's id.
        salt = uuid.uuid4()
        digest = hashlib.sha256(salt.hex.encode() + password.encode()).hexdigest()
        return salt, digest + ':' + salt.hex

    u_id, hash_p = hash_password(password)
    new_U = (str(u_id), Access_level, email, str(hash_p), name, surname,
             lastname)
    get_db()
    db = Database(g._database)
    db.insert_User(new_U)
    return 'Registered Successfully'
def __init__(self, roi_id):
    """PlateReader looks for unprocessed image files, detects vehicles'
    license plates and caches associated emissions to database for reporting.

    Args:
        roi_id (int): Identifier of processed region in image

    Args read from environment variables
        POLL_FOLDER: Folder to look for images and metadata
        DEBUG: Flag indicating whether debug logs and features are enabled
    """
    logging.basicConfig(level=logging.INFO)
    self.roi_id = roi_id
    # Folder polled for new frames; None if POLL_FOLDER is unset.
    self.poll_folder = os.getenv("POLL_FOLDER")
    self.DB = Database()
    self.OCR = ocr_reader
    # Accumulates frames with plate candidates for the current batch.
    self.raw_plate_history = []
    self.debug = os.getenv("DEBUG", "").lower() == "true"
    # When true, plaintext plates are logged instead of hashes.
    self.reveal_plate = os.getenv("LOG_PLAINTEXT_PLATE", "").lower() == "true"
    self.analyser = ROIPlateAnalyser(roi_id, self.debug)
def render_POST(self, request):  # implement channels?
    """Handle a flipnote upload POST: store the body via Database, log the outcome."""
    data = request.content.read()
    channel = request.args["channel"][0] if "channel" in request.args else ""
    stored = Database.AddFlipnote(data, channel)
    if stored:
        ServerLog.write("%s successfully uploaded \"%s.ppm\"" % (request.getClientIP(), stored[1]), Silent)
        request.setResponseCode(200)
    else:
        ServerLog.write("%s tried to upload a flipnote, but failed..." % request.getClientIP(), Silent)
        request.setResponseCode(500)  # only causes an error, need to fix
    return ""
def login():
    """Authenticate against the stored "hash:salt" SHA-256 record.

    On success sets the `user_id` cookie and redirects by role; on failure
    re-renders auth.html with an error message.

    PERF FIX: the original re-queried db.find_user(email) up to six times;
    the result is now fetched once.
    """
    if request.method != 'POST':
        return render_template('auth.html')
    email = request.form.get('email').rstrip()
    password = request.form.get('password')
    get_db()
    db = Database(g._database)
    users = db.find_user(email)
    if users == [] or users[0][3] == []:
        return html_error_replacer('auth.html',
                                   'User not found or name invalid')
    hash_pass = users[0][3]

    def check_password(hashed_password, user_password):
        # Stored format is "<digest>:<salt>".
        password, salt = hashed_password.split(':')
        return password == hashlib.sha256(
            salt.encode() + user_password.encode()).hexdigest()

    if not check_password(hash_pass, password):
        return html_error_replacer('auth.html', 'Password []')
    role = users[0][1]
    if role == 'Руководитель':
        way = '/admin/render/sud_dela'
    elif role == 'Пользователь':
        way = '/user/render/sud_dela'
    elif role == 'Секретарь':
        way = '/secretary'
    else:
        way = '/login'
    response = make_response(redirect(way))
    response.set_cookie('user_id', users[0][0])
    return response
def launch(method, neighbor_by_class: int = 3, store: bool = False):
    """Launch a classification algorithm.

    Train on a first dataset, and validate it with a second (and print result)

    Args:
        method (object): Class object that have a make_samples function
        neighbor_by_class (int, optional): Nb nearsest neighbors by class. Defaults to 3.
        store (bool, optional): True if you want to store the alg result in a folder. Default to False.
    """
    db_train = Database(DB_TRAIN)
    db_validation = Database(DB_VALIDATION)
    train_samples = method.make_samples(db_train)
    validation_samples = method.make_samples(db_validation)
    if store:
        # Delete previous result
        for classe in db_train.get_class():
            shutil.rmtree(os.path.join(RESULT_RETRIEVAL, classe),
                          ignore_errors=True)
        # and re-create directory
        for classe in db_train.get_class():
            os.mkdir(os.path.join(RESULT_RETRIEVAL, classe))
    # final[cls] = [correct, total] per class.
    final = {cl: [0, 0] for cl in sorted(db_train.get_class())}
    for img in validation_samples:
        selected_class = knn(img, train_samples,
                             neighbor_by_class=neighbor_by_class)
        # Compare selected class with real class
        if img['cls'] == selected_class:
            final[img['cls']][0] += 1
        # NOTE(review): indentation reconstructed from a collapsed source —
        # the total counter presumably increments for every image (so the
        # printed ratio is correct/total); confirm against the original layout.
        final[img['cls']][1] += 1
        # Store image class result if asked
        if store:
            # Copy image to result dir
            i = os.path.basename(img['img'])
            shutil.copy(img['img'],
                        os.path.join(RESULT_RETRIEVAL, selected_class, i))
    # Beautiful print
    print("\n========= Result =========")
    col_width = max(len(cl) for cl, v in final.items()) + 2
    for cl, v in final.items():
        print(
            f"{cl.ljust(col_width)}: {v[0]}/{v[1]}\t{round(v[0]*100/v[1], 2)} %"
        )
def menu(user):
    """Show the S4U main menu window for `user`, gating panels by role (cargo)."""
    cargo = Database.cargo(user)
    root = Tk()
    root.geometry("1024x768+0+0")
    root.title('S4U® Bem Vindo {}'.format(user))
    title = Label(root, text='Seja Bem Vindo "{} {}"\n{}\n{}'.format(
        cargo.title(), user.title(), date, hour))
    title.grid(row=0, columnspan=2, sticky=W + E)

    # Registration panel (always shown).
    cad = LabelFrame(root, text='Cadastro')
    cad.grid(row=1, columnspan=2, sticky=W + E)
    Button(cad, text='CADASTRAR', command=Database.cadastro).grid(
        row=0, column=0, sticky=W + E)
    Button(cad, text='CONSULTAR', command=Database.consulta).grid(
        row=1, column=0, sticky=W + E)

    # Settings panel (only gridded for admins below).
    conf = LabelFrame(root, text='Configurações')
    Button(conf, text='CONFIGURAR').grid(row=0, column=0, sticky=W + E)

    # Data panel (only gridded for admin/fiscal below).
    dados = LabelFrame(root, text='Dados')
    Button(dados, text='DADOS').grid(row=0, column=0, sticky=W + E)

    # Permission gating: decide which extra panels become visible.
    if cargo == 'admin' or cargo == 'fiscal':
        dados.grid(row=3, columnspan=2, sticky=W + E)
    if cargo == 'admin':
        conf.grid(row=2, columnspan=2, sticky=W + E)
    root.mainloop()
class Message():
    """Geo-anchored message drop/pickup backed by Database.

    BUG FIX: the Python 2 `print` statement in drop_message was converted to
    the print() function (syntax error under Python 3).
    """

    def __init__(self):
        self.db = Database()

    def drop_message(self, user_id, message_x, message_y, message, viewable_by, is_picture):
        """Store `message` at coordinates (message_x, message_y)."""
        message_coordinates = (message_x, message_y)
        print("(" + str(message_coordinates) + ") " + str(message))
        message_entry = {"coordinates": message_coordinates,
                         "message": message,
                         "userid": user_id,
                         "viewable_by": viewable_by,
                         "is_picture": is_picture}
        self.db.insert(message_entry)
        self.db.get_entries()

    def pickup_message(self, user_id, user_x, user_y):
        """Return and delete the first stored message within ~1e-5 degrees.

        This is an estimation... The length of a single degree of latitude
        changes as the longitude changes. This won't work accurately for
        latitudes within about 20-30 degrees of either pole.
        360 degrees / 40075 km = .00898 degrees/km ~= .00001 for the radius used.

        NOTE(review): this scans every post — O(n) per pickup; the original
        author left commented-out geo-query attempts. A coordinate-indexed
        query would be preferable but needs DB support.
        """
        for p in self.db.posts.find():
            if (p["coordinates"][0] >= user_x - 0.00001
                    and p["coordinates"][0] <= user_x + 0.00001):
                if (p["coordinates"][1] >= user_y - 0.00001
                        and p["coordinates"][1] <= user_y + 0.00001):
                    self.db.delete_entries(p)
                    return p
        return "NONE"
# NOTE(review): this chunk begins mid-method (inside a try block of an
# UPDATE loop of an SQLite wrapper class); indentation below is
# reconstructed from a collapsed source — confirm against the full file.
                value = item['index']
                link = item['link']
                # WARNING(review): SQL built via string formatting is
                # injection-prone; should use parameterized queries.
                querry = '''UPDATE products set {}={} WHERE LINK='{}' '''.format(
                    tmp.upper(), value, link)
                cur.execute(querry)
            self.con.commit()
            self.con.close()
        except:  # NOTE(review): bare except swallows all errors silently
            return False

    def close(self):
        # Close the SQLite connection and report success.
        self.con.close()
        return True


if __name__ == '__main__':
    # Smoke-test driver: mirror Database rows into the SQLite store.
    db = Database()
    data = db.get_data()
    sql = SQLite()
    sql.insertmuti(db)
    sql.update('10000.jpg', 'res', 1)
    tmp = sql.select('res', 1)
    for d in data.itertuples():
        d_img, d_cls, d_file = getattr(d, "img"), getattr(d, "cls"), getattr(
            d, "filename")
        sql.insert(d_cls, d_file, d_img)
    sql.close()
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardRemove, ReplyKeyboardMarkup
from telegram.ext import ConversationHandler
from localization import languages
from utils.message_parser import parser
from ._regular_reminder import reminder_handler
import datetime
from DB import Database

# Module-level bot state.
db = Database("db")
user_lang = ""
# ConversationHandler state identifiers.
LANG, TIME_ZONE, MENU, CHOOSE_EVENT, ADD_EVENT = range(5)


def start_command(update, context):
    """Send the language-selection inline keyboard with a localized greeting.

    NOTE(review): the button labels look mojibake-encoded (flag emoji bytes
    decoded with the wrong codec); left byte-identical here — confirm the
    source file encoding upstream.
    """
    keyboard = [[
        InlineKeyboardButton("EN ЪЈ┤заЂДзаЂбзаЂЦзаЂ«заЂДзаЂ┐", callback_data="en"),
        InlineKeyboardButton("UA ЪЄ║ЪЄд", callback_data="ua"),
        InlineKeyboardButton("RU ЪЄиЪЄ║", callback_data="ru")
    ]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    context.bot.send_message(
        chat_id=update.message.chat_id,
        text=languages[update.message.from_user.language_code]["hello"],
        reply_markup=reply_markup)
def UpdateThreaded(self, flipnotes):  # run in an another thread
    """Sort helper for flipnotes by popularity.

    BUG FIX: the nested key function used the Python-2-only tuple-parameter
    syntax `def sort((i, (ID, flip)))` (removed by PEP 3113); converted to
    explicit unpacking, preserving the exact scoring formula.

    NOTE(review): this chunk appears truncated after the key function —
    the rest of UpdateThreaded is not visible here.
    """
    # sort the flipnotes by viewcount, affected by amount of stars
    def sort(entry):
        (i, (ID, flip)) = entry
        views, stars = Database.GetFlipnote(ID, flip)[1:3]
        return int(views) * 60 + int(stars) / 4 - i
# NOTE(review): this chunk begins mid-method (tail of a make_samples loop)
# and ends mid-statement; indentation reconstructed from a collapsed source.
            d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
            d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
            samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
        cPickle.dump(
            samples, open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    # Set up the two databases (train and test).
    DB_train_dir_param = "../../ReseauDeNeurones/data/train"
    DB_train_csv_param = "database/data_train.csv"
    db_train = Database(DB_train_dir_param, DB_train_csv_param)
    data_train = db_train.get_data()
    DB_test_dir_param = "../../ReseauDeNeurones/data/test"
    DB_test_csv_param = "database/data_test.csv"
    db_test = Database(DB_test_dir_param, DB_test_csv_param)
    data_test = db_test.get_data()
    edge = Edge()
    # check shape
    assert edge_kernels.shape == (5, 2, 2)
    # evaluate database  (NOTE(review): call truncated at chunk boundary)
    APs, prevision = my_evaluate_class(db_train, db_test, f_class=Edge,
def __init__(self):
    # Open a handle to the project-local Database backing store.
    self.db = Database()
def render(self, request):
    """Dispatch a flipnote sub-resource request by file extension.

    Handles: .ppm (raw flipnote), .info, .htm (details page), .star
    (add stars), .dl (download notification); anything else -> 403.
    """
    creator, file = request.path.split("/")[-2:]
    filetype = file.split(".")[-1].lower()
    # BUG FIX: original tested `filetype in "ppm"`, which is a substring
    # match (also true for "p", "pm" and the empty string), not equality.
    if filetype == "ppm":
        # log it:
        path = "/".join(request.path.split("/")[3:])
        Log(request, path)
        # add a view:
        Database.AddView(creator, file[:-4])
        # read ppm file:
        data = Database.GetFlipnotePPM(creator, file[:-4])
        # send file to the client:
        request.responseHeaders.setRawHeaders('content-type', ['text/plain'])
        return data
    elif filetype == "info":
        path = "/".join(request.path.split("/")[3:])
        Log(request, path, True)
        request.responseHeaders.setRawHeaders('content-type', ['text/plain'])
        return "0\n0\n"  # undocumented what it means
    elif filetype == "htm":
        # maybe cache the details page of Database.Newest?
        if "mode" in request.args:
            if request.args["mode"][0] == "commentshalfsize":
                pass
        return self.GenerateDetailsPage(creator, ".".join(file.split(".")[:-1])).encode("UTF-8")
    elif filetype == "star":
        path = "/".join(request.path.split("/")[3:])
        headers = request.getAllHeaders()  # bad formatting
        if "x-hatena-star-count" not in headers:
            ServerLog.write("%s got 403 when requesting %s without a X-Hatena-Star-Count header" % (request.getClientIP(), path), Silent)
            request.setResponseCode(400)
            return "400 - Denied access\nRequest lacks a X-Hatena-Star-Count http header"
        # add the stars:
        amount = int(headers["x-hatena-star-count"])
        if not Database.AddStar(creator, file[:-5], amount):
            # error
            ServerLog.write("%s got 500 when requesting %s" % (request.getClientIP(), path), Silent)
            request.setResponseCode(500)
            return "500 - Internal server error\nAdding the stars seem to have failed."
        # report success
        ServerLog.write("%s added %i stars to %s/%s.ppm" % (request.getClientIP(), amount, creator, file[:-5]), Silent)
        return "Success"
    elif filetype == "dl":
        path = "/".join(request.path.split("/")[3:])
        Log(request, path, True)
        # this is POSTed to when it've been stored to memory.
        Database.AddDownload(creator, file[:-3])
        return "Noted ;)"
    else:
        path = "/".join(request.path.split("/")[3:])
        ServerLog.write("%s got 403 when requesting %s" % (request.getClientIP(), path), Silent)
        request.setResponseCode(403)
        return "403 - Denied access"
# NOTE(review): this chunk begins mid-statement (tail of a result-formatting
# call inside an evaluation loop); indentation reconstructed from a
# collapsed source — confirm against the full file.
                np.mean(cls_MAPs))
            print(r)
            result.write('\n' + r)
    print("Moyennes {} bonnes réponses tout depth= {}".format(
        ",".join(combination), sommeBonnesReponsesCombinaison / len(depths) * 100))
    result.close()


if __name__ == "__main__":
    # Create the two databases: test and train.
    DB_train_dir_param = "../../ReseauDeNeurones/data/train"
    DB_train_csv_param = "database/data_train.csv"
    db_train = Database(DB_train_dir_param, DB_train_csv_param)
    DB_test_dir_param = "../../ReseauDeNeurones/data/test_classés"
    DB_test_csv_param = "database/data_test.csv"
    db_test = Database(DB_test_dir_param, DB_test_csv_param)
    # evaluate features double-wise
    evaluate_feats(db_train, db_test, N=2, d_type='d1')
    # evaluate features triple-wise
    evaluate_feats(db_train, db_test, N=3, d_type='d1')
    # evaluate features quadra-wise
    evaluate_feats(db_train, db_test, N=4, d_type='d1')
def test_get_vehicles():
    """The vehicle table must be readable and return a non-None result."""
    database = Database()
    vehicles = database.get_vehicles()
    assert vehicles is not None
def GenerateDetailsPage(self, CreatorID, filename):  # filename without ext
    """Build the HTML details page for one flipnote.

    BUG FIXES:
    - Python-2-only `<>` operators replaced with `!=`.
    - the Channel link string was missing its opening '<' on the anchor tag.
    """
    flipnote = Database.GetFlipnote(CreatorID, filename)
    # flipnote = [filename, views, stars, green stars, red stars, blue stars,
    # purple stars, Channel], all probably strings
    if not flipnote:
        return "This flipnote doesn't exist!"
    tmb = TMB().Read(Database.GetFlipnoteTMB(CreatorID, filename))
    if not tmb:
        return "This flipnote is corrupt!"
    # Is it a spinoff?
    Spinnoff = ""
    if tmb.OriginalAuthorID != tmb.EditorAuthorID or tmb.OriginalFilename != tmb.CurrentFilename:
        if Database.FlipnoteExists(tmb.OriginalAuthorID, tmb.OriginalFilename[:-4]):
            Spinnoff = SpinoffTemplate1.replace(
                "%%CreatorID%%", tmb.OriginalAuthorID).replace(
                    "%%Filename%%", tmb.OriginalFilename[:-4])
        elif tmb.OriginalAuthorID != tmb.EditorAuthorID:
            Spinnoff = SpinoffTemplate2
    # make each entry:
    Entries = []
    # Creator username:
    name = "Creator"
    #content = "<a href=\"http://flipnote.hatena.com/ds/ds/v2-xx/%s/profile.htm?t=260&pm=80\">%s</a>" % (CreatorID, tmb.EditorAuthorName)
    content = '<a href="http://flipnote.hatena.com/ds/v2-xx/%s/profile.htm?t=260&pm=80\">%s</a>' % (
        CreatorID, tmb.Username)
    Entries.append(
        PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
    # Stars:
    name = "Stars"
    content = u'<a href="http://flipnote.hatena.com/ds/v2-xx/movie/%s/%s.htm?mode=stardetail"><span class="star0c">\u2605</span> <span class="star0">%s</span></a>' % (
        CreatorID, filename, flipnote[2])  # yellow stars
    # todo: add other stars
    Entries.append(
        PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
    # Views:
    name = "Views"
    content = str(flipnote[1])
    Entries.append(
        PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
    # Channel:
    if flipnote[7]:
        # todo: make channels work at all
        name = "Channel"
        # BUG FIX: anchor was 'a href=...' — missing the opening '<'.
        content = '<a href="http://flipnote.hatena.com/ds/v2-xx/ch/%s.uls">%s</a>' % (
            flipnote[7], flipnote[7])
        Entries.append(
            PageEntryTemplate.replace("%%Name%%", name).replace("%%Content%%", content))
    # Comments:
    Comments = "0"
    # todo: add original author info too
    # add the entries to page:
    return DetailsPageTemplate.replace("%%CreatorID%%", CreatorID).replace(
        "%%Filename%%", filename).replace("%%CommentCount%%", Comments).replace(
            "%%Spinoff%%", Spinnoff).replace("%%PageEntries%%",
                                             PageEntrySeparator.join(Entries))
# NOTE(review): this chunk begins mid-function (feature-combination
# evaluation loop; the enclosing def and `result`/`depths` setup precede it);
# indentation reconstructed from a collapsed source.
    combinations = itertools.combinations(feat_pools, N)
    for combination in combinations:
        fusion = FeatureFusion(features=list(combination))
        for d in depths:
            APs = evaluate_class(db, f_instance=fusion, d_type=d_type, depth=d)
            cls_MAPs = []
            for cls_APs in APs:
                MAP = np.mean(cls_APs)
                cls_MAPs.append(MAP)
            # CSV row: features, depth, distance type, mean MAP.
            r = "{},{},{},{}".format(",".join(combination), d, d_type,
                                     np.mean(cls_MAPs))
            print(r)
            result.write('\n' + r)
    print()
    result.close()


if __name__ == "__main__":
    print("Pensez à supprimer le dossier cache dans le cas où vous utilisez des nouvelles données.\n")
    dbTrain = Database(DB_dir="CorelDBDataSet/train",
                       DB_csv="CorelDBDataSetTrain.csv")
    fusion = FeatureFusion(features=['color', 'daisy'])
    result = evaluate_class(dbTrain, f_instance=fusion, d_type=d_type,
                            depth=depth)
    print("{} classes classées sur {} disponibles".format(
        result[0], result[1]))
# NOTE(review): this chunk begins mid-method (tail of a Daisy make_samples
# implementation); indentation reconstructed from a collapsed source.
        data = db.get_data()
        for d in data.itertuples():
            d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
            d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
            samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
        cPickle.dump(samples,
                     open(os.path.join(cache_dir, sample_cache), "wb"))
        return samples


if __name__ == "__main__":
    d = Daisy()
    # Create my samples
    db = Database("database\\train")
    print("Train databse created.")
    samples = d.make_samples(db, sample_name="train")
    print("Train samples created.")
    # NOTE(review): "database\dev" relies on \d not being an escape; an
    # escaped backslash (as in "database\\train" above) would be safer.
    test = Database("database\dev")
    print("Test databse created.")
    sample_test = d.make_samples(test, sample_name="dev")
    print("Test samples created.")
    # Find class for each image of my test DB and verify the result
    nb_good_classification = 0
    for img_test in sample_test:
        _, resultes = infer(img_test, samples)
        real_cls = KNN(resultes, db.get_class())
import login as log
from extra_vars import color
from DB import Database as db

# Initialise the JSON-file-backed database before any auth flow runs.
db.init("DB/data.json")

# If you can't see color or password doesn't work: alias python='winpty python.exe'
print(color("\nWelcome to Hyper Studios!\n"))
# Hand control to the login/register menu.
log.choice()
# NOTE(review): this chunk begins mid-function (MAP aggregation inside an
# evaluate_feats-style loop); indentation reconstructed from a collapsed
# source — confirm against the full file.
            for cls, cls_APs in APs.items():
                MAP = np.mean(cls_APs)
                cls_MAPs.append(MAP)
            # CSV row: features, depth, distance type, mean MAP.
            r = "{},{},{},{}".format(",".join(combination), d, d_type,
                                     np.mean(cls_MAPs))
            print(r)
            result.write('\n' + r)
    print()
    result.close()


if __name__ == "__main__":
    DB_train_dir = '../database/train'
    DB_train_csv = 'data_train.csv'
    db = Database(DB_train_dir, DB_train_csv)
    # DB_test_dir = '../database/test'
    # DB_test_csv = 'data_test.csv'
    DB_test_dir = '../database/validation'
    DB_test_csv = 'data_validation.csv'
    db2 = Database(DB_test_dir, DB_test_csv)
    # evaluate features double-wise
    evaluate_feats(db, db2, N=2, d_type='d1')
    # evaluate features triple-wise
    evaluate_feats(db, db2, N=3, d_type='d1')
def test_plate_hash():
    """If a first vehicle exists, its stored plate hash must be known to the DB."""
    database = Database()
    first_vehicle = database.get_vehicles()[0]
    if first_vehicle:
        assert database.hash_exists(first_vehicle[0])
class PlateReader:
    # NOTE(review): this class was reconstructed from a collapsed source;
    # statement grouping inside _process_files (see note there) should be
    # confirmed against the original layout.

    def __init__(self, roi_id):
        """PlateReader looks for unprocessed image files, detects vehicles'
        license plates and caches associated emissions to database for
        reporting.

        Args:
            roi_id (int): Identifier of processed region in image

        Args read from environment variables
            POLL_FOLDER: Folder to look for images and metadata
            DEBUG: Flag indicating whether debug logs and features are enabled
        """
        logging.basicConfig(level=logging.INFO)
        self.roi_id = roi_id
        self.poll_folder = os.getenv("POLL_FOLDER")
        self.DB = Database()
        self.OCR = ocr_reader
        # Frames with plate candidates accumulated per processing batch.
        self.raw_plate_history = []
        self.debug = os.getenv("DEBUG", "").lower() == "true"
        # When true, plaintext plates are logged instead of hashes.
        self.reveal_plate = os.getenv("LOG_PLAINTEXT_PLATE", "").lower() == "true"
        self.analyser = ROIPlateAnalyser(roi_id, self.debug)

    def start(self):
        """Start polling for unprocessed frames"""
        logging.info(f"Starting polling for ROI {self.roi_id}")
        while True:
            started = time.time()
            files = self._get_files()
            self._process_files(files)
            if files:
                logging.info("Loop process time {}s, {} files".format(
                    (time.time() - started), len(files)))
            if self.debug:
                # Debug mode: process a single batch, then exit.
                sys.exit(1)
            time.sleep(1)

    def _get_files(self):
        """Return batch of image filenames with associated metadata from
        filename and separate JSON file

        Returns:
            List: List of dictionaries containing information about images to
            process and related metadata
        """
        if ENCRYPT:
            suffix = "aes"
        else:
            suffix = "jpg"
        files = []
        for f in sorted(os.listdir(self.poll_folder)):
            if f.endswith(
                    "json"
            ):  # check for metadata since it's written after image
                try:
                    parsed = parse.parse(
                        "{stream}_ts_{ts}_roi_{roi}_f_{frame}." + "json", f)
                    # Only pick up frames belonging to this reader's ROI.
                    if int(parsed["roi"]) != self.roi_id:
                        continue
                    files.append({
                        "stream": parsed["stream"],
                        "timestamp": timestamp_to_date(parsed["ts"]),
                        "frame_no": parsed["frame"],
                        "ROI": parsed["roi"],
                        "path": os.path.join(self.poll_folder,
                                             f.replace("json", suffix)),
                        "metadata": self._read_metadata(
                            os.path.join(self.poll_folder, f)),
                    })
                except Exception as e:
                    logging.error("ERROR READING FILES: {}".format(e))
                    pass
                if len(files) >= MAX_BATCH_SIZE:
                    break
        if self.debug:
            return files[0:100]  # return only few at a time
        return files

    def _get_plate(self, path):
        """Read image from given path and perform OCR

        Args:
            path (str): Path to image file

        Returns:
            Dict: License plate recognition results
        """
        if ENCRYPT:
            img = read_encrypted_image(path)
            plates = self.OCR.read_array(bytes(bytearray(img)))
        else:
            plates = self.OCR.read_file(path)
        plates["file"] = path
        return plates

    def _read_metadata(self, path):
        """Read image frame metadata from JSON file. Metadata contains e.g.
        object detection results, ROI offset etc.

        Args:
            path (str): Path to JSON file containing image metadata

        Returns:
            Dict or None: Return image metadata as dictionary or None if
            unsuccessful
        """
        data = None
        with open(path, mode="r", encoding="utf-8") as f:
            data = json.load(f)
        return data

    def _process_files(self, files):
        """Process set of files and remove after them after processing

        Args:
            files (List): List of frame metadata
        """
        if len(files) == 0:
            return
        self.raw_plate_history = []
        logging.info("LAG: {}".format(datetime.datetime.now() -
                                      files[0]["timestamp"]))
        for file in files:
            # Yolo saw an object here
            if len(file["metadata"]["detections"]) > 0:
                plate = self._get_plate(file["path"])
                try:
                    for temp in plate["results"]:
                        logging.info("ALPR like: {}".format(
                            get_obfuscated_plate(temp["plate"])))
                except Exception:
                    pass
                file["plates"] = plate
                self.raw_plate_history.append(file)
            # Delete files when data has been read
            # NOTE(review): deletion is placed outside the detections check,
            # per the comment's intent — confirm against the original layout.
            try:
                os.remove(file["path"])
            except FileNotFoundError:
                pass
            try:
                os.remove(file["path"].replace(".jpg", ".json"))
            except FileNotFoundError:
                pass
        # analyse known plates
        roi_plates = self.analyser.analyse_plates(self.raw_plate_history)
        for detected_plate in roi_plates:
            self._find_emissions_and_cache(detected_plate)
            plate_text = detected_plate["plate_text"]
            plate_hashed = create_hash(plate_text)
            if self.reveal_plate:
                logging.info(f"ROI: {self.roi_id}, {plate_text}")
            else:
                logging.info(f"ROI: {self.roi_id}, {plate_hashed}")
        # Placeholder:
        # - hash plate
        # - get emission info for the hashed plate
        # - insert emission to local database to be sent out later.
        # emissions = self.DB.get_vehicle(plate_hashed)
        # if emissions:
        #     self.DB.write_cache(detected_plate["timestamp"], emissions)

    def _find_emissions_and_cache(self, detected_plate):
        """Retrieve emissions from database and write to cache for sending.
        If license plate is known and found then actual emissions are read,
        otherwise average for the vehicle type is used.

        Args:
            detected_plate (str): License plate text or string identifier for
            an unknown vehicle

        TODO: Write average vehicle to cache

        Returns:
            bool: True if emission caching was successful
        """
        plate_text = detected_plate["plate_text"]
        if plate_text.startswith(UNKNOWN_VEHICLE_PREFIX):
            # TODO: retrieve average emissions based on detected_plate["label"]
            return True
        else:
            vehicle = self.DB.get_vehicle(create_hash(plate_text))
            if vehicle:
                self.DB.write_cache(str(detected_plate["timestamp"]), vehicle)
                return True
        return False
def get_db(self, conf):
    """Construct a Database from the given config mapping and return its session."""
    database = Database(**conf)
    return database.session
def sort(xxx_todo_changeme):
    """Popularity key for an (index, (creator_id, filename)) flipnote entry.

    Stars are weighted heavily over views; subtracting the original index
    slightly favours earlier entries on near-ties. (The parameter name is a
    2to3 artifact, kept for interface compatibility.)
    """
    i, flip_key = xxx_todo_changeme
    ID, flip = flip_key
    views, stars = Database.GetFlipnote(ID, flip)[1:3]
    return int(stars) * 110 + int(views) / 10 - i
# NOTE(review): this chunk begins mid-function (ResNet feature-extraction
# helper; the GPU-availability check enclosing the first line precedes this
# chunk); indentation reconstructed from a collapsed source.
        res_model = res_model.cuda()
    samples = []
    img = imageio.imread(q_img, pilmode="RGB")
    img = img[:, :, ::-1]  # switch to BGR
    img = np.transpose(img, (2, 0, 1)) / 255.
    img[0] -= means[0]  # reduce B's mean
    img[1] -= means[1]  # reduce G's mean
    img[2] -= means[2]  # reduce R's mean
    img = np.expand_dims(img, axis=0)
    try:
        if use_gpu:
            inputs = torch.autograd.Variable(
                torch.from_numpy(img).cuda().float())
        else:
            inputs = torch.autograd.Variable(torch.from_numpy(img).float())
        d_hist = res_model(inputs)[pick_layer]
        d_hist = d_hist.data.cpu().numpy().flatten()
        d_hist /= np.sum(d_hist)  # normalize
    except:
        # NOTE(review): bare except hides real failures, and d_hist may be
        # unbound at the return below if the try body failed early.
        pass
    return d_hist


if __name__ == "__main__":
    # evaluate database
    db = Database()
    obj = Embeddings()
    q_img = "database/images/15970.jpg"
    ret = obj.make_hist(q_img)
    print(ret)
# NOTE(review): this chunk begins mid-method (CPU branch of a VGG feature
# extraction loop; the matching try/GPU branch precede this chunk);
# indentation reconstructed from a collapsed source.
                    inputs = torch.autograd.Variable(
                        torch.from_numpy(img).float())
                d_hist = vgg_model(inputs)[pick_layer]
                d_hist = np.sum(d_hist.data.cpu().numpy(), axis=0)
                d_hist /= np.sum(d_hist)  # normalize
                samples.append({
                    'img': d_img,
                    'cls': d_cls,
                    'hist': d_hist
                })
            except BaseException:
                # NOTE(review): swallowing BaseException silently drops
                # samples that fail feature extraction.
                pass
        cPickle.dump(
            samples, open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    # evaluate database
    DB = Database()
    APs = evaluate_class(DB, f_class=VGGNetFeat, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))