def vote():
    """Render the voting page and record a vote submitted via the form.

    Loads all photos plus the current user's existing up/down vote photo-id
    lists; when the request carries form data, creates or updates the user's
    Vote row, bumps the photo's counter, commits, and re-renders the partial.
    """
    allphotos = db_session.query(Photo).all()
    # Bound parameters instead of %-interpolation of g.user_id -- the
    # original string formatting was an SQL-injection vector.
    upvote_sql = ("select distinct v.photo_id from votes v "
                  "where v.give_vote_user_id = :uid and v.value > 0;")
    downvote_sql = ("select distinct v.photo_id from votes v "
                    "where v.give_vote_user_id = :uid and v.value < 0;")
    params = {"uid": g.user_id}
    upvotes = [row[0] for row in db_session.execute(upvote_sql, params)]
    print(upvotes)  # Python-3 print(); the original Py2 statement is a syntax error here
    downvotes = [row[0] for row in db_session.execute(downvote_sql, params)]
    if request.form:
        # `choice` instead of `vote` -- the original shadowed this function's name.
        choice = request.form['vote']
        photoid = request.form['photoid']
        photoowner = request.form['photoowner']
        v = db_session.query(Vote).filter_by(
            give_vote_user_id=g.user_id, photo_id=photoid).first()
        if not v:
            v = Vote(give_vote_user_id=g.user_id, photo_id=photoid,
                     receive_vote_user_id=photoowner)
            db_session.add(v)
        p = db_session.query(Photo).filter_by(id=photoid).one()
        if choice == "upvote":
            v.value = 1
            # Server-side expression (Photo.up_vote + 1) avoids lost updates.
            p.up_vote = Photo.up_vote + 1
        elif choice == "downvote":
            v.value = -1
            p.down_vote = Photo.down_vote + 1
        db_session.commit()
        # Re-read both vote lists so the rendered partial reflects the new vote.
        upvotes = [row[0] for row in db_session.execute(upvote_sql, params)]
        downvotes = [row[0] for row in db_session.execute(downvote_sql, params)]
        return render_template("_vote.html", u=g.user, photos=allphotos,
                               upvotes=upvotes, downvotes=downvotes)
    return render_template("vote.html", u=g.user, photos=allphotos,
                           upvotes=upvotes, downvotes=downvotes)
def insert_data(name, cno, age, sex, sno, major, grade, sd, province, address,
                tele, pnum, QQ, wx, email):
    """Insert a student record via the `insert_data` stored procedure.

    Returns:
        (0, '插入成功') on success, or (100, '插入失败,记录已存在') when the
        call fails (e.g. duplicate record).
    """
    # Bound parameters replace the original f-string interpolation of raw
    # user input into the CALL statement, which was SQL injection.
    sql = ("call insert_data(:name, :cno, :age, :sex, :sno, :major, :grade, "
           ":sd, :province, :address, :tele, :pnum, :QQ, :wx, :email);")
    try:
        session.execute(sql, {
            'name': name, 'cno': cno, 'age': age, 'sex': sex, 'sno': sno,
            'major': major, 'grade': grade, 'sd': sd, 'province': province,
            'address': address, 'tele': tele, 'pnum': pnum, 'QQ': QQ,
            'wx': wx, 'email': email,
        })
    except Exception:  # narrowed from a bare except, which also swallowed KeyboardInterrupt
        session.rollback()
        return (100, '插入失败,记录已存在')
    else:
        return (0, '插入成功')
def popular():
    """Render the popularity page: photos ranked by a time-decayed vote score.

    More recent votes carry more weight -- each vote contributes
    value / age_in_seconds, so a vote's weight falls off as 1/x over time.
    """
    ranking_sql = """select v.photo_id, p.file_location, p.caption, sum( 1 / ( (extract(epoch from now()) - extract(epoch from v.timestamp)) ) * value ) as POPULAR from votes v inner join photos p on p.id = v.photo_id group by p.file_location, v.photo_id, p.caption order by 4 desc;"""
    photos = db_session.execute(ranking_sql)
    # Same ranking restricted to the single top photo via LIMIT 1.
    top_sql = """select v.photo_id, p.file_location, p.caption, sum( 1 / ( (extract(epoch from now()) - extract(epoch from v.timestamp)) ) * value ) as POPULAR from votes v inner join photos p on p.id = v.photo_id group by p.file_location, v.photo_id, p.caption order by 4 desc limit 1;"""
    topPhoto = db_session.execute(top_sql)
    print(topPhoto)  # Python-3 print(); the original Py2 statement is a syntax error here
    return render_template("popular.html", u=g.user, photos=photos,
                           topPhoto=topPhoto)
def del_account_client(token):
    """Delete a bank account belonging to the client identified by *token*.

    GET renders the deletion form; POST looks up the account by number,
    verifies ownership, deletes it, and redirects to the client's file page.
    """
    log.info('Controle del_account_client starts')
    client_id = get_client_using_Token(token)
    if request.method == 'POST':
        enter_number = request.form['number']
        req = db.query(BankAccount).filter_by(number=enter_number)
        account = db.execute(req).fetchone()
        print("this account client", account)
        # Unknown account number, or an account owned by someone else
        # (owner id is column 2 of the row): show the error/redirect page.
        # The original repeated `account is None` in a second, dead check.
        if account is None or client_id != account[2]:
            return render_template('redirectory_to_delete_account_client.html',
                                   number=enter_number)
        # NOTE(review): the lookup queries BankAccount but the delete targets
        # SavingAccount -- confirm this asymmetry is intentional.
        db.query(SavingAccount).filter_by(number=enter_number).delete()
        db.commit()
        # url_for resolves the 'client_file' endpoint defined in main and
        # injects the token into the generated URL.
        return redirect(url_for('client_file', token=token))
    else:
        return render_template('delete_account_client.html', token=token)
def del_client():
    """Remove a client (looked up by first and last name from the POSTed
    form) together with every bank account the client owns.

    GET renders the deletion form; an unknown client on POST renders the
    redirect/error page instead.
    """
    log.info('Controle del_clients starts')
    if request.method != 'POST':
        return render_template('delete_client.html')
    # Query the Client table constrained by the submitted first/last name.
    lookup = db.query(Client).filter_by(
        firstname=request.form['firstname']).filter_by(
            lastname=request.form['lastname'])
    # Fetch the matching row as a tuple.
    row = db.execute(lookup).fetchone()
    if row is None:
        # No such client: send the user to the error page.
        return render_template('redirectory_delete_client.html')
    # Column 0 of the row is the client's primary key; load the ORM object.
    client_obj = db.query(Client).get(row[0])
    # Delete every bank account tied to this client, then the client row
    # itself, so no trace of the client remains.
    db.query(BankAccount).filter_by(client_id=client_obj.client_id).delete()
    db.delete(client_obj)
    db.commit()
    return redirect(url_for('home_admin'))
def get_client_using_Token(token):
    """Resolve a session *token* to a client id; used to authenticate a client.

    NOTE(review): if no client matches the token, fetchone() returns None and
    the indexing below raises TypeError -- confirm callers guarantee a valid
    token.
    """
    token_query = db.query(Client).filter_by(token=token)
    first_row = db.execute(token_query).fetchone()
    return first_row[0]
def vote():
    """Render the voting page and record a vote submitted via the form.

    Loads all photos plus the current user's existing up/down vote photo-id
    lists; when the request carries form data, creates or updates the user's
    Vote row, bumps the photo's counter, commits, and re-renders the partial.
    """
    allphotos = db_session.query(Photo).all()
    # Bound parameters instead of %-interpolation of g.user_id -- the
    # original string formatting was an SQL-injection vector.
    upvote_sql = ("select distinct v.photo_id from votes v "
                  "where v.give_vote_user_id = :uid and v.value > 0;")
    downvote_sql = ("select distinct v.photo_id from votes v "
                    "where v.give_vote_user_id = :uid and v.value < 0;")
    params = {"uid": g.user_id}
    upvotes = [row[0] for row in db_session.execute(upvote_sql, params)]
    print(upvotes)  # Python-3 print(); the original Py2 statement is a syntax error here
    downvotes = [row[0] for row in db_session.execute(downvote_sql, params)]
    if request.form:
        # `choice` instead of `vote` -- the original shadowed this function's name.
        choice = request.form['vote']
        photoid = request.form['photoid']
        photoowner = request.form['photoowner']
        v = db_session.query(Vote).filter_by(
            give_vote_user_id=g.user_id, photo_id=photoid).first()
        if not v:
            v = Vote(give_vote_user_id=g.user_id, photo_id=photoid,
                     receive_vote_user_id=photoowner)
            db_session.add(v)
        p = db_session.query(Photo).filter_by(id=photoid).one()
        if choice == "upvote":
            v.value = 1
            # Server-side expression (Photo.up_vote + 1) avoids lost updates.
            p.up_vote = Photo.up_vote + 1
        elif choice == "downvote":
            v.value = -1
            p.down_vote = Photo.down_vote + 1
        db_session.commit()
        # Re-read both vote lists so the rendered partial reflects the new vote.
        upvotes = [row[0] for row in db_session.execute(upvote_sql, params)]
        downvotes = [row[0] for row in db_session.execute(downvote_sql, params)]
        return render_template("_vote.html", u=g.user, photos=allphotos,
                               upvotes=upvotes, downvotes=downvotes)
    return render_template("vote.html", u=g.user, photos=allphotos,
                           upvotes=upvotes, downvotes=downvotes)
def retrieve_trafficky_text():
    """Build the initial training lists from ads known to be trafficking-related.

    Pulls ad text for a hard-coded set of phone numbers plus ads containing
    'cherry11', cleans each text, and labels every entry 'trafficky'.

    Returns:
        A two-element list [documents, labels] of parallel lists.
    """
    def _clean(raw_row):
        # Strip the "('...',)" tuple repr punctuation and quotes, lowercase,
        # then drop HTML tags, \xNN escapes, and \n escapes.
        text = str(raw_row)
        text = re.sub(r'(\()', '', text)
        text = re.sub(r'(,\))', '', text)
        text = re.sub(r"(\')", "", text)
        text = text.lower()
        text = re.sub(r'<\s*\w.*?>', '', text)
        text = re.sub(r'([\\]x..)', '', text)
        text = re.sub(r'([\\]n)', '', text)
        return text

    documents = []
    labels = []
    ads_text_cmd = "SELECT ads.text AS text FROM ads_attributes JOIN ads ON ads.id = ads_id WHERE ads_attributes.value IN ('7087629612', '9292103206', '4142395461', '4146870501', '7045060509', '5203663536')"
    for text in session.execute(ads_text_cmd):
        documents.append(_clean(text))
        labels.append('trafficky')
    query = "SELECT text from ads WHERE text LIKE '%cherry11%'"
    for text in session.execute(query):
        documents.append(_clean(text))
        labels.append('trafficky')
    # Python-3 print(); the original Py2 statements are syntax errors here.
    print('documents length:', len(documents), 'labels length:', len(labels))
    return [documents, labels]
def get_percent(table_name, field_name):
    """Count the distinct values of *field_name* in *table_name*.

    Returns:
        A list of {'name': value, 'value': count} dicts (the SQL also computes
        a percentage string, but it is not returned). Empty list on query
        failure.

    SECURITY NOTE: table/field names are interpolated as SQL identifiers and
    cannot be bound as parameters -- callers must never pass user input here.
    """
    sql = f"SELECT `{field_name}`,number,concat( round( number / total * 100.00, 2 ), '%' ) percent FROM( SELECT * FROM (SELECT `{field_name}`,COUNT( 1 ) number FROM `{table_name}` GROUP BY `{field_name}` ) t1 INNER JOIN ( SELECT COUNT( 1 ) total FROM `{table_name}` ) t2 ON 1 = 1 ) t;"
    try:
        result = session.execute(sql)
    except Exception:
        # The original bare except left `result` unbound and then crashed
        # with a NameError below; roll back and report no data instead.
        session.rollback()
        return []
    # Rows look like ('计算机', 5, '8.00%'); the percent string is unused.
    return [{'name': row[0], 'value': row[1]} for row in result.fetchall()]
def get_random_id():
    """Pick one random ad id (as a string) from the ads table.

    Side effect: rewrites id_list.pickle. NOTE(review): the pickle is dumped
    BEFORE the list is populated, so the file always contains an empty list;
    kept as-is to preserve the original behavior -- confirm intent.
    """
    id_list = []
    # `with` guarantees the handle is closed; the original leaked it on error.
    with open('id_list.pickle', 'wb') as f:
        pickle.dump(id_list, f)
    ads_id_cmd = "SELECT id FROM ads"
    for row in session.execute(ads_id_cmd):
        # Rows stringify as "(123L,)"; strip the tuple punctuation and the
        # Python-2 long suffix so only the digits remain.
        row = str(row)
        row = re.sub(r'(\()', '', row)
        row = re.sub(r'(L,\))', '', row)
        id_list.append(row)
    return random.choice(id_list)
def test_all_samples(vectorizer):
    """Classify up to 1M ads with the pickled classifier and print a histogram
    of positive-class probabilities rounded to one decimal place.

    Args:
        vectorizer: the fitted vectorizer used at training time; its
            transform() output must match what the classifier expects.
    """
    test_samples_list = []
    probability_classification_list = []
    # Pre-seeded buckets for probabilities rounded to one decimal place.
    histogram_data = {0.0: 0, 0.1: 0, 0.2: 0, 0.3: 0, 0.4: 0, 0.5: 0,
                     0.6: 0, 0.7: 0, 0.8: 0, 0.9: 0, 1.0: 0}
    test_samples = session.execute("SELECT text FROM ads LIMIT 1000000")
    print(test_samples)  # Python-3 print(); Py2 statements were syntax errors here
    for text in test_samples:
        # Strip tuple-repr punctuation/quotes, lowercase, drop HTML tags,
        # \xNN escapes, and \n escapes.
        cleaned = str(text)
        cleaned = re.sub(r'(\()', '', cleaned)
        cleaned = re.sub(r'(,\))', '', cleaned)
        cleaned = re.sub(r"(\')", "", cleaned)
        cleaned = cleaned.lower()
        cleaned = re.sub(r'<\s*\w.*?>', '', cleaned)
        cleaned = re.sub(r'([\\]x..)', '', cleaned)
        cleaned = re.sub(r'([\\]n)', '', cleaned)
        test_samples_list.append(cleaned)
    # 'rb' is required for pickle on Python 3 (the original text-mode open
    # only worked on Python 2), and `with` guarantees the handle is closed.
    # SECURITY: pickle.load executes arbitrary code -- only load a
    # classifier.pickle produced by this project.
    with open('classifier.pickle', 'rb') as f:
        classifier = pickle.load(f)
    samples = vectorizer.transform(test_samples_list)
    classification_new_ads = classifier.predict(samples)
    ndarray = classifier.predict_proba(samples)
    for item in ndarray:
        # Parse the second (positive-class) probability out of the
        # "[p0 p1]" string repr of each row.
        prob = str(item)
        prob = re.sub(r'^(\[ +).+\s', '', prob)
        prob = re.sub(r'(\])', '', prob)
        prob = prob.replace(',', '.')
        probability_classification_list.append(round(float(prob), 1))
    print("binary classification:", classification_new_ads)
    print("probability classification list:", probability_classification_list)
    for item in probability_classification_list:
        # Keys are pre-seeded, so a plain increment is safe (and equivalent
        # to the original's if/else, which also produced 1 from 0).
        histogram_data[item] += 1
    print(histogram_data)
def generate_test_data(random_id):
    """Fetch and clean the text of the ad whose id is *random_id*.

    Returns:
        A list of cleaned text strings (one per matching row) suitable for
        feeding to the vectorizer.
    """
    test_document = []
    query = session.query(Ad.text).filter(Ad.id == random_id)
    for text in session.execute(query):
        # Strip tuple-repr punctuation/quotes, lowercase, drop HTML tags,
        # \xNN escapes, and \n escapes.
        cleaned = str(text)
        cleaned = re.sub(r'(\()', '', cleaned)
        cleaned = re.sub(r'(,\))', '', cleaned)
        cleaned = re.sub(r"(\')", "", cleaned)
        cleaned = cleaned.lower()
        cleaned = re.sub(r'<\s*\w.*?>', '', cleaned)
        cleaned = re.sub(r'([\\]x..)', '', cleaned)
        cleaned = re.sub(r'([\\]n)', '', cleaned)
        test_document.append(cleaned)
    print(test_document)  # Python-3 print(); the original Py2 statement is a syntax error here
    return test_document
def retrieve_not_trafficky_text(dl_list):
    """Append cleaned ad texts for a hard-coded set of known benign phone
    numbers to *dl_list* ([documents, labels]), labelling each entry
    'not trafficky'.

    Returns:
        The same dl_list, mutated in place.
    """
    benign_sql = "SELECT ads.text AS text FROM ads_attributes JOIN ads ON ads.id = ads_id WHERE ads_attributes.value IN ('7027565783', '4702535139', '9172794962', '6149001084', '7865195399', '4048401717', '3133388625', '5106213824', '3374231635', '2622609175', '6465433780', '4388078188')"
    documents, labels = dl_list[0], dl_list[1]
    for row in session.execute(benign_sql):
        # Strip tuple-repr punctuation and quotes, lowercase, then drop HTML
        # tags, \xNN escapes, and \n escapes.
        cleaned = str(row)
        cleaned = re.sub(r'(\()', '', cleaned)
        cleaned = re.sub(r'(,\))', '', cleaned)
        cleaned = re.sub(r"(\')", "", cleaned)
        cleaned = cleaned.lower()
        cleaned = re.sub(r'<\s*\w.*?>', '', cleaned)
        cleaned = re.sub(r'([\\]x..)', '', cleaned)
        cleaned = re.sub(r'([\\]n)', '', cleaned)
        documents.append(cleaned)
        labels.append('not trafficky')
    return dl_list
def test_all_samples(vectorizer):
    """Classify up to 1M ads with the pickled classifier and print a histogram
    of positive-class probabilities rounded to one decimal place.

    Args:
        vectorizer: the fitted vectorizer used at training time; its
            transform() output must match what the classifier expects.
    """
    test_samples_list = []
    probability_classification_list = []
    # Pre-seeded buckets for probabilities rounded to one decimal place.
    histogram_data = {0.0: 0, 0.1: 0, 0.2: 0, 0.3: 0, 0.4: 0, 0.5: 0,
                     0.6: 0, 0.7: 0, 0.8: 0, 0.9: 0, 1.0: 0}
    test_samples = session.execute("SELECT text FROM ads LIMIT 1000000")
    print(test_samples)  # Python-3 print(); Py2 statements were syntax errors here
    for text in test_samples:
        # Strip tuple-repr punctuation/quotes, lowercase, drop HTML tags,
        # \xNN escapes, and \n escapes.
        cleaned = str(text)
        cleaned = re.sub(r'(\()', '', cleaned)
        cleaned = re.sub(r'(,\))', '', cleaned)
        cleaned = re.sub(r"(\')", "", cleaned)
        cleaned = cleaned.lower()
        cleaned = re.sub(r'<\s*\w.*?>', '', cleaned)
        cleaned = re.sub(r'([\\]x..)', '', cleaned)
        cleaned = re.sub(r'([\\]n)', '', cleaned)
        test_samples_list.append(cleaned)
    # 'rb' is required for pickle on Python 3 (the original text-mode open
    # only worked on Python 2), and `with` guarantees the handle is closed.
    # SECURITY: pickle.load executes arbitrary code -- only load a
    # classifier.pickle produced by this project.
    with open('classifier.pickle', 'rb') as f:
        classifier = pickle.load(f)
    samples = vectorizer.transform(test_samples_list)
    classification_new_ads = classifier.predict(samples)
    ndarray = classifier.predict_proba(samples)
    for item in ndarray:
        # Parse the second (positive-class) probability out of the
        # "[p0 p1]" string repr of each row.
        prob = str(item)
        prob = re.sub(r'^(\[ +).+\s', '', prob)
        prob = re.sub(r'(\])', '', prob)
        prob = prob.replace(',', '.')
        probability_classification_list.append(round(float(prob), 1))
    print("binary classification:", classification_new_ads)
    print("probability classification list:", probability_classification_list)
    for item in probability_classification_list:
        # Keys are pre-seeded, so a plain increment is safe (and equivalent
        # to the original's if/else, which also produced 1 from 0).
        histogram_data[item] += 1
    print(histogram_data)
def execute_query(*args):
    """Build a query from the positional arguments and execute it.

    Returns a single result row, or None when the query matches nothing.

    NOTE(review): the args are handed to build_query as one tuple rather than
    unpacked -- confirm build_query expects a tuple.
    """
    query = build_query(args)
    return session.execute(query).one_or_none()