def add_user(res):
    """add a new user"""
    params = utils.get_data(['user_id', 'email'], request.values)
    user.add_user(params)
    return res.send('Successfully added user')

def edit_user(res, user_id=None):
    """edit user information

    Keyword arguments:
    res -- instance of Response class
    user_id -- id of user to be edited (default None)
    """
    if not user_id:
        if not utils.has_scopes(db, request.user_id, 'self.info'):
            raise FailedRequest(config['ERROR']['permission'], 403)
        user_id = request.user_id
        params = utils.get_data([], ['genre', 'mood', 'instrument', 'picture'],
                                request.values)
    else:
        if not utils.has_scopes(db, request.user_id, 'user.info'):
            raise FailedRequest(config['ERROR']['permission'], 403)
        params = utils.get_data(['active'], [], request.values)

    params['user_id'] = user_id

    if 'active' in params:
        user.edit_user_active(params)
    else:
        user_image = request.files.get('image')
        if user_image and utils.is_file_type(user_image.filename,
                                             user_image.mimetype, 'image'):
            params.update({
                'image': user_image,
                'image_filename': secure_filename(user_image.filename)
            })
            user.edit_user_picture(params)
        user.edit_user_preference(params)

    result = dict(params)
    result.pop('image', None)
    return res.send(result)

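# utils.get_data does the request parsing for every handler in this module, but its
# implementation is not shown in this section. The function below is only a minimal
# sketch of the assumed contract: required keys must be present in the request
# mapping, optional keys fall back to None, and anything else is dropped. The name
# and the ValueError are placeholders; the real helper presumably raises
# FailedRequest on missing fields.
def _get_data_sketch(required, optional=None, values=None):
    """Hypothetical stand-in for utils.get_data (assumed semantics only)."""
    # Some call sites pass only (required, values); treat the second positional
    # argument as the request mapping in that case.
    if values is None:
        values, optional = optional, []
    params = {}
    for key in required:
        if key not in values:
            raise ValueError('missing required field: %s' % key)
        params[key] = values[key]
    for key in optional or []:
        params[key] = values.get(key)
    return params
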
def search_users(res):
    """search users matching a query string"""
    if not utils.has_scopes(db, request.user_id, 'user.list'):
        raise FailedRequest(config['ERROR']['permission'], 403)
    params = utils.get_data(['query'], ['page', 'entries'], request.args)
    result = user.search_users(params)
    return res.send(result)

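# Every protected handler in this module repeats the same permission guard:
# utils.has_scopes(...) followed by raising FailedRequest with
# config['ERROR']['permission']. The decorator below is only an illustrative sketch
# of how that guard could be factored out; the decorator itself is not part of the
# source, only the inline guard is.
import functools

def _requires_scope(scope):
    """Hypothetical wrapper around the has_scopes/FailedRequest guard used above."""
    def decorator(handler):
        @functools.wraps(handler)
        def wrapper(res, *args, **kwargs):
            # same check the handlers perform inline
            if not utils.has_scopes(db, request.user_id, scope):
                raise FailedRequest(config['ERROR']['permission'], 403)
            return handler(res, *args, **kwargs)
        return wrapper
    return decorator

# e.g. decorating search_users with @_requires_scope('user.list') would replace its
# inline permission check.
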
def get_data(res):
    """get air_check information"""
    params = utils.get_data(['country'], [], request.args)
    result = air_check.get_data_by_country(params['country'])
    if len(result) == 0:
        raise FailedRequest('Seems the ninjas cannot find any record. Sorry :(', 404)
    return res.send(result)

def get_user_flagged_tracks(res):
    """get tracks flagged by the current user"""
    if not utils.has_scopes(db, request.user_id, 'user.music.list'):
        raise FailedRequest(config['ERROR']['permission'], 403)
    params = utils.get_data([], ['page', 'entries'], request.args)
    params['user_id'] = request.user_id
    result = user.get_user_flagged_tracks(params)
    return res.send(result)

def decline_invitation(res):
    """decline an invitation for the current user"""
    if not utils.has_scopes(db, request.user_id, 'self.info'):
        raise FailedRequest(config['ERROR']['permission'], 403)
    params = utils.get_data(['role'], ['music_provider_id', 'artist_id'],
                            request.values)
    params['user_id'] = request.user_id
    user.decline_invitation(params)
    return res.send(params)

def autocomplete_users(res):
    """autocomplete user names matching a query string"""
    if not utils.has_scopes(db, request.user_id, 'user.available.list'):
        raise FailedRequest(config['ERROR']['permission'], 403)
    params = utils.get_data(['query'], ['entries'], request.args)
    params['entries'] = int(params['entries'] or config['ENTRIES'])
    result = user.autocomplete_users(params)
    return res.send(result)

def get_data2(res):
    """get air_check information"""
    params = utils.get_data(['lat', 'lng'], ['distance'], request.args)
    if not params['distance']:
        params['distance'] = 15  # 15 km default radius
    result = air_check.get_data_by_point(params['lng'], params['lat'],
                                         params['distance'])
    if len(result) == 0:
        raise FailedRequest('Seems the ninjas cannot find any record. Sorry :(', 404)
    return res.send(result)

def get_all_users(res):
    """get all users information sorted by email

    Keyword arguments:
    res -- instance of Response class
    """
    if not utils.has_scopes(db, request.user_id, 'user.list'):
        raise FailedRequest(config['ERROR']['permission'], 403)
    params = utils.get_data([], ['page', 'entries'], request.args)
    result = user.get_all_users(params)
    return res.send(result)

def gateway_all(res):
    """store mobile subscriber information via the gateway

    Keyword arguments:
    res -- instance of Response class
    """
    params = utils.get_data(['access_token', 'subscriber_number'], [],
                            request.args)
    result = gateway.store_mobile_info(params)
    if not result:
        raise FailedRequest('Cannot do that. Sorry :(', 403)
    return res.send(request.args)

def main():
    parser = argparse.ArgumentParser(description='Short sample app')
    parser.add_argument('--data_dir', help='data directory and save model')
    parser.add_argument('--params_path', help='config file path',
                        default='./params_base.json')
    args = parser.parse_args()
    data_dir = args.data_dir
    params_path = args.params_path

    test_data_dir = os.path.join(data_dir, 'query')
    label_map_path = os.path.join(data_dir, "label_map.pbtxt")
    model_dir = os.path.join(data_dir, 'save_model')

    params = Params(params_path)
    data_tuple = get_data(test_data_dir, label_map_path)
    images1, images2, labels = train_input(data_tuple, params.eval_input)

    # model
    images = tf.concat([images1, images2], axis=0)
    net = SelfNetModel(batch_size=params.batch_size,
                       res_layer_num=params.res_layer_num,
                       classes_num=params.classes_num,
                       embedding_size=params.embedding_size)
    feature, pred = net.predict(
        input_batch=images)  # image_batch is twice the size of the label batch

    saver = tf.train.Saver()  # save all parameters
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    model_path = tf.train.latest_checkpoint(model_dir)
    if model_path:
        print('load ckpt from: %s.' % model_path)
        saver.restore(sess, save_path=model_path)

    cont = 0
    while True:
        try:
            # ---------------- first_stage train ----------------
            embeddings, classes = sess.run([feature, pred])
            cont += 1
            print(cont)
        except tf.errors.OutOfRangeError:
            break

def apply(res):
    """submit application details for the current user"""
    if not utils.has_scopes(db, request.user_id, 'self.info'):
        raise FailedRequest(config['ERROR']['permission'], 403)
    application_status = user.get_user(request.user_id)['application_status']
    if application_status != 'applied':
        raise FailedRequest(config['ERROR']['permission'], 403)
    params = utils.get_data(['contract_id', 'original_music', 'name', 'source'],
                            ['skype', 'genre', 'music_location'],
                            request.values)
    params['user_id'] = request.user_id
    user.apply(params)
    return res.send(params)

import unittest

import requests
from util.utils import (Create_Data, Execute_Mysql, Add_GuestURL, DATAPATH,
                        get_data)

file = DATAPATH + '/add_guest.csv'
response_data = get_data(file)


class Test_Add_Guest(unittest.TestCase):
    '''add-guest API'''

    def setUp(self):
        self.em = Execute_Mysql()
        self.em.con_mysql()
        self.gd = Create_Data()
        self.data = {}
        sql = 'select id from sign_event where status=1 and start_time>=now();'
        self.em.execute_sql(sql)
        self.a = self.em.get_allsql_result()
        self.data['eid'] = int(self.a[-1][0])
        self.data['realname'] = self.gd.get_name()
        self.data['phone'] = self.gd.get_phone()
        self.data['email'] = self.gd.get_email()

    def tearDown(self):
        self.em.close_mysql()

    def testcase01(self):
        '''adding a guest succeeds when a phone number starting with "13" is passed'''
        sql = 'select phone from sign_guest'
        self.em.execute_sql(sql)
        self.e_phone = self.em.get_allsql_result()
        self.error_phone = []

import pandas as pd
import numpy as np
from importlib import reload

from embedding import fasttext
from model import lstm, train
from util import utils

train = utils.get_data("train")
test = utils.get_data("test")

max_words, max_seq_len = 100000, 150
target_labels = [
    'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'
]
num_classes = len(target_labels)
max_words = 100000
max_seq_len = 150

# collect all the words from the train and test comments
word_collection = fasttext.word_collection()
word_collection.feed_word_by_df(train, "comment_text")
word_collection.feed_word_by_df(test, "comment_text")

train = train.fillna("NAN")
test = test.fillna("NAN")
train['comment_seq'], test[
    'comment_seq'], word_index = fasttext.convert_text2seq(

import unittest

import requests
from util.utils import (Create_Data, Execute_Mysql, Sec_Get_Event_ListURL,
                        DATAPATH, get_data)

file = DATAPATH + '/test_sec_get_event_list.csv'
response_data = get_data(file)
file = DATAPATH + '/ifo_auth.csv'
auth_data = get_data(file)


class Test_Sec_Get_Event_List(unittest.TestCase):
    '''launch-event query API with user authentication'''

    def setUp(self):
        self.em = Execute_Mysql()
        self.em.con_mysql()
        self.gd = Create_Data()
        self.data = {}
        sql = 'select id from sign_event'
        self.em.execute_sql(sql)
        self.e_id = self.em.get_allsql_result()
        self.data['eid'] = self.e_id[self.gd.get_num(0, len(self.e_id) - 1)][0]
        sql = 'select name from sign_event where id="%d";'
        self.em.execute_sql(sql % (self.data['eid']))
        self.data['name'] = self.em.get_sql_result()[0]

    def tearDown(self):
        self.em.close_mysql()

    def testcase01(self):
        '''the query succeeds when all parameters are passed correctly'''
        self.r = requests.get(Sec_Get_Event_ListURL, self.data,

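# Both unittest modules above load their expected request/response rows from CSV
# fixtures via util.utils.get_data(file). That loader is not included in this section;
# the sketch below only illustrates the assumed contract (one dict per CSV row). The
# function name is a placeholder, not the project's real helper.
import csv

def _load_csv_rows_sketch(path):
    """Hypothetical stand-in for util.utils.get_data used by the CSV-driven tests."""
    with open(path, newline='', encoding='utf-8') as f:
        return list(csv.DictReader(f))
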
def main():
    parser = argparse.ArgumentParser(description='Short sample app')
    parser.add_argument('--data_dir', help='data directory and save model')
    parser.add_argument('--params_path', help='config file path',
                        default='./params_base.json')
    args = parser.parse_args()
    data_dir = args.data_dir
    params_path = args.params_path

    # test_data_dir = os.path.join(data_dir, 'query')
    train_data_dir = os.path.join(data_dir, 'gallery')
    label_map_path = os.path.join(data_dir, "label_map.pbtxt")
    model_dir = os.path.join(data_dir, 'save_model')

    params = Params(params_path)
    if params.train['mode'] == 'tuple':
        # paired input; the labels are not paired, so the two image batches are concatenated
        data_tuple = get_train_tuple_data(train_data_dir, label_map_path)
        images1, images2, labels = train_tuple_input(data_tuple,
                                                     params.train_input)
        images = tf.concat([images1, images2], axis=0)
    else:
        # random (unpaired) input
        data_tuple = get_data(train_data_dir, label_map_path)
        images, labels = train_input(data_tuple, params.train_input)

    # model
    net = SelfNetModel(batch_size=params.train_input['batch_size'],
                       res_layer_num=params.model['res_layer_num'],
                       classes_num=params.model['num_classes'],
                       embedding_size=params.model['embedding_size'],
                       labels=labels,
                       mode=params.train['mode'])
    loss = net.loss(input_batch=images)  # image_batch is twice the size of the label batch

    # optimizer
    global_step = tf.train.get_or_create_global_step()
    lr = tf.train.exponential_decay(params.train['learning_rate'],
                                    global_step,
                                    params.train['decay_step'],
                                    params.train['decay_rate'],
                                    staircase=True)
    optimizer = optimizer_factory[params.train['optimizer']](
        learning_rate=lr, momentum=params.train['momentum'])
    train_op = optimizer.minimize(loss, global_step=global_step)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # BN updates and moving-average updates
    update_ops.append(train_op)
    update_op = tf.group(*update_ops)  # tf.group() with star-expression unpacking
    with tf.control_dependencies([update_op]):
        loss_tensor = tf.identity(loss, name='loss_op')  # tf.identity()

    net.variable_summaries(exclude_name=params.train['optimizer'])
    merged = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(
        os.path.join(data_dir, 'save_model'), tf.get_default_graph())

    saver = tf.train.Saver()  # save all parameters
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    model_path = tf.train.latest_checkpoint(model_dir)
    if model_path:
        print('load ckpt from: %s.' % model_path)
        var_map = net.resotre_map(model_path, include_global_step=True)
        print(var_map)
        init = tf.train.Saver(var_map)
        init.restore(sess, save_path=model_path)

    while True:
        try:
            # ---------------- first_stage train ----------------
            step = sess.run(global_step)
            loss, summary = sess.run([loss_tensor, merged])
            print('global step:', step, end='|')
            print('loss:%.5f' % loss)
            if step % params.train['save_summary_steps'] == 0:
                summary_writer.add_summary(summary, step)
            if step % params.train['save_model_steps'] == 0:
                saver.save(sess,
                           save_path=os.path.join(data_dir,
                                                  'save_model/model.ckpt'),
                           global_step=step)
        except tf.errors.OutOfRangeError:
            break
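
# The two TensorFlow scripts in this section only show main(); the entry point and the
# sample invocation below are assumptions based on the argparse flags they define
# (--data_dir, --params_path), not code taken from the source.
if __name__ == '__main__':
    # e.g. python train.py --data_dir /path/to/data --params_path ./params_base.json
    main()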