def main(_):
    """Dispatch to the model routine selected by the --mode flag.

    Raises ValueError when the flag value is not one of
    'train', 'test' or 'predict'.
    """
    mode = tf.flags.FLAGS.mode
    handlers = {
        'train': model.train,
        'test': model.test,
        'predict': model.predict,
    }
    try:
        handler = handlers[mode]
    except KeyError:
        raise ValueError('--mode {} was not found.'.format(mode))
    handler()
def trim(model, xtrain, ytrain, name, threshold, path_to_data, win_len):
    """Remove from xtrain/ytrain the samples the model already predicts well.

    A sample is dropped when its F1 score is >= ``threshold``; the trimmed
    dataset is pickled to ``path_to_data``.

    :param model: fitted model exposing ``predict``
    :param xtrain: training inputs (ndarray, first axis = samples)
    :param ytrain: training targets (ndarray, same leading axis as xtrain)
    :param name: suffix used in the output file name ("trim_<name>.pkl")
    :param threshold: F1 score at or above which a sample is discarded
    :param path_to_data: path to the folder where the trimmed dataset will be saved
    :param win_len: window length; win_len // 2 is clipped from both ends
    :return: tuple (xtrain_new, ytrain_new) of the trimmed arrays
    """
    pred_train = np.array(model.predict(xtrain))
    half = win_len // 2
    # Collect the indices to keep and filter once at the end: the original
    # called np.delete inside the loop, which copies the whole array each
    # time (accidental O(n^2)).
    keep = []
    for i in range(len(xtrain)):
        # 5000 = assumed fixed per-sample signal length — TODO confirm
        pred = pred_train[i, half:5000 - half, :]
        y = ytrain[i, half:5000 - half, :]
        stat = statistics(np.expand_dims(y, axis=0), np.expand_dims(pred, axis=0))
        if F_score(stat) < threshold:
            keep.append(i)
    xtrain_new = xtrain[keep]
    ytrain_new = ytrain[keep]
    if not os.path.exists(path_to_data):
        os.makedirs(path_to_data)
    # os.path.join instead of a hard-coded "\\" so the path is portable,
    # and a context manager so the file is closed even on error.
    out_path = os.path.join(path_to_data, "trim_" + name + ".pkl")
    with open(out_path, 'wb') as outfile:
        pkl.dump({"x": xtrain_new, "y": ytrain_new}, outfile)
    return xtrain_new, ytrain_new
async def predict(payload: FakeNewsPayload):
    """Score a payload and return its probability of being fake news."""
    features = fake_news_preprocess(payload)
    scores = model.predict(features)
    # model outputs the probability of the "real" class for the single sample
    p_real = float(scores[0][0])
    return {'fake_probability': 1 - p_real}
def predict_cost(model, dicti):
    """Predict a cost from demographic and reason-code features.

    Builds a single-row one-hot feature dict matching the columns the model
    was trained on, fills in the values from ``dicti``, and returns the
    first prediction.

    :param model: fitted model exposing ``predict(DataFrame)``
    :param dicti: mapping with keys 'AGE', 'LAT', 'LON', 'ethnicity',
        'gender', 'marital', 'race', 'reasoncode' (reasoncode as a string
        without the trailing '.0')
    :return: the model's prediction for the single row
    """
    # All known feature columns, zero-initialised; insertion order defines
    # the DataFrame column order.
    di = {
        'AGE': 0, 'LAT': 0, 'LON': 0,
        'ethnicity_hispanic': 0.0, 'ethnicity_nonhispanic': 0.0,
        'gender_F': 0.0, 'gender_M': 0.0,
        'marital_M': 0.0, 'marital_S': 0.0,
        'race_asian': 0.0, 'race_black': 0.0, 'race_native': 0.0,
        'race_white': 0.0,
        'reasoncode_10509002.0': 0.0, 'reasoncode_185086009.0': 0.0,
        'reasoncode_192127007.0': 0.0, 'reasoncode_195662009.0': 0.0,
        'reasoncode_201834006.0': 0.0, 'reasoncode_230265002.0': 0.0,
        'reasoncode_233678006.0': 0.0, 'reasoncode_239872002.0': 0.0,
        'reasoncode_239873007.0': 0.0, 'reasoncode_24079001.0': 0.0,
        'reasoncode_254837009.0': 0.0, 'reasoncode_26929004.0': 0.0,
        'reasoncode_301011002.0': 0.0, 'reasoncode_363406005.0': 0.0,
        'reasoncode_36971009.0': 0.0, 'reasoncode_38822007.0': 0.0,
        'reasoncode_40275004.0': 0.0, 'reasoncode_424132000.0': 0.0,
        'reasoncode_43878008.0': 0.0, 'reasoncode_44054006.0': 0.0,
        'reasoncode_444814009.0': 0.0, 'reasoncode_55822004.0': 0.0,
        'reasoncode_59621000.0': 0.0, 'reasoncode_75498004.0': 0.0,
        'reasoncode_87433001.0': 0.0, 'reasoncode_88805009.0': 0.0,
        'reasoncode_90560007.0': 0.0
    }
    # float() conversion added for consistency with predict_careplan; the
    # original passed the raw values through, which breaks on string input.
    di['AGE'] = float(dicti['AGE'])
    di['LAT'] = float(dicti['LAT'])
    di['LON'] = float(dicti['LON'])
    # one-hot encode the categorical fields
    di['ethnicity_' + dicti['ethnicity']] = 1.0
    di['gender_' + dicti['gender']] = 1.0
    di['marital_' + dicti['marital']] = 1.0
    di['race_' + dicti['race']] = 1.0
    di['reasoncode_' + dicti['reasoncode'] + '.0'] = 1.0
    return model.predict(pd.DataFrame(di, index=[0]))[0]
def predict():
    """Flask endpoint: score one JSON record with the global model."""
    payload = request.get_json(force=True)
    # wrap every scalar in a one-element list so the DataFrame has one row
    payload.update((key, [value]) for key, value in payload.items())
    frame = pd.DataFrame.from_dict(payload)
    score = model.predict(frame)
    # respond with the prediction as a plain float
    return jsonify(results={'results': float(score)})
def trim(model, xtrain, ytrain, data_name, threshold, path_to_data, win_len):
    """Drop training samples on which the model scores F1 >= threshold.

    The trimmed dataset is pickled to ``path_to_data`` as
    "trim_<data_name>.pkl".

    :param model: fitted model exposing ``predict``
    :param xtrain: training inputs (ndarray, first axis = samples)
    :param ytrain: training targets (ndarray, same leading axis as xtrain)
    :param data_name: suffix used in the output file name
    :param threshold: F1 score at or above which a sample is discarded
    :param path_to_data: folder where the trimmed dataset will be saved
    :param win_len: window length; win_len // 2 is clipped from both ends
    :return: tuple (xtrain_new, ytrain_new) of the trimmed arrays
    """
    pred_train = np.array(model.predict(xtrain))
    half = win_len // 2
    # Filter once via an index list instead of calling np.delete per sample,
    # which copied the whole array each iteration (accidental O(n^2)).
    keep = []
    for i in range(len(xtrain)):
        # 5000 = assumed fixed per-sample signal length — TODO confirm
        pred = pred_train[i, half:5000 - half, :]
        y = ytrain[i, half:5000 - half, :]
        stat = statistics(np.expand_dims(y, axis=0), np.expand_dims(pred, axis=0))
        if F_score(stat) < threshold:
            keep.append(i)
    xtrain_new = xtrain[keep]
    ytrain_new = ytrain[keep]
    # Create the destination folder if missing (matches the sibling trim
    # variant) and build the path portably instead of hard-coding "\\".
    if not os.path.exists(path_to_data):
        os.makedirs(path_to_data)
    out_path = os.path.join(path_to_data, "trim_" + data_name + ".pkl")
    with open(out_path, 'wb') as outfile:
        pkl.dump({"x": xtrain_new, "y": ytrain_new}, outfile)
    return xtrain_new, ytrain_new
def simulate(test_np_x, test_np_y):
    """Back-test the model by simulating lottery ticket purchases.

    For every sample in the test set, five tickets are drawn from the
    model's predicted per-ball probability distributions and compared with
    the actual winning numbers.

    :param test_np_x: test inputs (dict of arrays, one per input head)
    :param test_np_y: test targets (dict of one-hot arrays, one per ball)
    :return: net profit of this simulation (prizes won minus money spent)
    """
    total_prize = 0   # prize money collected so far
    total_spent = 0   # money spent on tickets so far
    predicts = model.predict(test_np_x, batch_size=settings.BATCH_SIZE)
    ball_count = settings.FRONT_SIZE + settings.BACK_SIZE
    sample_count = len(test_np_x['x1'])
    for j in range(sample_count):
        # actual draw for this period: argmax of each one-hot target
        winning = [np.argmax(test_np_y['y{}'.format(k + 1)][j])
                   for k in range(ball_count)]
        # five tickets per draw, 10 spent in total
        total_spent += 10
        for _ in range(5):
            # probability distribution of every ball for this sample
            distributions = [predicts[i][j] for i in range(ball_count)]
            # sample one number sequence from those distributions
            ticket = utils.select_seqs(distributions)
            prize = utils.lotto_calculate(winning, ticket)
            total_prize += prize
            if prize:
                print('{} 中奖了,{}元! {}/{}'.format(j, prize, total_prize, total_spent))
    print('买彩票花费金钱共{}元,中奖金额共{}元,赚取{}元'.format(total_spent, total_prize, total_prize - total_spent))
    return total_prize - total_spent
def predict_test(file_path, model, X_test):
    """Load the weights at ``file_path`` into ``model`` and predict on ``X_test``.

    :param file_path: path to the saved weights file
    :param model: model exposing ``load_weights`` and ``predict``
    :param X_test: test inputs to score
    :return: the model's predictions for X_test
    """
    model.load_weights(file_path)
    return model.predict(X_test, verbose=1, batch_size=32)
# @Author : AaronJny
# @Date : 2019/11/26
# @Desc : Load a trained model and let it randomly pick lottery numbers for the next draw
from dataset import LottoDataSet
from models import model
import settings
import utils

# restore the trained weights
model.load_weights(settings.PREDICT_MODEL_PATH)
# build the dataset wrapper
lotto_dataset = LottoDataSet()
# input for prediction: data from the MAX_STEPS-th most recent draw up to the latest
x = lotto_dataset.predict_data
predicts = model.predict(x, batch_size=1)
# first (and only) sample's probability distribution for every ball
probabilities = [dist[0] for dist in predicts]
# pick settings.PREDICT_NUM tickets in total
result = []
for _ in range(settings.PREDICT_NUM):
    # sample one number sequence from the distributions; +1 restores the
    # original 1-based ball numbering
    picked = utils.select_seqs(probabilities)
    result.append([ball + 1 for ball in picked])
# print the ticket sequences to buy
print('本次预测结果如下:')
for index, balls in enumerate(result, start=1):
    print('第{}注 {}'.format(index, ' '.join(map(str, balls))))
# @Desc : Interactively type sentences to check whether the trained model works
from dataset import tokenizer
from models import model
import settings

# load the best saved weights
model.load_weights(settings.BEST_WEIGHTS_PATH)
print('启动验证程序!')
while True:
    try:
        sentence = input('请输入一句话,模型将判断其情绪倾向:')
        token_ids, segment_ids = tokenizer.encode(sentence)
        # model takes batched token ids and segment ids; [0][0] extracts the
        # score of the single sample
        output = model.predict([[
            token_ids,
        ], [
            segment_ids,
        ]])[0][0]
        if output > 0.5:
            print('正面情绪!')
        else:
            print('负面情绪!')
    except KeyboardInterrupt:
        print('结束程序!')
        break
# Sample session transcript. The original left this triple-quoted block
# unterminated, which is a SyntaxError; it is properly closed here.
"""
请输入一句话,模型将判断其情绪倾向:虽然没有买到想要的东西,但我并不沮丧
正面情绪!
请输入一句话,模型将判断其情绪倾向:没有买到想要的东西, 有点沮丧
负面情绪!
请输入一句话,模型将判断其情绪倾向:书挺好的,就是贵了点
"""
def predict_from_real_images(model, image_dims, images, image_labels):
    """Resize the model to ``image_dims`` and run prediction on ``images``."""
    from models.model import predict
    resized = model.resize(image_dims)
    return predict(resized, images, image_labels=image_labels)
def predict_careplan(model, dicti):
    """Predict a care plan from demographic and reason-code features.

    Builds a single-row one-hot feature frame matching the columns the model
    was trained on, fills it from ``dicti``, and returns the first prediction.

    :param model: fitted model exposing ``predict(DataFrame)``
    :param dicti: mapping with keys 'AGE', 'LAT', 'LON', 'ethnicity',
        'gender', 'marital', 'race', 'reasoncode' (reasoncode as a string
        without the trailing '.0')
    :return: the model's prediction for the single row
    """
    # Categorical one-hot columns, in the exact order the model expects.
    categorical_columns = (
        'ethnicity_hispanic', 'ethnicity_nonhispanic',
        'gender_F', 'gender_M',
        'marital_M', 'marital_S',
        'race_asian', 'race_black', 'race_native', 'race_other', 'race_white',
    )
    # All known reason codes, in the exact column order the model expects.
    reason_codes = (
        '10509002', '109838007', '110030002', '126906006', '15724005',
        '15777000', '16114001', '185086009', '192127007', '201834006',
        '230265002', '233678006', '239720000', '239872002', '239873007',
        '24079001', '262574004', '263102004', '26929004', '283371005',
        '283385000', '284549007', '284551006', '301011002', '307731004',
        '30832001', '33737001', '359817006', '363406005', '36923009',
        '370143000', '370247008', '38822007', '39848009', '40275004',
        '403190006', '403191005', '424132000', '44054006', '444448004',
        '444470001', '44465007', '449868002', '45816000', '47505003',
        '55680006', '55822004', '58150001', '59621000', '62106007',
        '62564004', '65966004', '67811000119102', '69896004', '70704007',
        '72892002', '87433001', '88805009', '90560007', '93761005',
        '94260004', '95417003',
    )
    # Insertion order defines DataFrame column order: numeric features
    # first, then the zero-initialised one-hot columns.
    features = {
        'AGE': float(dicti['AGE']),
        'LAT': float(dicti['LAT']),
        'LON': float(dicti['LON']),
    }
    for column in categorical_columns:
        features[column] = 0.0
    for code in reason_codes:
        features['reasoncode_' + code + '.0'] = 0.0
    # switch on the one-hot columns selected by the input record
    features['ethnicity_' + dicti['ethnicity']] = 1.0
    features['gender_' + dicti['gender']] = 1.0
    features['marital_' + dicti['marital']] = 1.0
    features['race_' + dicti['race']] = 1.0
    features['reasoncode_' + dicti['reasoncode'] + '.0'] = 1.0
    return model.predict(pd.DataFrame(features, index=[0]))[0]