def lstm_predict(model_path, lstm_data_x, lstm_data_y):
    import matplotlib.pyplot as plt
    from keras.models import load_model

    # load the trained model
    load_model = load_model(model_path)
    #load_model = load_model("./output/"+username+"_model_"+str(emo_num)+".h5")

    # predict
    lstm_data_y_predict = load_model.predict(lstm_data_x)

    plt.figure()
    #plt.plot(lstm_data_y[-150:, 0], lw=2, label="data")
    #plt.plot(lstm_data_y_predict[-150:, 0], '--', lw=2, label="predict")
    plt.plot(lstm_data_y, lw=2, label="data")
    plt.plot(lstm_data_y_predict, '--', lw=2, label="predict")
    plt.legend()
    #plt.savefig("test.png")
    plt.show()
def predict(sentence):
    # predict the intent of a sentence
    text = sentence
    text = text.replace("\n", "").replace("\r", "").replace("\t", "")
    bert_model = BertVector(pooling_strategy="NONE", max_seq_len=200)

    # encode the sentence as a vector
    vec = bert_model.encode([text])["encodes"][0]
    x_train = np.array([vec])

    # model prediction: keep every label whose score exceeds 0.5
    predicted = load_model.predict(x_train)[0]
    indices = [i for i in range(len(predicted)) if predicted[i] > 0.5]

    with open(
            "/Users/xuzhang/Documents/STUDY/Github/IntentRec/utils/event_type.json",
            "r", encoding="utf-8") as g:
        movie_genres = json.loads(g.read())

    #print("Sentence: %s" % text)
    #print("Intent analysis: %s" % "|".join([movie_genres[index] for index in indices]))
    return "|".join([movie_genres[index] for index in indices])
def main(x):
    # load JSON and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model_1 = model_from_json(loaded_model_json)

    # load YAML and create model
    yaml_file = open('model.yaml', 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    loaded_model_2 = model_from_yaml(loaded_model_yaml)

    # load weights into the new model
    loaded_model_1.load_weights("model.h5")  # json
    # loaded_model_2.load_weights("model.h5")  # yaml
    print("Loaded model from disk")

    results = loaded_model_1.predict(x)  # was load_model.predict, which is undefined here
    print(results)
def main():
    if flask.request.method == 'GET':
        dataError = {}
        dataError['prediction'] = "Method provided is GET. Please try using the POST method."
        json_data = json.dumps(dataError)
        resp = make_response(json_data)
        resp.status_code = 200  # a 405 status would be more conventional for a disallowed method
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp

    if flask.request.method == 'POST':
        song = flask.request.form['song']
        translate_client = translate.Client.from_service_account_json('key.json')
        song = [song]
        target = 'en'
        translation = translate_client.translate(song[0], target_language=target)
        tsong = format(translation['translatedText'])

        init()
        x_input = loaded_vec.transform([tsong])
        prediction = int(np.round(load_model.predict(x_input))[0])

        data = {}
        if int(prediction) == 1:
            data['Prediction'] = 'Sad'
        else:
            data['Prediction'] = 'Happy'

        json_data = json.dumps(data)
        resp = make_response(json_data)
        resp.status_code = 200
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp
def main():
    # note: sys.argv[0] is normally the script name; the index checks below follow the original
    if len(sys.argv) > 18:
        print('You have exceeded the number of input arguments')
        sys.exit()
    if len(sys.argv) < 18:  # was sys.arg, which raises AttributeError
        print('You have less than the required number of input arguments')
        sys.exit()
    else:
        # the 16 feature values follow the command and format arguments;
        # predict needs a numeric array, not a list of strings
        x = np.array([sys.argv[2:18]], dtype=float)
        if sys.argv[0] == 'infer' and sys.argv[1] == 'json':  # was `&`, which fails on strings
            json_file = open('model.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            loaded_model = model_from_json(loaded_model_json)
            # load weights into the new model
            loaded_model.load_weights("seq_model.h5")
            results = loaded_model.predict(x)  # was load_model.predict, undefined here
            print(results)
        if sys.argv[0] == 'infer' and sys.argv[1] == 'yaml':
            yaml_file = open('model.yaml', 'r')
            loaded_model_yaml = yaml_file.read()
            yaml_file.close()
            loaded_model = model_from_yaml(loaded_model_yaml)
            # load weights into the new model
            loaded_model.load_weights("seq_model.h5")
            results = loaded_model.predict(x)
            print(results)
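# A minimal sketch (not from the original script) of the same CLI dispatch using
# argparse; the argument names and the assumption of 16 numeric features are illustrative.
import argparse
import numpy as np

def parse_infer_args():
    parser = argparse.ArgumentParser(description="Run inference with a saved model")
    parser.add_argument("command", choices=["infer"])
    parser.add_argument("format", choices=["json", "yaml"])
    parser.add_argument("features", nargs=16, type=float,
                        help="the 16 numeric feature values")
    return parser.parse_args()

# usage: args = parse_infer_args(); x = np.array([args.features])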
def predict():
    print("You are in Predict")
    int_features = [int(x) for x in request.form.values()]

    # 24 hourly seed values for the sliding window
    values = (84., 88., 144., 125., 124., 211., 303., 242., 111., 132., 70., 53.,
              47., 54., 61., 66., 33., 40., 29., 57., 57., 75., 89., 92.)
    scaler = MinMaxScaler(feature_range=(0, 1))
    values = np.reshape(values, (-1, 1))
    values = scaler.fit_transform(values)

    pred = []
    for i in range(0, int_features[0]):
        p = load_model.predict(np.asarray(values).astype(np.float32).reshape(1, 24, 1))
        pred.append(p)
        # slide the 24-step window one step forward and append the new prediction
        values = np.roll(values, -1, axis=0)
        values[-1] = p

    aa = pd.date_range('2018-04-20 01:00:00', periods=int_features[0], freq='H')
    df = pd.DataFrame(aa, columns=['Date'])

    prediction = []
    for i in range(0, int_features[0]):
        prediction.append(pred[i][0][0])
    prediction = np.reshape(prediction, (-1, 1))
    pred = scaler.inverse_transform(prediction)
    df["Prediction"] = pred

    table = df.to_html(escape=False)
    table = Markup(table)
    print("End of def")
    return render_template('index__1.html', table=table)
def __init__(self, image):
    super(Final, self).__init__()
    root = Tk()
    root.title("Final")
    root.geometry("600x600")
    root.resizable(0, 0)

    load_model = model()
    load_model.load_model("cnn1.h5")
    images = load_model.predict(image)

    height, width, no_channels = images["orignal_image"].shape
    frame = Frame(root, width=width, height=height,
                  highlightthickness=1, highlightbackground="black")
    canvas = Canvas(frame, width=width, height=height)
    c_img = cv2.cvtColor(images["orignal_image"].copy(), cv2.COLOR_BGR2RGB)
    photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(c_img))
    image_on_canvas = canvas.create_image(0, 0, image=photo, anchor=NW)
    canvas.pack()
    frame.pack()

    frame1 = Frame(root, width=350, height=30)
    frame1.pack()
    canvas1 = Canvas(frame1, bg='#FFFFFF', width=300, height=300,
                     scrollregion=(0, 0, 30 * len(images["images"]), 100))
    hbar = Scrollbar(frame1, orient=HORIZONTAL)
    hbar.pack(side=BOTTOM, fill=X)
    hbar.config(command=canvas1.xview)
    canvas1.config(width=300, height=300)
    canvas1.config(xscrollcommand=hbar.set)

    canvass = []
    photo1 = []
    i = 0
    for img in images["images"]:
        canvass.append(Canvas(canvas1, width=28, height=80))
        im = img["image"].copy()
        photo1.append(PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(im)))
        canvass[i].create_text(12, 10, fill="darkblue",
                               font="Times 20 italic bold", text=img["output"])
        image_on_canvas = canvass[i].create_image(0, 30, image=photo1[i], anchor=NW)
        canvass[i].pack(side=LEFT)
        i += 1
    canvas1.pack(side=LEFT, expand=True, fill=BOTH)
    root.mainloop()
def post(self):
    text = self.get_argument("text")
    # encode the sentence as a vector
    vec = bert_model.encode([text])["encodes"][0]
    x_train = np.array([vec])
    # model prediction
    predicted = load_model.predict(x_train)
    y = np.argmax(predicted[0])
    label = '是' if y else "否"
    # return the result
    result = {"原文": text, "是否属于出访类事件?": label}
    self.write(json.dumps(result, ensure_ascii=False, indent=2))
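# A hedged usage sketch for the handler above: the host, port, and route are
# assumptions; adjust them to wherever the Tornado application is mounted.
import requests

resp = requests.post("http://localhost:8000/predict", data={"text": "..."})
print(resp.json())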
import cv2
import os
import numpy as np
import keras.backend as K
import tensorflow as tf
from keras.models import load_model
from keras.datasets import mnist
from keras import Model
from keras.utils import to_categorical
from numpy import savetxt

os.environ['CUDA_VISIBLE_DEVICES'] = '2'

load_model = load_model("./lenet2.h5")

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
# pad the 28x28 MNIST digits to the 32x32 input this LeNet expects
x_train = np.pad(x_train, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
x_test = np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

print("Using loaded model to predict...")
load_model.summary()

img = x_test[0]
img = img.reshape(-1, 32, 32, 1)
flatten_out = load_model.predict(img)
print("Done")
sio.savemat(datanew, {'raw_seg': slice_seg})

# vol_size
vol_size = (160, 192, 224)
vol_size1 = (160, 224, 1)

# set the model directory
m_dir = '/home/ys895/Models/slice100_500.h5'

# set the model
load_model = un.unet(pretrained_weights=m_dir,
                     input_size=vol_size1,
                     label_nums=len(labels_data))

# predict
p_outcome = load_model.predict(slice_vol)

# visualization of the outputs
print(p_outcome)
datanew = 'output_data.mat'
sio.savemat(datanew, {'output': p_outcome})

# change the dimension of the output
# plot the image
#nplt.slices(p_outcome, show = None)

i = 100

# set model
for (vol_data, seg_data) in genera.vol_seg(vol_data_dir,
                                           seg_data_dir,
                                           relabel=labels_data,
    # (fragment) tail of CharacterTable.decode; a full sketch of the class follows this snippet
    return ''.join(self.indices_char[i] for i in x)

chars = '0123456789kg._= '
ctable = CharacterTable(chars)

print("Using loaded model to predict...")
load_model = load_model("data/conversion_rnn_model.h5")

unknown = sys.argv[1]
query = unknown + ' ' * (MAXLEN - len(unknown))
# Answers can be of maximum size DIGITS + 1.
if REVERSE:
    # Reverse the query, e.g., '12+345  ' becomes '  543+21'. (Note the
    # space used for padding.)
    query = query[::-1]
x = ctable.encode(query, MAXLEN)
imagelist = []
imagelist.append(x)

print("Expression: " + unknown)
begin = datetime.datetime.now()
predicted = load_model.predict(np.asarray(imagelist))
# print("\nPredicted softmax vector is: ")
# print(predicted)
guess = ctable.decode(predicted[0])
print("Result: " + guess)
end = datetime.datetime.now()
k = end - begin
print("Elapsed: " + str(k.total_seconds()) + "s")
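# A minimal sketch of the CharacterTable one-hot codec assumed above, modeled on
# the standard Keras addition-RNN example; the original implementation may differ.
import numpy as np

class CharacterTable:
    def __init__(self, chars):
        self.chars = sorted(set(chars))
        self.char_indices = {c: i for i, c in enumerate(self.chars)}
        self.indices_char = {i: c for i, c in enumerate(self.chars)}

    def encode(self, text, num_rows):
        # one-hot encode `text` into a (num_rows, len(chars)) matrix
        x = np.zeros((num_rows, len(self.chars)))
        for i, c in enumerate(text):
            x[i, self.char_indices[c]] = 1
        return x

    def decode(self, x, calc_argmax=True):
        # invert encode(); accepts either one-hot rows or index arrays
        if calc_argmax:
            x = x.argmax(axis=-1)
        return ''.join(self.indices_char[i] for i in x)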
    plt.ylabel('loss')
    # plt.legend(loc="upper right")
    plt.savefig("loss.png")
    plt.show()

if __name__ == '__main__':
    job = input()
    if job == 'train':
        epochs = 2000
        model = keras.models.Sequential([keras.layers.Dense(1, input_dim=13)])
        model.summary()
        model.compile(optimizer='adam', loss='mse')
        history = LossHistory()
        model.fit(x_train, y_train, epochs=epochs, callbacks=[history])
        model.evaluate(x_test, y_test)
        history.loss_plot('epoch')
        # model.save('./boston_model_' + str(epochs) + '.h5')
    else:
        model_path = './boston_model_2000.h5'
        load_model = load_model(model_path)
        predicted = load_model.predict(x_test)
        plt.title('Neural network result (2000 epochs)')
        plt.scatter(y_test, predicted, color='y', marker='o')
        plt.plot(y_test, y_test, color='g')
        plt.xlabel('True value')
        plt.ylabel('Predicted value')
        plt.savefig('./nn_500.png')  # note: filename says 500, but this model was trained for 2000 epochs
        plt.show()
        print('NN RMSE:', np.sqrt(mean_squared_error(y_test, predicted)))
def predictReturn():
    print("*** Computing returns from UP/DOWN predictions of Samsung Electronics' next-business-day close ***\n")

    startDate = input("Enter the start date, e.g. YYYY-MM-DD:\n")
    if startDate == "":
        startDate = '2021-02-19'

    endDate = input("Enter the end date, e.g. YYYY-MM-DD:\n")
    if endDate == "":
        endDate = (date.today() - timedelta(2)).isoformat()
    elif datetime.strptime(endDate, "%Y-%m-%d") > (datetime.today() - timedelta(2)):
        print("Adjusting the end date.")
        endDate = (date.today() - timedelta(2)).isoformat()

    predict_method = input("Choose a prediction model (ML / DL):\n")
    if predict_method == "":
        predict_method = "DL"
    else:
        predict_method = predict_method.upper()

    # adjust the start and end dates to business days
    samsung = fdr.DataReader('005930', startDate, endDate)
    startDate = samsung.index[0].strftime("%Y-%m-%d")
    endDate = samsung.index[-1].strftime("%Y-%m-%d")
    print(f'Start date (business day): {startDate}')
    print(f'End date (business day): {endDate}')
    print(f'Prediction model: {predict_method}\n')

    # 1. load the feature data for the target period
    data = loadFeatureList(startDate, endDate)
    if data is None:
        print("Exiting.")
        return

    # 2. print the assumptions
    print("\n* Returns are computed under the following assumptions:")
    print("  1. We predict the change from today's close to the next day's close.")
    print("  2. Trade price = today's close = next day's open.")
    print("  3. Trading fees and taxes are ignored.")
    print("  4. Regardless of the actual share price, the entire current balance is always invested.")
    print("  5. A positive change is shown as Up; zero or negative as Down.\n")

    # 3. run
    if predict_method == 'ML':
        pd_columns = [
            'Holding', 'Prediction', 'Trade', 'Actual', 'Actual close',
            'Actual next close', 'Actual change', 'Balance', 'Stock account',
            'Return', 'Accuracy'
        ]
    else:
        pd_columns = [
            'Holding', 'Prediction', 'Confidence', 'Trade', 'Actual',
            'Actual close', 'Actual next close', 'Actual change', 'Balance',
            'Stock account', 'Return', 'Accuracy'
        ]

    # cash balance
    bullet = 10000
    balance = bullet
    # stock account
    purchase = 0
    # holding state: holding / not holding
    current_state = nstate[1]
    next_state = nstate[1]
    # trading state: buy / sell / hold
    trading_state = None
    # accuracy
    acc = None
    correct_num = 0
    iter_num = 1

    if predict_method == 'ML':
        data = data[startDate:endDate]
        load_model = getMLModel()
        if load_model is None:
            return
        iter_start = 0
        iter_end = len(data.index)
        init_log = pd.Series(
            ['-', '-', '-', '-', '-', '-', '-', bullet, '-', '-', '-'],
            index=pd_columns)
    else:
        load_model = getDLModel()
        if load_model is None:
            return
        iter_start = num_step - 1
        iter_end = len(data.index)
        init_log = pd.Series(
            ['-', '-', '-', '-', '-', '-', '-', '-', bullet, '-', '-', '-'],
            index=pd_columns)

    init_log = pd.DataFrame([init_log], columns=pd_columns, index=['Start'])
    result = pd.DataFrame(columns=pd_columns)
    result.index.name = 'Day'
    result = pd.concat([result, init_log])

    for i in range(iter_start, iter_end):
        if predict_method == 'ML':
            row = data.iloc[[i]]
        else:
            row = data.iloc[i - num_step + 1:i + 1]
        tmp_df = pd.DataFrame(row)
        tmp_df.drop(['Y', 'Close', 'Y_close'], inplace=True, axis=1)
        tmp_df = dataToPCA(tmp_df)
        if tmp_df is None:
            return None
        if predict_method == 'DL':
            tmp_df = makeLSTMData(tmp_df)

        real_val = row['Y'].values[-1]
        real_val_percent = round(real_val * 100, 1)
        if real_val_percent > 0:
            real_val_percent = '+' + str(real_val_percent)
        if real_val > 0:
            real = updown[0]
        else:
            real = updown[1]

        tmp_df = tmp_df[-1:]
        pred = load_model.predict(tmp_df)
        if predict_method == 'DL':
            softmax = tf.keras.layers.Softmax()
            prob = softmax(pred)[0].numpy()
            pred = np.argmax(pred, axis=1)[0]
            if prob[pred] > prob_strength:
                strength = pred_strength[0]
            else:
                strength = pred_strength[1]
            if pred == 1:
                pred = updown[0]
            else:
                pred = updown[1]

        if real == pred:
            correct_num = correct_num + 1
        # compute accuracy
        acc = round(correct_num / iter_num * 100, 2)

        # compute the next state, balance, and stock account from the current state
        current_state = next_state
        if pred == updown[0]:
            if current_state == nstate[1]:
                trading_state = tstate[0]
                next_state = nstate[0]
                purchase = int(balance * (1 + real_val))
                balance = 0
            elif current_state == nstate[0]:
                trading_state = tstate[2]
                purchase = int(purchase * (1 + real_val))
        elif pred == updown[1]:
            if current_state == nstate[1]:
                trading_state = tstate[2]
            elif current_state == nstate[0]:
                trading_state = tstate[1]
                next_state = nstate[1]
                balance = purchase
                purchase = 0

        if balance == 0 and purchase != 0:
            returnRate = round((purchase - bullet) / bullet * 100, 2)
        else:
            returnRate = round((balance - bullet) / bullet * 100, 2)
        if returnRate > 0:
            returnRate = '+' + str(returnRate) + '%'
        else:
            returnRate = str(returnRate) + '%'

        if predict_method == 'DL':
            trading_log = pd.Series([
                current_state, pred, strength, trading_state, real,
                row['Close'].values[-1], row['Y_close'].values[-1],
                str(real_val_percent) + '%', balance, purchase, returnRate,
                str(acc) + '%'
            ], index=pd_columns)
        else:
            trading_log = pd.Series([
                current_state, pred, trading_state, real,
                row['Close'].values[-1], row['Y_close'].values[-1],
                str(real_val_percent) + '%', balance, purchase, returnRate,
                str(acc) + '%'
            ], index=pd_columns)
        trading_log = pd.DataFrame(
            [trading_log], columns=pd_columns,
            index=[row.index[-1].strftime(format='%Y-%m-%d')])
        result = pd.concat([result, trading_log])
        iter_num = iter_num + 1

    # final return rate
    if balance == 0 and purchase != 0:
        returnRate = str(round((purchase - bullet) / bullet * 100, 2)) + '%'
    else:
        returnRate = str(round((balance - bullet) / bullet * 100, 2)) + '%'

    # compare against a buy-and-hold strategy
    samsung = fdr.DataReader('005930', startDate)
    endiloc = samsung.index.get_loc(
        samsung[samsung.index == endDate].iloc[-1].name)
    buyPrice = samsung['Close'][startDate]
    sellPrice = samsung['Close'].iloc[endiloc + 1]
    buyNhold = round((sellPrice - buyPrice) / buyPrice * 100, 2)

    return (buyNhold, result, returnRate)
import numpy as np
import keras as K
import tensorflow as tf

def main():
    # 0. start
    print("\nIris dataset using Keras/TensorFlow ")
    np.random.seed(4)
    tf.set_random_seed(13)

    # 1. read the CSV dataset
    print("Loading Iris data into memory")
    CSV_FILE_PATH = 'E://iris.csv'
    train_x, test_x, train_y, test_y, Class_dict = load_data(CSV_FILE_PATH)

    # 2. define the model
    init = K.initializers.glorot_uniform(seed=1)
    simple_adam = K.optimizers.Adam()
    model = K.models.Sequential()
    model.add(K.layers.Dense(units=5, input_dim=4, kernel_initializer=init, activation='relu'))
    model.add(K.layers.Dense(units=6, kernel_initializer=init, activation='relu'))
    model.add(K.layers.Dense(units=3, kernel_initializer=init, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=simple_adam, metrics=['accuracy'])

    # 3. train the model
    b_size = 1
    max_epochs = 10
    print("Starting training ")
    h = model.fit(train_x, train_y, batch_size=b_size, epochs=max_epochs, shuffle=True, verbose=1)
    print("Training finished \n")

    # 4. evaluate the model
    eval_results = model.evaluate(test_x, test_y, verbose=0)  # renamed from `eval`, which shadows the builtin
    print("Evaluation on test data: loss = %0.6f accuracy = %0.2f%% \n"
          % (eval_results[0], eval_results[1] * 100))

    # 5. save the model
    print("Saving model to disk \n")
    mp = "E://logs/iris_model.h5"
    model.save(mp)

    # 6. use the model for prediction
    np.set_printoptions(precision=4)
    unknown = np.array([[6.1, 3.1, 5.1, 1.1]], dtype=np.float32)
    predicted = model.predict(unknown)
    print("Using model to predict species for features: ")
    print(unknown)
    print("\nPredicted softmax vector is: ")
    print(predicted)
    species_dict = {v: k for k, v in Class_dict.items()}
    print("\nPredicted species is: ")
    print(species_dict[np.argmax(predicted)])

    # 7. load and use the saved model
    from keras.models import load_model
    print("Using loaded model to predict...")
    load_model = load_model(mp)
    predicted = load_model.predict(unknown)
    print("Using model to predict species for features: ")
    print(unknown)
    print("\nPredicted softmax vector is: ")
    print(predicted)
    print("\nPredicted species is: ")
    print(species_dict[np.argmax(predicted)])
# prediction = model.predict(x, verbose=0)
# prediction = np.argmax(prediction)
# values.append(prediction)
# new_document.append(prediction)
#
# new_document = reverse_document((new_document))
# print(new_document)

# load and use the saved model
from keras.models import load_model
from keras.preprocessing import sequence
import numpy as np

x_max_len = 141
seq = '14215 14216 18 82 6 82 12 187 10 19 13 141 11 2 394 89 25 104 107 21 5 23 47 24 2493 14215 14216 91 75 90 1 1093 427 62 33 132 56 899 77 193 368 76 184 3'
seq = seq.split(' ')
# pad the token sequence with zeros up to length 534
for i in range(534 - len(seq)):
    seq.append(0)
seq = list(map(int, seq))

model_hd5_file = './save/model.hd5'
print("Using loaded model to predict...")
load_model = load_model(model_hd5_file)
np.set_printoptions(precision=4)
unknown = np.array([seq], dtype=np.float32)
predicted = load_model.predict(unknown)
print("Using model to predict species for features: ")
print(unknown)
print("\nPredicted softmax vector is: ")
print(predicted)
load_model = load_model("./lenet.h5")

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

print("Using loaded model to predict...")
# build an intermediate model that exposes the flatten-layer activations
model_output = load_model.get_layer("flatten").output
m = Model(inputs=load_model.input, outputs=model_output)

flatten_layer_data = np.zeros((numSamples, 400))
print(flatten_layer_data.shape)

for num in range(0, numSamples):
    img = x_train[num]
    img = img.reshape(-1, 28, 28, 1)
    predicted = load_model.predict(img)
    predicted = np.argmax(predicted)
    # collect the 400-dimensional flatten-layer features for each sample
    flatten_out = m.predict(img)
    flatten_layer_data[num] = flatten_out

# note: this writes all 60000 one-hot labels, while train_data.csv holds only numSamples rows
savetxt('train_labels.csv', y_train, delimiter=',')
savetxt('train_data.csv', flatten_layer_data, delimiter=',')
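# The per-image loop above can also be done in one batched call; a sketch,
# assuming the whole sample block fits in memory:
batch = x_train[:numSamples].reshape(-1, 28, 28, 1)
flatten_layer_data = m.predict(batch)  # shape: (numSamples, 400)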
                 # (fragment) tail of the un.unet(...) call from the model set-up
                 input_size=vol_size1,
                 label_nums=len(labels_data))

# test the model with 3D volumes, slice by slice
for (vol_data, seg_data) in genera.vol_seg(vol_data_dir,
                                           seg_data_dir,
                                           relabel=labels_data,
                                           nb_labels_reshape=len(labels_data),
                                           iteration_time=iter_num):
    concatenate_outcome = np.empty(seg_data.shape)
    for i in range(0, 191):  # note: if the slice axis has 192 entries, the last one is skipped
        vol_train = vol_data[:, :, i, :, :]
        # predict each slice and concatenate the outcomes
        #slice_outcome = load_model.predict(slice_vol)
        concatenate_outcome[:, :, i, :, :] = load_model.predict(vol_train)
    # reshape returns a new array; the original discarded the result
    concatenate_outcome = concatenate_outcome.reshape([
        1, concatenate_outcome.shape[1], concatenate_outcome.shape[2],
        concatenate_outcome.shape[3], concatenate_outcome.shape[4]
    ])
    #print('the shape of the output:')
    #print(concatenate_outcome.shape)

    # compute the Dice score of the test example
    print('the dice score of the test is:')
    #dice_score = nm.Dice(nb_labels=len(labels_data), input_type='prob', dice_type='hard').dice(seg_data, concatenate_outcome)
    vals, _ = dice(concatenate_outcome, seg_data, nargout=2)
    print(np.mean(vals), np.std(vals))
    #print(dice_score)
model.save('my_model.h5')
del model

from keras.models import load_model
load_model = load_model('my_model.h5')

print("Training evaluation..................")
loss, acc = load_model.evaluate([X, Xq], Y, batch_size=BATCH_SIZE)
print(' loss - accuracy = {:.4f} / {:.4f}'.format(loss, acc))
print("\n\n")

print("Testing..............")
loss, acc = load_model.evaluate([tX, tXq], tY, batch_size=BATCH_SIZE)
print('loss - accuracy = {:.4f} / {:.4f}'.format(loss, acc))
print("\n\n")

print("Test Sample : \n")
predicted_Labels = load_model.predict([tX, tXq])
print(predicted_Labels)
target_names = ["+", "*", "-", "/"]
labels_test = [np.argmax(lb) for lb in predicted_Labels]
print("labels_test: ", labels_test)
expected = [np.argmax(le) for le in tY]
print("\n---------------------------------\n")
print("seq2seq Classifier Test Accuracy: ",
      accuracy_score(expected, labels_test) * 100)
print("\n---------------------------------\n")
print("\n")
load_model = load_model(model_path)

total = 0.
right = 0.
step = 0
all_class = list('0123456789abcdefghijklmnopqrstuvwxyz')

qq = 1
while True:
    file_name = f'./img_folder/{qq}.jpg'
    if not os.path.isfile(file_name):
        break
    samples = glob.glob(file_name)
    img_predict = data_generator(samples, 1)
    for x, y in img_predict:
        # decode each character head's argmax into the predicted string
        preds = load_model.predict(x)
        preds = np.array([i.argmax(axis=1) for i in preds]).T
        predi = [''.join([all_class[k] for k in i]) for i in preds]
        print(predi[0])
        break
    qq += 1
    time.sleep(1)

# for x, y in data_generator(samples, 1):
#     _ = load_model.predict(x)
#     _ = np.array([i.argmax(axis=1) for i in _]).T
#     print('==' * 20)
#     predict = [''.join([all_class[k] for k in i]) for i in _]
#     print('predict:', predict)
#     break
"choking_model_final.h5", # type in the filepath of the saved model custom_objects={ 'Prec': Prec, 'Rec': Rec }) from test_loader import test_loader import matplotlib.pyplot as plt import numpy as np import json factor = 0 test_loader = test_loader() test_choking_x = test_loader.read( "test_choking") #type in the filepath of the test folder y_pred = load_model.predict(test_choking_x) y_pred = np.array(y_pred) - factor time = range(1, len(y_pred) + 1) jsontext = {"points": []} for i in range(len(y_pred)): jsontext["points"].append({ "Time": str(i), "predict_result": str(y_pred[i]) }) jsondata = json.dumps(jsontext, indent=4, separators=(",", ": ")) f = open("data summary/video.json", "w") # type in the filepath you want to save the json file f.write(jsondata) f.close()
load_model = model_from_json(loaded_model)
load_model.load_weights('model.h5')

v = cv2.VideoCapture(0)

# load the images
image = Image.open('face_332.jpg')  # test image path
image1 = Image.open('File_10,027.jpg')
im = image.resize((200, 200))
im1 = image1.resize((200, 200))
im = np.asarray(im)
im = np.reshape(im, (1, im.shape[0], im.shape[1], im.shape[2]))
im1 = np.asarray(im1)
im1 = np.reshape(im1, (1, im1.shape[0], im1.shape[1], im1.shape[2]))

# make predictions
# note: for a sigmoid output, thresholding (prediction > 0.5) is safer than exact equality with 1
prediction = load_model.predict(im)
if prediction == 1:
    print('Real Face')
else:
    print('Fake Face')

prediction = load_model.predict(im1)
if prediction == 1:
    print('Real Face')
else:
    print('Fake Face')
        # (fragment) tail of the loop that builds `texts` by stripping whitespace characters
        '\u3000', '').replace('\xa0', ''))

for i in data_train['一级分类']:
    y_tr.append(classification.index(i))
    # y_tr.append(i)

labels = []
bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=470)

# predict each of the sentences above
for text in texts:
    # encode the sentence as a vector
    vec = bert_model.encode([text])["encodes"][0]
    x_train = np.array([vec])
    # model prediction
    predicted = load_model.predict(x_train)
    y = np.argmax(predicted[0])
    # print(y)
    # label = 'Y' if y else 'N'
    labels.append(y)

# for text, y_label, label in zip(texts, y_tr, labels):
#     print('%s\t%s\t%s' % (label, y_label, text))

df = pd.DataFrame({
    '留言': texts,
    "原始类别": y_tr,
    "预测类别": labels
}, columns=['留言', "原始类别", "预测类别"])
print(df)
import keras
import cv2
import numpy as np
from keras.models import load_model

print("Using loaded model to predict...")
load_model = load_model("./lenet.h5")

img = cv2.imread('test.png', cv2.IMREAD_GRAYSCALE)
output = img.copy()
img = img.reshape(-1, 28, 28, 1)
img = img.astype('float32')
predicted = load_model.predict(img)

# print the prediction
print(predicted)
predicted = np.argmax(predicted)
print(predicted)

result = "The Number Is {:d}".format(predicted)
output = cv2.resize(output, (512, 512))
cv2.putText(output, result, (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
            (155, 155, 0), 2)
cv2.imshow("img", output)
cv2.waitKey(0)
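# A minimal follow-up sketch, assuming lenet.h5 was trained on pixels scaled to
# [0, 1] (a common convention; the training code is not shown here):
img = img / 255.0
predicted = load_model.predict(img)
print(np.argmax(predicted))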