def __getstate__(self):
    """Serialize the wrapped Keras model to raw bytes for pickling.

    The model is written to a temporary HDF5 file and its bytes are
    captured, so the pickle payload is self-contained.
    """
    with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
        save_model(self, fd.name, overwrite=True)
        serialized = fd.read()
    # Only the raw HDF5 bytes are needed to restore the model later.
    return {'model_str': serialized}
def check_model(model, model_name, x, y, check_model_io=True):
    """Compile, train and evaluate *model*, then round-trip its weights
    and (optionally) the full model file.

    :param model: Keras model under test.
    :param model_name: prefix used for temporary files and log lines.
    :param x: training inputs.
    :param y: training targets.
    :param check_model_io: also test full-model save/load when True.
    :return: None
    """
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(model_name + " test train valid pass!")

    weights_path = model_name + '_weights.h5'
    model.save_weights(weights_path)
    model.load_weights(weights_path)
    os.remove(weights_path)
    print(model_name + " test save load weight pass!")

    if check_model_io:
        model_path = model_name + '.h5'
        save_model(model, model_path)
        model = load_model(model_path, custom_objects)
        os.remove(model_path)
        print(model_name + " test save load model pass!")

    print(model_name + " test pass!")
def save(self, path):
    """Persist the underlying Keras model to ``path + '.h5'``.

    :param path: destination path without the ``.h5`` extension.
    :raises AlgorithmException: if saving fails for any reason.
    """
    print(path)
    try:
        # Save under the graph the model was built with, otherwise Keras
        # raises "tensor is not an element of this graph".
        with SimpleNN.tensorflow_graph.as_default():
            save_model(self.model, path + '.h5')
    except Exception as e:
        # Chain the original exception so the full traceback is preserved
        # (the original `raise AlgorithmException(str(e))` discarded it).
        raise AlgorithmException(str(e)) from e
def train_rnn(filename):
    """Train a stacked-GRU classifier on tokenized data from *filename*.

    Loads the token splits, builds an embedding + GRU stack over frozen
    pretrained embeddings, fits for a few epochs and saves the model.

    :param filename: data file handed to ``my_data.generate_rnn``.
    :return: the trained Keras model.
    """
    (X_train_token, X_dev_token, X_test_token,
     y_train, y_dev, y_test, tokenizer) = my_data.generate_rnn(filename)
    paragram_embeddings = load_para(tokenizer.word_index)

    model = Sequential()
    model.add(Embedding(weights=[paragram_embeddings],
                        trainable=False,
                        input_dim=num_words,
                        output_dim=embedding_size,
                        input_length=max_tokens))
    # Tapering GRU stack; dropout only on the second layer.
    for units, extra in ((32, {'return_sequences': True}),
                         (16, {'dropout': 0.5, 'return_sequences': True}),
                         (8, {'return_sequences': True}),
                         (4, {})):
        model.add(GRU(units=units, **extra))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=1e-3),
                  metrics=['AUC', 'accuracy'])
    model.summary()

    history = model.fit(np.array(X_train_token), y_train,
                        validation_data=(np.array(X_dev_token), y_dev),
                        epochs=4, batch_size=500)
    save_model(model, path + 'rnn_model_ref.h5')
    logging.info('train complete')
    return model
def test_FNN(sparse_feature_num):
    """Smoke-test FNN: train, weight round-trip, full-model round-trip.

    :param sparse_feature_num: number of sparse (and dense) fake features.
    """
    model_name = "FNN"
    sample_size = 64
    feature_dim_dict = {"sparse": {}, 'dense': []}
    # NOTE(review): both entries use sparse_feature_num — the dense count
    # mirrors the sparse one by design of this test's single parameter.
    for name, num in zip(["sparse", "dense"],
                         [sparse_feature_num, sparse_feature_num]):
        if name == "sparse":
            for i in range(num):
                feature_dim_dict[name][name + '_' + str(i)] = np.random.randint(1, 10)
        else:
            for i in range(num):
                feature_dim_dict[name].append(name + '_' + str(i))
    sparse_input = [np.random.randint(0, dim, sample_size)
                    for dim in feature_dim_dict['sparse'].values()]
    dense_input = [np.random.random(sample_size)
                   for name in feature_dim_dict['dense']]
    y = np.random.randint(0, 2, sample_size)
    x = sparse_input + dense_input

    model = FNN(feature_dim_dict, hidden_size=[32, 32], keep_prob=0.5, )
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(model_name + " test train valid pass!")
    model.save_weights(model_name + '_weights.h5')
    model.load_weights(model_name + '_weights.h5')
    # Remove artifacts so repeated test runs don't accumulate files
    # (the original left both .h5 files on disk).
    os.remove(model_name + '_weights.h5')
    print(model_name + " test save load weight pass!")
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    os.remove(model_name + '.h5')
    print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
def save_model(self, name: str = None, use_timestamp: bool = True):
    """Save the wrapped model under an auto-generated file name.

    :param name: base name; defaults to the class name when omitted.
    :param use_timestamp: append a ``%Y%m%d-%H%M%S`` timestamp when True.
    """
    base = self.__class__.__name__ if name is None else name
    filename = f"model_{base}"
    if use_timestamp:
        # NOTE(review): no separator before the timestamp — the original
        # produced e.g. "model_Foo20200101-120000"; preserved as-is.
        filename += f"{time.strftime('%Y%m%d-%H%M%S')}"
    save_model(self.model, filename)
def test_DIN_sum():
    """Smoke-test DIN in sum-pooling mode (use_din=False)."""
    model_name = "DIN_sum"
    x, y, feature_dim_dict, behavior_feature_list = get_xy_fd()
    model = DIN(feature_dim_dict, behavior_feature_list, hist_len_max=4,
                embedding_size=8, use_din=False,
                hidden_size=[4, 4, 4], keep_prob=0.6, activation="sigmoid")
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(x, y, verbose=1, validation_split=0.5)
    print(model_name + " test train valid pass!")
    model.save_weights(model_name + '_weights.h5')
    model.load_weights(model_name + '_weights.h5')
    # Remove artifacts so repeated runs don't leave files behind
    # (the original leaked both .h5 files).
    os.remove(model_name + '_weights.h5')
    print(model_name + " test save load weight pass!")
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    os.remove(model_name + '.h5')
    print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
def xdeepfm_model(linear_feature_columns, dnn_feature_columns,
                  train_model_input, train, test_model_input, test):
    """Train an xDeepFM model, evaluate it on the test split and save it.

    :param linear_feature_columns: feature columns for the linear part.
    :param dnn_feature_columns: feature columns for the DNN/CIN part.
    :param train_model_input: dict of training feature arrays.
    :param train: training DataFrame containing the target column.
    :param test_model_input: dict of test feature arrays.
    :param test: test DataFrame containing the target column.
    :return: one-row DataFrame with RMSE / MAE / MSE / AUC metrics.
    """
    cols = ['model', 'RMSE', 'MAE', 'MSE', 'AUC', 'score']
    df_result = pd.DataFrame(columns=cols, index=range(1))
    model = xDeepFM(linear_feature_columns, dnn_feature_columns,
                    dnn_hidden_units=config.xdeepfm_att["dnn_hidden_units"],
                    init_std=config.xdeepfm_att["init_std"],
                    cin_layer_size=config.xdeepfm_att["cin_layer_size"],
                    cin_split_half=config.xdeepfm_att["cin_split_half"],
                    cin_activation=config.xdeepfm_att["cin_activation"],
                    l2_reg_cin=config.xdeepfm_att["l2_reg_cin"],
                    seed=config.xdeepfm_att["seed"],
                    dnn_dropout=config.xdeepfm_att["dnn_dropout"],
                    dnn_activation=config.xdeepfm_att["dnn_activation"],
                    task=config.xdeepfm_att["task"],
                    dnn_use_bn=config.xdeepfm_att["dnn_use_bn"])
    model.compile("adam", "mse", metrics=['mse'])
    history = model.fit(train_model_input, train[target].values,
                        batch_size=256,
                        epochs=config.model_epoch['epoch'],
                        verbose=2, validation_split=0.2)
    pred_ans = model.predict(test_model_input, batch_size=256)
    save_model(model, 'saved_xdeepfm.h5')  # save_model
    auc = roc_auc_score(test[target].values, pred_ans)
    # Use df.loc[row, col]: the original chained attribute assignment
    # (df_result.loc[0].model = ...) writes to a temporary Series view
    # and may silently fail to update the DataFrame.
    df_result.loc[0, 'model'] = "XDeepFM"
    df_result.loc[0, 'RMSE'] = np.round(
        math.sqrt(mean_squared_error(test[target].values, pred_ans)), 3)
    df_result.loc[0, 'MAE'] = np.round(
        mean_absolute_error(test[target].values, pred_ans), 3)
    df_result.loc[0, 'MSE'] = np.round(
        mean_squared_error(test[target].values, pred_ans), 3)
    df_result.loc[0, 'AUC'] = np.round(auc, 3)
    # 'score' column intentionally left unset (formula commented out upstream).
    return df_result
def test_NFM():
    """Smoke-test NFM: train, weight round-trip, full-model round-trip."""
    name = "NFM"
    sample_size = 64
    feature_dim_dict = {'sparse': {'sparse_1': 2, 'sparse_2': 5, 'sparse_3': 10},
                        'dense': ['dense_1', 'dense_2', 'dense_3']}
    sparse_input = [np.random.randint(0, dim, sample_size)
                    for dim in feature_dim_dict['sparse'].values()]
    dense_input = [np.random.random(sample_size)
                   for name in feature_dim_dict['dense']]
    y = np.random.randint(0, 2, sample_size)
    x = sparse_input + dense_input
    model = NFM(feature_dim_dict, embedding_size=8,
                hidden_size=[32, 32], keep_prob=0.5, )
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(name + " test train valid pass!")
    model.save_weights(name + '_weights.h5')
    model.load_weights(name + '_weights.h5')
    # Remove artifacts so repeated runs start clean
    # (the original leaked both .h5 files).
    os.remove(name + '_weights.h5')
    print(name + " test save load weight pass!")
    save_model(model, name + '.h5')
    model = load_model(name + '.h5', custom_objects)
    os.remove(name + '.h5')
    print(name + " test save load model pass!")
    print(name + " test pass!")
def _provide_new_training_data(models, size=64):
    """Generate fresh self-play training data using *models*.

    Each model is persisted to ``<i>.tmp`` so the worker process can
    load it, then every temp file is removed.

    :param models: iterable of Keras models (e.g. value and policy nets).
    :param size: nominal amount of data to produce — NOTE(review): the
        starmap arguments are currently hard-coded; confirm `size` intent.
    """
    models = list(models)
    for i, model in enumerate(models):
        save_model(model, str(i) + '.tmp')
    with Pool(1) as pool:
        pool.starmap(_provide_new_training_data_partial, [([0, 1], 32)] * 4)
    # Remove every temp file we created — the original removed only the
    # first two, leaking files whenever more models were passed.
    for i in range(len(models)):
        os.remove(str(i) + '.tmp')
def save_model(self, model: Model):
    """Saves the given model of the current generation to the shared storage.

    A file lock guards the write so concurrent workers cannot corrupt
    the shared model file.

    Args:
        model: Model that should be saved
    """
    model_path = self.tf_model_path
    lock = FileLock(model_path + '.lock')
    with lock:
        models.save_model(model, model_path)
def _compete(old_models, new_models, size=64):
    """Pit *new_models* against *old_models* over ``size`` games.

    Models are persisted to ``<i>.tmp`` for the worker process; the
    first half of the indices hold the old models, the second half the
    new ones.

    :param old_models: (value, policy) pair of current best models.
    :param new_models: (value, policy) pair of challenger models.
    :param size: total number of games played.
    :return: True if the challengers won at least 55% of the games.
    """
    models = [*old_models, *new_models]
    for i, model in enumerate(models):
        save_model(model, str(i) + '.tmp')
    with Pool(1) as pool:
        wins = sum(pool.starmap(_compete_partial, [([0, 1], [2, 3], 16)] * 4))
    # Clean up every temp file that was written above — the original
    # hard-coded range(4), which breaks if the model count changes.
    for i in range(len(models)):
        os.remove(str(i) + '.tmp')
    return 0.55 * size <= wins
def train_nn(new_X_num, new_y_num, num_student, num_map, save=False):
    """Train a small dense classifier on 128-d embeddings.

    :param new_X_num: training embeddings (128-dim vectors).
    :param new_y_num: integer class labels.
    :param num_student: number of output classes.
    :param num_map: label-to-student mapping, pickled next to the model.
    :param save: persist the model and num_map when True.
    :return: (trained model, num_map)
    """
    start = time.time()
    num_classes = num_student
    val_data = test_manager.generate_validation_data(exclude_unknown=True,
                                                     num_map=num_map)
    recog_model = Sequential([
        Dense(128, activation="tanh", input_dim=128),
        Dense(num_classes, activation="softmax")
    ])
    recog_model.compile(loss=keras.losses.sparse_categorical_crossentropy,
                        optimizer='adam',
                        metrics=['accuracy'])
    training_history = recog_model.fit(
        np.array(new_X_num),
        np.array(new_y_num),
        validation_data=val_data,
        batch_size=batch_size,
        epochs=epochs
    )
    # Summarize training history (accuracy, then loss) — factored out of
    # the original's two copy-pasted plotting sections.
    _plot_history_metric(training_history, 'acc', 'val_acc',
                         'model accuracy', 'accuracy')
    _plot_history_metric(training_history, 'loss', 'val_loss',
                         'model loss', 'loss')
    print('Trained NN model in {:.3f}ms'.format((time.time() - start) * 1000))
    if save:
        save_model(model=recog_model, filepath=file_path)
        with open('{}.num_map.pkl'.format(file_path), 'wb') as file:
            pickle.dump(num_map, file)
    return recog_model, num_map


def _plot_history_metric(history, train_key, val_key, title, ylabel):
    """Plot one train/validation curve pair from a Keras History object."""
    plt.plot(history.history[train_key])
    plt.plot(history.history[val_key])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
def check_model(model, model_name, x, y):
    """Compile and train *model*, then round-trip weights and model file.

    :param model: Keras model under test.
    :param model_name: prefix for temp files and log messages.
    :param x: training inputs.
    :param y: training targets.
    """
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(model_name + " test train valid pass!")
    model.save_weights(model_name + '_weights.h5')
    model.load_weights(model_name + '_weights.h5')
    # Clean up artifacts so repeated test runs don't accumulate files
    # (the original left both .h5 files on disk).
    os.remove(model_name + '_weights.h5')
    print(model_name + " test save load weight pass!")
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    os.remove(model_name + '.h5')
    print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
def learn(iterations=100):
    """AlphaZero-style training loop.

    Repeatedly clones the current best value/policy networks, trains the
    clones on self-play data, and promotes them when they beat the
    current best in head-to-head play.

    :param iterations: number of train/compete cycles to run.
    """
    current_best_val = load_model("../models/alphazerolike/azval.tf")
    current_best_pol = load_model("../models/alphazerolike/azpol.tf")
    candidate_val = clone_model(current_best_val)
    candidate_pol = clone_model(current_best_pol)
    _provide_new_training_data((candidate_val, candidate_pol))
    for _ in tqdm(range(iterations)):
        candidate_val = clone_model(current_best_val)
        candidate_pol = clone_model(current_best_pol)
        _training(candidate_val, candidate_pol)
        if _compete((current_best_val, current_best_pol),
                    (candidate_val, candidate_pol)):
            save_model(candidate_val, "../models/alphazerolike/azval.tf")
            save_model(candidate_pol, "../models/alphazerolike/azpol.tf")
            _provide_new_training_data((candidate_val, candidate_pol))
            # Compute the timestamp once and give each network its own
            # history file: the original wrote both models to the SAME
            # timestamped path, so the policy net overwrote the value
            # net's snapshot.
            stamp = datetime.datetime.now().strftime("%Y_%m_%d%H_%M")
            save_model(candidate_val,
                       "../models/alphazerolike/history/azval_" + stamp)
            save_model(candidate_pol,
                       "../models/alphazerolike/history/azpol_" + stamp)
            current_best_pol = candidate_pol
            current_best_val = candidate_val
def check_model(model, model_name, x, y):
    """Compile and train *model*, then verify weight and full-model
    save/load round-trips.

    :param model: Keras model under test.
    :param model_name: prefix for temporary files and log messages.
    :param x: training inputs.
    :param y: training targets.
    """
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(model_name + " test train valid pass!")
    model.save_weights(model_name + '_weights.h5')
    model.load_weights(model_name + '_weights.h5')
    # Delete the temp files afterwards — the original leaked them.
    os.remove(model_name + '_weights.h5')
    print(model_name + " test save load weight pass!")
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    os.remove(model_name + '.h5')
    print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
def test_DIN_model_io():
    """Build DIN with attention and round-trip it through save/load."""
    model_name = "DIN_att"
    _, _, feature_dim_dict, behavior_feature_list = get_xy_fd()
    model = DIN(feature_dim_dict, behavior_feature_list, hist_len_max=4,
                embedding_size=8, att_activation=Dice, use_din=True,
                hidden_size=[4, 4, 4], keep_prob=0.6,)
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    # model.fit(x, y, verbose=1, validation_split=0.5)
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    # Remove the saved file so the test leaves no artifacts behind
    # (the original leaked it).
    os.remove(model_name + '.h5')
    print(model_name + " test save load model pass!")
def xtest_DIEN_model_io():
    """Build DIEN (no negative sampling) and round-trip it via save/load."""
    model_name = "DIEN"
    _, _, feature_dim_dict, behavior_feature_list = get_xy_fd()
    model = DIEN(feature_dim_dict, behavior_feature_list, hist_len_max=4,
                 embedding_size=8, att_activation=Dice,
                 hidden_size=[4, 4, 4], keep_prob=0.6, use_negsampling=False)
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    # model.fit(x, y, verbose=1, validation_split=0.5)
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    # Remove the saved file so the test leaves no artifacts behind
    # (the original leaked it).
    os.remove(model_name + '.h5')
    print(model_name + " test save load model pass!")
def save_students(save_students_mode: str, results: list, out_folder: str,
                  results_name_prefix: str) -> None:
    """ Saves the student network(s).

    :param save_students_mode: the save mode ('all' or 'best').
    :param results: the KT results.
    :param out_folder: the folder in which the student networks will be saved.
    :param results_name_prefix: a prefix for the results name.
    """
    # Get project logger.
    kt_logging = logging.getLogger('KT')

    if save_students_mode == 'all':
        # Persist every student network under its method name.
        for result in results:
            model_name = join(out_folder, '{}{}_model.h5'.format(
                results_name_prefix, result['method']))
            save_model(result['network'], model_name)
            kt_logging.info(
                'Student network has been saved as {}.'.format(model_name))
    elif save_students_mode == 'best':
        # Pick the network with the highest categorical accuracy, skipping
        # PKT results (their evaluation has no such metric).
        best, best_model = -1, None
        for result in results:
            if result['method'] == 'Probabilistic Knowledge Transfer':
                continue
            accuracy_idx = result['network'].metrics_names.index(
                'categorical_accuracy')
            accuracy = result['evaluation'][accuracy_idx]
            if accuracy > best:
                best, best_model = accuracy, result['network']
        model_name = join(out_folder,
                          '{}best_model.h5'.format(results_name_prefix))
        if best_model is not None:
            save_model(best_model, model_name)
            kt_logging.info(
                'The best student network has been saved as {}.'.format(
                    model_name))
def check_mtl_model(model, model_name, x, y_list, task_types, check_model_io=True):
    """Compile, train and evaluate a multi-task model, then round-trip
    its weights and (optionally) the full model file.

    :param model: Keras multi-task model under test.
    :param model_name: prefix for temp files and log messages.
    :param x: training inputs.
    :param y_list: mutil label of y (one target array per task head).
    :param task_types: 'binary' or 'regression' per task head.
    :param check_model_io: test save/load model file or not.
    :return: None
    """
    # One loss per task head; no per-task metrics are configured.
    task_loss = {'binary': 'binary_crossentropy',
                 'regression': 'mean_squared_error'}
    loss_list = [task_loss[t] for t in task_types if t in task_loss]
    metric_list = []
    print('loss:', loss_list)
    print('metric:', metric_list)

    model.compile('adam', loss=loss_list, metrics=metric_list)
    model.fit(x, y_list, batch_size=100, epochs=1, validation_split=0.5)
    print(model_name + " test train valid pass!")

    weights_path = model_name + '_weights.h5'
    model.save_weights(weights_path)
    model.load_weights(weights_path)
    os.remove(weights_path)
    print(model_name + " test save load weight pass!")

    if check_model_io:
        model_path = model_name + '.h5'
        save_model(model, model_path)
        model = load_model(model_path, custom_objects)
        os.remove(model_path)
        print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
def test_DIN_model_io():
    """Build DIN with attention enabled and round-trip it via save/load."""
    name = "DIN_att"
    x, y, feature_dim_dict, behavior_feature_list = get_xy_fd()
    model = DIN(feature_dim_dict, behavior_feature_list, hist_len_max=4,
                embedding_size=8, use_din=True,
                hidden_size=[4, 4, 4], keep_prob=0.6, )
    model.compile('adam', 'binary_crossentropy',
                  metrics=['binary_crossentropy'])
    # model.fit(x, y, verbose=1, validation_split=0.5)
    save_model(model, name + '.h5')
    model = load_model(name + '.h5', custom_objects)
    # Remove the saved file so the test leaves no artifacts behind
    # (the original leaked it).
    os.remove(name + '.h5')
    print(name + " test save load model pass!")
def rnn_train():
    """Train the stacked-GRU model from pre-tokenized, pickled splits.

    Loads the cached tokenizer and train/dev data, builds the embedding
    + GRU stack over frozen pretrained embeddings, fits it and saves the
    model to disk.
    """
    tokenizer = load(path + 'tokenizer_ref.pkl')
    X_train_token = load(path + 'X_train_token_project.sav')
    X_dev_token = load(path + 'X_dev_token_project.sav')
    y_train = load(path + 'y_train_project.sav')
    y_dev = load(path + 'y_dev_project.sav')
    paragram_embeddings = load_para(tokenizer.word_index)

    model = Sequential()
    model.add(Embedding(weights=[paragram_embeddings],
                        trainable=False,
                        input_dim=num_words,
                        output_dim=embedding_size,
                        input_length=max_tokens))
    # Tapering GRU stack; dropout only on the second layer.
    model.add(GRU(units=32, return_sequences=True))
    model.add(GRU(units=16, dropout=0.5, return_sequences=True))
    model.add(GRU(units=8, return_sequences=True))
    model.add(GRU(units=4))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=1e-3),
                  metrics=['AUC', 'accuracy'])
    model.summary()

    history = model.fit(np.array(X_train_token), y_train,
                        validation_data=(np.array(X_dev_token), y_dev),
                        epochs=4, batch_size=500)
    save_model(model, path + 'rnn_model.h5')
    print('train complete')
def widendeep_model(linear_feature_columns, dnn_feature_columns,
                    train_model_input, train, test_model_input, test):
    """Train a Wide&Deep model, evaluate on the test split and save it.

    :param linear_feature_columns: feature columns for the wide part.
    :param dnn_feature_columns: feature columns for the deep part.
    :param train_model_input: dict of training feature arrays.
    :param train: training DataFrame containing the target column.
    :param test_model_input: dict of test feature arrays.
    :param test: test DataFrame containing the target column.
    :return: one-row DataFrame with RMSE / MAE / MSE / AUC metrics.
    """
    cols = ['model', 'RMSE', 'MAE', 'MSE', 'AUC', 'score']
    df_result = pd.DataFrame(columns=cols, index=range(1))
    model = WDL(
        linear_feature_columns,
        dnn_feature_columns,
        dnn_hidden_units=config.widendeep_att["dnn_hidden_units"],
        dnn_dropout=config.widendeep_att['dnn_dropout'],
        dnn_activation=config.widendeep_att['dnn_activation'],
        seed=config.widendeep_att["seed"],
        task=config.widendeep_att["task"])
    model.compile("adam", "mse", metrics=['mse'])
    history = model.fit(train_model_input, train[target].values,
                        batch_size=256,
                        epochs=config.model_epoch['epoch'],
                        verbose=2, validation_split=0.2)
    pred_ans = model.predict(test_model_input, batch_size=256)
    save_model(model, 'saved_widendeep.h5')  # save_model
    auc = roc_auc_score(test[target].values, pred_ans)
    # Assign via df.loc[row, col]: the original chained attribute
    # assignment (df.loc[0].model = ...) writes to a temporary Series
    # and may not update the DataFrame at all.
    df_result.loc[0, 'model'] = "Wide and Deep"
    df_result.loc[0, 'RMSE'] = np.round(
        math.sqrt(mean_squared_error(test[target].values, pred_ans)), 3)
    df_result.loc[0, 'MAE'] = np.round(
        mean_absolute_error(test[target].values, pred_ans), 3)
    df_result.loc[0, 'MSE'] = np.round(
        mean_squared_error(test[target].values, pred_ans), 3)
    df_result.loc[0, 'AUC'] = np.round(auc, 3)
    return df_result
def check_model(model, model_name, x, y, check_model_io=True):
    """Train a match (retrieval) model, extract its user/item embeddings,
    then round-trip weights and (optionally) the full model file.

    :param model: the match model under test.
    :param model_name: prefix for temp files and log messages.
    :param x: training inputs.
    :param y: training targets.
    :param check_model_io: test save/load model file or not.
    :return: None
    """
    model.fit(x, y, batch_size=10, epochs=2, validation_split=0.5)
    print(model_name + " test train valid pass!")

    # Sub-models that expose the user/item embedding outputs.
    user_embedding_model = Model(inputs=model.user_input,
                                 outputs=model.user_embedding)
    item_embedding_model = Model(inputs=model.item_input,
                                 outputs=model.item_embedding)

    user_embs = user_embedding_model.predict(x, batch_size=2 ** 12)
    # user_embs = user_embs[:, i, :]  i in [0, k_max) if MIND
    print(model_name + " user_emb pass!")
    item_embs = item_embedding_model.predict(x, batch_size=2 ** 12)
    print(model_name + " item_emb pass!")

    weights_path = model_name + '_weights.h5'
    model.save_weights(weights_path)
    model.load_weights(weights_path)
    os.remove(weights_path)
    print(model_name + " test save load weight pass!")

    if check_model_io:
        model_path = model_name + '.h5'
        save_model(model, model_path)
        model = load_model(model_path, custom_objects)
        os.remove(model_path)
        print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
def check_model(model, model_name, x, y, category="regression"):
    """Compile *model* for the given task category, train it, then
    round-trip its weights and the full model file.

    :param model: Keras model under test.
    :param model_name: prefix for temp files and log messages.
    :param x: training inputs.
    :param y: training targets.
    :param category: 'regression', 'binary_classification', or anything
        else for multi-class classification.
    """
    if category == "regression":
        model.compile('adam', "mean_squared_error",
                      metrics=['mean_absolute_error'])
    elif category == "binary_classification":
        model.compile('adam', 'binary_crossentropy',
                      metrics=['binary_crossentropy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                      metrics=['accuracy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(model_name + " test train valid pass!")
    model.save_weights(model_name + '_weights.h5')
    model.load_weights(model_name + '_weights.h5')
    # Clean up artifacts so repeated runs don't accumulate files
    # (the original left both .h5 files on disk).
    os.remove(model_name + '_weights.h5')
    print(model_name + " test save load weight pass!")
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5')
    os.remove(model_name + '.h5')
    print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
plt.imshow(img)

# Find the highest predicted probability (predictions are probabilities,
# so max() matches the original accumulate-if-greater loop).
highest_prob = max(y_predict)
print("Highest probability of input: " + str(highest_prob))

# Locate the position of the 1 in the one-hot label vector.  The
# original loop only advanced its index in the else-branch, which
# produced the right answer only for strictly one-hot labels.
one_index = 0
for i, bin_val in enumerate(master_label_set[index]):
    if bin_val == 1:
        one_index = i
        break
print("index location: " + str(one_index))

# Get the model's prediction at the true label's index.
guess_prediction = y_predict[one_index]
print("Guess prediction: " + str(guess_prediction))

displayImage(3)

# saving the NN as an object
models.save_model(conv_model, "NN_object")

# ./stall_NN.py
img_rows, img_cols = 28, 28
num_classes = 10


def prep_data(raw, train_size, val_size):
    """Split raw Fashion-MNIST rows into normalized images and one-hot labels.

    :param raw: array whose first column is the label, rest are pixels.
    :param train_size: unused — kept for interface compatibility.
    :param val_size: unused — kept for interface compatibility.
    :return: (images scaled to [0, 1], shape (n, 28, 28, 1); one-hot labels)
    """
    labels = raw[:, 0]
    out_y = keras.utils.to_categorical(labels, num_classes)
    pixels = raw[:, 1:]
    num_images = raw.shape[0]
    out_x = pixels.reshape(num_images, img_rows, img_cols, 1) / 255
    return out_x, out_y


fashion_file = "Fashion_kaggle/fashion-mnist_train.csv"
fashion_data = np.loadtxt(fashion_file, skiprows=1, delimiter=',')
x, y = prep_data(fashion_data, train_size=50000, val_size=5000)

# Three 3x3 conv layers followed by a dense classification head.
fashion_model = Sequential()
fashion_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu',
                         input_shape=(img_rows, img_cols, 1)))
fashion_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
fashion_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
fashion_model.add(Flatten())
fashion_model.add(Dense(100, activation='relu'))
fashion_model.add(Dense(num_classes, activation='softmax'))
fashion_model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer='adam', metrics=['accuracy'])
fashion_model.fit(x, y, batch_size=100, epochs=4, validation_split=0.2)
save_model(fashion_model, "/home/aurash/Fashion_kaggle/model.hdf5",
           overwrite=True, include_optimizer=True)
'''
squeezenet_file = open('squeezenet.json', 'r');
loaded_squeezenet_json = squeezenet_file.read();
squeezenet_file.close();
squeezenet = model_from_json(loaded_squeezenet_json);
#load weights into squeezenet
squeezenet.load_weights("squeezenet.h5");
squeezenet.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['acc']);
'''

# Dataset and patch configuration (trailing semicolons removed — they
# are redundant in Python).
train_size = 500
test_size = 500
patch_size = (512, 512)
set_name = 'Shi-Gehler'

X_train_origin, Y_train_origin, X_train_norm, Y_train_norm, _, = generate_data(
    train_size, set_name, patch_size, job='Train')
# X_test, Y_test, _, = generate_data(test_size, set_name, patch_size, job='Test')

# Fully-convolutional model trained with cosine-proximity loss.
fcn_model = FCN(input_shape=X_train_origin.shape[1:4])
fcn_model.compile(optimizer=Adam(lr=0.0002), loss='cosine_proximity',
                  metrics=['acc'])
fcn_model.fit(X_train_origin, Y_train_origin, validation_split=0.2,
              epochs=20, batch_size=16)
save_model(fcn_model, 'fcn_model.h5')
def save_model(self, filepath: str):
    """Persist the wrapped Keras model to *filepath*.

    :param filepath: destination path for the serialized model.
    """
    # Imported lazily (and aliased) so TensorFlow is only required when
    # saving, and the Keras helper doesn't shadow this method's name.
    from tensorflow.python.keras.models import save_model as _keras_save
    _keras_save(self.model, filepath)
# Stop early when validation loss stalls, log to TensorBoard, and decay
# the learning rate on plateau.
callback_early_stopping = EarlyStopping(monitor='val_loss',
                                        patience=3, verbose=1)
callback_tensorboard = TensorBoard(log_dir='./twitter_logs/',
                                   histogram_freq=0,
                                   write_graph=False)
callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       min_lr=1e-4,
                                       patience=0,
                                       verbose=1)
# NOTE(review): callback_checkpoint is defined elsewhere in this file.
callbacks = [callback_early_stopping,
             callback_checkpoint,
             callback_tensorboard,
             callback_reduce_lr]

print("Train Model")
model.fit_generator(generator=generator,
                    epochs=50,
                    steps_per_epoch=100,
                    validation_data=validation_data,
                    callbacks=callbacks)

result = model.evaluate(x=np.expand_dims(x_test, axis=0),
                        y=np.expand_dims(y_test, axis=0))
save_model(model, "twitter_model/model.h5")
print("loss (test-set):", result)
# Split the feature columns between the two DSSM towers.
# Assumes the first 15 sparse features belong to the user tower and the
# rest to the item tower — TODO confirm against the feature definitions.
user_feature_columns = dnn_feature_columns[0:15]
item_feature_columns = dnn_feature_columns[15:]
print(user_feature_columns)
print(item_feature_columns)
user_feature_inputs_keys = sparse_features[0:15]
print(user_feature_inputs_keys)
item_feature_inputs_keys = sparse_features[15:]
print(item_feature_inputs_keys)
# other_feature_input_keys = dense_features
# 3.Define Model,train,predict and evaluate
user_vec_name = 'user_vec'
item_vec_name = 'item_vec'
model = DSSM4FatureColumn(user_feature_columns, item_feature_columns, user_inputs_keys=user_feature_inputs_keys, item_inputs_keys=item_feature_inputs_keys)
model.compile("adam", "binary_crossentropy", metrics=['binary_crossentropy', tf.keras.metrics.AUC()], )
model.fit(train_model_input)
print("=============train finish==================")
# eval_result = model.evaluate(test_model_input)
save_model(model, './dssm_model')
# Sub-models that expose each tower's output as an embedding.
user_embedding_model = Model(inputs=model.user_input, outputs=model.user_embedding)
item_embedding_model = Model(inputs=model.item_input, outputs=model.item_embedding)
# After rewriting the inputs, the predict input must not contain the label,
# otherwise: "AttributeError: 'Model' object has no attribute '_training_endpoints'"
user_embs = user_embedding_model.predict(test_model_input)
item_embs = item_embedding_model.predict(predict_model_input)
def save(self, filepath, overwrite=True, include_optimizer=True):
    """Serialize this model to *filepath* via Keras' ``save_model``.

    :param filepath: destination path for the saved model.
    :param overwrite: silently overwrite an existing file when True.
    :param include_optimizer: also persist the optimizer state.
    """
    # Imported lazily to avoid a module-level TensorFlow dependency.
    from tensorflow.python.keras.models import save_model  # pylint: disable=g-import-not-at-top
    save_model(self, filepath, overwrite=overwrite,
               include_optimizer=include_optimizer)
def save(self, filepath, overwrite=True, include_optimizer=True):
    """Write this model to disk using the Keras ``save_model`` helper.

    :param filepath: where to save the model.
    :param overwrite: replace an existing file without prompting when True.
    :param include_optimizer: save optimizer state alongside the weights.
    """
    # Lazy import keeps TensorFlow off the module import path.
    from tensorflow.python.keras.models import save_model  # pylint: disable=g-import-not-at-top
    save_model(self, filepath, overwrite, include_optimizer)