def load_models(self):
    # Load the json architectures and create the models.
    if self.Use_Batch_Norm:
        out_mod_dir = 'Save_BatchNorm'
    else:
        out_mod_dir = 'Save_InstNorm'
    with open(os.path.join(out_mod_dir, 'autoencoder.json'), 'r') as ae_json:
        ae_model_json = ae_json.read()
    ae_model = models.model_from_json(
        ae_model_json,
        custom_objects={'InstanceNormalization': InstanceNormalization})
    # Load the weights into the new model.
    ae_model.load_weights(os.path.join(out_mod_dir, "autoencoder.h5"))
    self.AutoEncoderNet = ae_model
    # Repeat for every style network in the bank.
    for i in self.style_bank:
        with open(os.path.join(out_mod_dir, "stylenet_{}.json".format(i)), 'r') as ae_json:
            ae_model_json = ae_json.read()
        ae_model = models.model_from_json(
            ae_model_json,
            custom_objects={'InstanceNormalization': InstanceNormalization})
        ae_model.load_weights(
            os.path.join(out_mod_dir, "stylenet_{}.h5".format(i)))
        self.StyleNet[i] = ae_model
    print("Loaded models from disk")
def test_unseen_class(self):
    adam = Adam(lr=1e-5)
    # Load the saved architecture and weights.
    with open(self.model_dir.joinpath('model.json'), 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(
        str(self.model_dir.joinpath(self.prefix + "model.h5")))
    print("Loaded model from disk")
    loaded_model.compile(loss='sparse_categorical_crossentropy',
                         optimizer=adam,
                         metrics=['accuracy'])
    # Record the top class probability for every test image.
    probs = list()
    for i in range(len(self.test_labels)):
        pred = loaded_model.predict(np.array([self.test_images[i]]))
        print("{}: {}: max: {}".format(i, pred[0], np.max(pred[0])))
        probs.append(np.max(pred[0]))
    plt.xlabel("DATA")
    plt.ylabel("PROBABILITY")
    plt.title("mean: {}".format(np.mean(probs)))
    plt.scatter(np.arange(len(probs)), probs, s=100)
    plt.show()
def load_trained_tf_model(
        model_save_dir,
        model_name,
        use_best_val_loss=False,
        print_model_summary=True,
):
    """Load the model architecture and weights.

    Parameters
    ----------
    model_save_dir : str
        The path to where the .json model architecture and .h5 model
        weights are stored.
    model_name : str
        The name the model is saved under, i.e., the part before
        '.h5' or '.json'.
    use_best_val_loss : bool
        If True, load the weights checkpointed at the best validation
        loss ('<model_name>_best_val_loss.h5') instead of the final
        weights.
    print_model_summary : bool
        If True, print the model summary after loading.
    """
    with open(f"{model_save_dir}{model_name}.json", "r") as f:
        model = model_from_json(f.read(), custom_objects={"k": K})
    if use_best_val_loss:
        model_name += "_best_val_loss"
    model.load_weights(model_save_dir + model_name + ".h5")
    if print_model_summary:
        print(model.summary())
    return model
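# A minimal usage sketch for load_trained_tf_model(); the directory and
# model name below are hypothetical, and model_save_dir must end with a
# path separator because the function concatenates strings directly.
model = load_trained_tf_model(
    model_save_dir="saved_models/",
    model_name="my_model",
    use_best_val_loss=True,
)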
def load_model(self, model_json_path, model_h5_path):
    with open(model_json_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(model_h5_path)
    return loaded_model
def load_keras_model(saved_model_path):
    """Load a keras.Model from SavedModel.

    load_model reinstantiates model state by:
    1) loading model topology from json (this will eventually come
       from metagraph).
    2) loading model weights from checkpoint.

    Args:
      saved_model_path: a string specifying the path to an existing
        SavedModel.

    Returns:
      a keras.Model instance.
    """
    # Restore model topology from the json string.
    model_json_filepath = os.path.join(
        compat.as_bytes(saved_model_path),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
    model_json = file_io.read_file_to_string(model_json_filepath)
    model = model_from_json(model_json)
    # Restore model weights from the checkpoint.
    checkpoint_prefix = os.path.join(
        compat.as_text(saved_model_path),
        compat.as_text(constants.VARIABLES_DIRECTORY),
        compat.as_text(constants.VARIABLES_FILENAME))
    model.load_weights(checkpoint_prefix)
    return model
def load_regressor(filename, newobj):
    # Read one JSON record per line, stripping the last two characters
    # of each line (e.g. a trailing ",\n") before parsing.
    dataset = []
    with open(filename, 'r') as f:
        for line in f:
            dataset.append(json.loads(line[:-2]))
    cars = pd.DataFrame(dataset)
    # Drop rows whose price lies between 30000 and 300000, then
    # separate the target column.
    cars = cars.drop(cars[(cars['price'] > 30000)
                          & (cars['price'] < 300000)].index)
    cars_prices = cars["price"].copy()
    cars = cars.drop("price", axis=1)
    # Append the new car so it is transformed with the same pipeline.
    new_car = pd.DataFrame(newobj)
    cars = cars.append(new_car)
    cars_prepared = full_pipeline.fit_transform(cars)
    with open('neuralnetregressor.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("neuralnetregressor.h5")
    loaded_model.compile(loss='mean_absolute_error',
                         optimizer='adam',
                         metrics=['mae'])
    # Predict the price of the appended car only.
    return loaded_model.predict(cars_prepared[-1].reshape((1, -1)))
def scorer(filename=None):
    with open('input/model2_json.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    tf.disable_v2_behavior()
    loaded_model = model_from_json(loaded_model_json)
    # Load the weights into the new model.
    loaded_model.load_weights("input/VoiceModel2.h5")
    print("Loaded model from disk")
    # Compile with the optimizer used at training time.
    loaded_model.compile(loss='categorical_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])
    audio_file_name = 'input/' + filename
    X = prepare_data(audio_file_name,
                     aug=1,
                     mfcc=1,
                     sampling_rate=44100,
                     audio_duration=2.5)
    newpred = loaded_model.predict(X, batch_size=16, verbose=1)
    return str(score(newpred))
def train(self, train_images, train_labels, validation_images,
          validation_labels, retrain=False, prefix="", lr=1e-4, epochs=20):
    # Checkpoint the weights with the best validation accuracy.
    mc = ModelCheckpoint(str(INCEPTION_V4_DIR_RES.joinpath(prefix + 'model.h5')),
                         monitor='val_acc',
                         mode='auto',
                         verbose=1,
                         save_best_only=True)
    adam = Adam(lr=lr)
    if not retrain:
        # Build a fresh model and save its architecture to disk.
        model = self.model()
        model_json = model.to_json()
        with open(INCEPTION_V4_DIR_RES.joinpath('model.json'), "w") as json_file:
            json_file.write(model_json)
        print("Saved model to disk")
    else:
        print('retraining...')
        with open(INCEPTION_V4_DIR_RES.joinpath('model.json'), 'r') as json_file:
            loaded_model_json = json_file.read()
        model = model_from_json(loaded_model_json)
        # Load the previously checkpointed weights into the new model.
        model.load_weights(str(INCEPTION_V4_DIR_RES.joinpath(prefix + "model.h5")))
        print("Loaded model from disk")
    model.compile(optimizer=adam,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        validation_data=(validation_images, validation_labels),
                        callbacks=[mc],
                        verbose=2)
    return history
def loadRNN(model_file_name, weight_file_name):
    K.reset_uids()
    with open(model_file_name + '.json', 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(weight_file_name + '.h5')
    print("Neural network loaded from file")
    return model
def __init__(self):
    with open('../../resources/model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    self.intent_classifier = model_from_json(loaded_model_json)
    self.intent_classifier.load_weights('../../resources/model.h5')
    print("Loaded model from disk")
def load_model(saved_model_path):
    """Load a keras.Model from SavedModel.

    load_model reinstantiates model state by:
    1) loading model topology from json (this will eventually come
       from metagraph).
    2) loading model weights from checkpoint.

    Args:
      saved_model_path: a string specifying the path to an existing
        SavedModel.

    Returns:
      a keras.Model instance.
    """
    # Restore model topology from the json string.
    model_json_filepath = os.path.join(
        compat.as_bytes(saved_model_path),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
    model_json = file_io.read_file_to_string(model_json_filepath)
    model = model_from_json(model_json)
    # Restore model weights from the checkpoint.
    checkpoint_prefix = os.path.join(
        compat.as_text(saved_model_path),
        compat.as_text(constants.VARIABLES_DIRECTORY),
        compat.as_text(constants.VARIABLES_FILENAME))
    model.load_weights(checkpoint_prefix)
    return model
def predict(self):
    K.reset_uids()
    classes = ['Normal', 'Neumonia']
    modelo = 'neumonia/model/model_neumonia_v41.json'
    pesos = 'neumonia/model/weights_neumonia_v41.h5'
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        with open(modelo, 'r') as f:
            model = model_from_json(f.read())
        model.load_weights(pesos)
    img = image.load_img(self.img, target_size=(150, 150))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    resultado = preds[0]  # only one dimension
    porcentaje = np.round(resultado * 100, 2)
    porcentaje = list(porcentaje)
    respuesta = np.argmax(resultado)  # the highest value in resultado
    for i in range(len(classes)):
        res = classes[i]
        if i == respuesta:
            return 'Result: {:.4}% {}'.format(round(max(porcentaje), 2), res)
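# Note on the pattern above: CustomObjectScope and the custom_objects=
# argument to model_from_json are interchangeable ways of exposing custom
# layers or initializers during deserialization. A minimal sketch with a
# hypothetical path:
with open('model.json', 'r') as f:
    model = model_from_json(f.read(),
                            custom_objects={'GlorotUniform': glorot_uniform()})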
def load(self, file_name=None):
    with self.graph.as_default():
        with self.session.as_default():
            try:
                model_name = file_name[0]
                weights_name = file_name[1]
                if model_name is not None:
                    # Load the model architecture.
                    json_file_path = os.path.join(self.model_folder, model_name)
                    with open(json_file_path, 'r') as json_file:
                        loaded_model_json = json_file.read()
                    self.loaded_model = model_from_json(loaded_model_json)
                if weights_name is not None:
                    # Load the weights.
                    weights_path = os.path.join(self.model_folder, weights_name)
                    self.loaded_model.load_weights(weights_path)
                logging.info("Neural Network loaded: ")
                logging.info('\t' + "Neural Network model: " + model_name)
                logging.info('\t' + "Neural Network weights: " + weights_name)
                return True
            except Exception as e:
                logging.exception(e)
                return False
def load_model():
    # Relies on the module-level loaded_model_json and
    # model_weights_file_path being defined before this is called.
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(model_weights_file_path)
    loaded_model.compile(loss='binary_crossentropy',
                         optimizer=Adam(lr=1e-5),
                         metrics=['accuracy'])
    # Build the predict function up front so the model can be shared
    # across threads when serving.
    loaded_model._make_predict_function()
    return loaded_model
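# A minimal sketch of the module-level state that load_model() above
# depends on; the file names are hypothetical placeholders.
with open('model.json', 'r') as _f:
    loaded_model_json = _f.read()
model_weights_file_path = 'model.h5'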
def load_models(self, model_architecture_file, model_weights_file):
    with open(model_architecture_file, 'r') as json_file:
        loaded_model_json = json_file.read()
    self.neural_network = model_from_json(loaded_model_json)
    self.neural_network.load_weights(model_weights_file)
    print("Loaded model {} {}".format(model_architecture_file,
                                      model_weights_file))
def cargarRNN(archivoModelo, archivoPesos):
    K.reset_uids()
    with open(archivoModelo + '.json', 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(archivoPesos + '.h5')
    print("Neural network loaded")
    return model
def load_trained_model(name):
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(name)
    print("Model loaded...")
    return loaded_model
def load_model_from_json(self, json_file_name):
    # Load the json architecture and create the model.
    with open(json_file_name, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.summary()
    return loaded_model
def model_init(self):
    """Initialize the model."""
    path_graph = os.path.join(self.path_dir, "graph.json")
    path_model = os.path.join(self.path_dir, "model.h5")
    # Load the model architecture.
    with open(path_graph, "r", encoding="utf-8") as f:
        self.model = model_from_json(f.read(),
                                     custom_objects=macropodus.custom_objects)
    # Load the model weights.
    self.model.load_weights(path_model)
def load_model(arch):
    model_path = 'models/' + arch['name'] + '.json'
    weights_path = 'models/' + arch['name'] + '.h5'
    with open(model_path) as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(weights_path)
    return loaded_model
def load_model(input_model_path, input_json_path=None, input_yaml_path=None):
    if not Path(input_model_path).exists():
        raise FileNotFoundError(
            'Model file `{}` does not exist.'.format(input_model_path))
    try:
        model = keras.models.load_model(input_model_path)
        # Rebuild the YOLO body and load the weights into it.
        if FLAGS.is_tiny:
            model = tiny_yolo_body(Input(shape=(None, None, 3)), 3,
                                   FLAGS.num_class)
        else:
            model = yolo_body(Input(shape=(None, None, 3)), 3, FLAGS.num_class)
        model.load_weights(input_model_path)
        return model
    except FileNotFoundError as err:
        logging.error('Input model file (%s) does not exist.',
                      FLAGS.input_model)
        raise err
    except ValueError as wrong_file_err:
        # The .h5 file only holds weights; fall back to a json or yaml
        # architecture description if one was supplied.
        if input_json_path:
            if not Path(input_json_path).exists():
                raise FileNotFoundError(
                    'Model description json file `{}` does not exist.'.format(
                        input_json_path))
            try:
                model = model_from_json(open(str(input_json_path)).read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from json.")
                raise err
        elif input_yaml_path:
            if not Path(input_yaml_path).exists():
                raise FileNotFoundError(
                    'Model description yaml file `{}` does not exist.'.format(
                        input_yaml_path))
            try:
                model = model_from_yaml(open(str(input_yaml_path)).read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from yaml.")
                raise err
        else:
            logging.error(
                'Input file specified only holds the weights, and not '
                'the model definition. Save the model using '
                'model.save(filename.h5), which will contain the network '
                'architecture as well as its weights. '
                'If the model is saved using the '
                'model.save_weights(filename) function, either the '
                'input_model_json or input_model_yaml flags should be set '
                'to import the network architecture prior to loading the '
                'weights.\n'
                'Check the keras documentation for more details '
                '(https://keras.io/getting-started/faq/)')
            raise wrong_file_err
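# A minimal sketch of calling load_model() above with a weights-only .h5
# plus a json architecture description; both file names are hypothetical.
model = load_model('yolo_weights.h5', input_json_path='yolo_architecture.json')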
def load(self):
    inside = os.listdir(MODEL_DIR)
    if "model.json" not in inside or "model.h5" not in inside:
        raise ValueError(
            "Make sure both model.json and model.h5 are inside 'models/'")
    # Read the saved architecture, then load the weights.
    with open(MODEL_DIR + "model.json", "r") as json_file:
        self.model = model_from_json(json_file.read())
    self.model.load_weights(MODEL_DIR + "model.h5")
def cargarRNN(nombreArchivoModelo, nombreArchivoPesos):
    K.reset_uids()
    # Load the architecture from the JSON file.
    with open(nombreArchivoModelo + '.json', 'r') as f:
        model = model_from_json(f.read())
    # Load the weights into the new model.
    model.load_weights(nombreArchivoPesos + '.h5')
    print("Neural network loaded from file")
    return model
def __init__(self, model=None):
    if model:
        self.model = model
    else:
        # Speech/silence detector.
        with open(Recognizer.path_model, "r") as jfile:
            self.model = model_from_json(jfile.read())
        self.model.load_weights(Recognizer.weights_path)
        self.threshold = 0.93
        # Keyword classifier.
        with open(Recognizer.path_model2, "r") as jfile:
            self.model2 = model_from_json(jfile.read())
        self.model2.load_weights(Recognizer.weights_path2)
        self.__labels = {0: 'speech', 1: 'silence'}
        self.__labels2 = {0: '0', 1: '1', 2: '10', 3: '2', 4: '3', 5: '4',
                          6: '5', 7: '6', 8: '7', 9: '8', 10: '9',
                          11: 'no', 12: 'other', 13: 'yes'}
def load_model():
    # Model reconstruction from the JSON file.
    with open('model_architecture.json', 'r') as f:
        model = models.model_from_json(f.read())
    # Load the weights into the new model.
    model.load_weights('model_weights.h5')
    return model
def load_model(self, name):
    # Load the json architecture and create the model.
    with open(name + 'model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # Load the weights into the new model.
    loaded_model.load_weights(name + "model.h5")
    print("Loaded model from disk")
    return loaded_model
def get_model():
    with open(path + name + ".json", 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # Load the weights into the new model.
    loaded_model.load_weights(path + name + ".h5")
    print("Loaded model from disk")
    return loaded_model
def load_model(weight_path: str, structure_path: str, trainset_path: str):
    # Load the structure of the model.
    with open(structure_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # Load the weights from file.
    model.load_weights(weight_path)
    return model
def readModelFromJSON():
    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # Load the weights into the new model.
    loaded_model.load_weights("model.h5")
    print("Model has been read.")
    return loaded_model
def load_model(name):
    # Load the json architecture and create the model.
    with open('{}.json'.format(name), 'r') as model_file:
        loaded_model_json = model_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # Load the weights into the new model.
    loaded_model.load_weights("{}.h5".format(name))
    print("Loaded model from disk")
    return loaded_model
def __init__(self):
    # Load the pre-processing artifacts.
    self.MAX_TWEET_LENGTH = 100
    self.MIN_PREDICTION_SCORE = 0.95
    self.tokenizer = load('src/save/tokenizer.joblib')
    self.label_encoder = load('src/save/label_encoder.joblib')
    # Load the model.
    with open('src/save/model.json', 'r') as f:
        self.model = model_from_json(f.read())
    self.model.load_weights('src/save/model.h5')
    self.model.compile(loss='categorical_crossentropy',
                       optimizer='adam',
                       metrics=['accuracy'])
def load_keras_model(saved_model_path):
    """Load a keras.Model from SavedModel.

    load_model reinstantiates model state by:
    1) loading model topology from json (this will eventually come
       from metagraph).
    2) loading model weights from checkpoint.

    Example:

    ```python
    import tensorflow as tf

    # Create a tf.keras model.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(1, input_shape=[10]))
    model.summary()

    # Save the tf.keras model in the SavedModel format.
    saved_to_path = tf.contrib.saved_model.save_keras_model(
        model, '/tmp/my_simple_tf_keras_saved_model')

    # Load the saved keras model back.
    model_prime = tf.contrib.saved_model.load_keras_model(saved_to_path)
    model_prime.summary()
    ```

    Args:
      saved_model_path: a string specifying the path to an existing
        SavedModel.

    Returns:
      a keras.Model instance.
    """
    # Restore model topology from the json string.
    model_json_filepath = os.path.join(
        compat.as_bytes(saved_model_path),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
    model_json = file_io.read_file_to_string(model_json_filepath)
    model = model_from_json(model_json)
    # Restore model weights from the checkpoint.
    checkpoint_prefix = os.path.join(
        compat.as_text(saved_model_path),
        compat.as_text(constants.VARIABLES_DIRECTORY),
        compat.as_text(constants.VARIABLES_FILENAME))
    model.load_weights(checkpoint_prefix)
    return model
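# For contrast with the json + .h5 loaders above, a minimal sketch of the
# save side that produces such a file pair; the file names are hypothetical
# and `model` is assumed to be an existing keras.Model.
with open('model.json', 'w') as f:
    f.write(model.to_json())  # architecture only
model.save_weights('model.h5')  # weights only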