Code example #1
File: model_test.py Project: SaintBacchus/tensorflow
 def test_restore_old_saved_model(self):
     saved_model_dir = os.path.join(
         flags.FLAGS['test_srcdir'].value,
         'org_tensorflow/tensorflow/python/keras',
         'mixed_precision/testdata/lso_savedmodel_tf2.2')
     # saved_model_dir = test.test_src_dir_path(
     #     'python/keras/mixed_precision/testdata/'
     #     'lso_savedmodel_tf2.2')
     model = save.load_model(saved_model_dir)
     expected_kernel = np.array([[9.229685, 10.901115],
                                 [10.370763, 9.757362]])
     self.assertAllClose(backend.eval(model.weights[0]), expected_kernel)
     self.assertEqual(type(model.optimizer),
                      loss_scale_optimizer.LossScaleOptimizer)
Code example #2
 def test_state_saving_and_loading(self):
     input_data = np.random.random((1, 2))
     rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10,
                                                     scale=3.0)
     inputs = input_layer.Input((2, ))
     outputs = rff_layer(inputs)
     model = training.Model(inputs, outputs)
     output_data = model.predict(input_data)
     temp_dir = self.get_temp_dir()
     self.addCleanup(shutil.rmtree, temp_dir)
     saved_model_dir = os.path.join(temp_dir, 'rff_model')
     model.save(saved_model_dir)
     new_model = save.load_model(saved_model_dir)
     new_output_data = new_model.predict(input_data)
     self.assertAllClose(output_data, new_output_data, atol=1e-4)
Code example #3
File: model.py Project: labesoft/ml-projects
    def __init__(self):
        """Initialize the alarm params, the model and faces parts classifiers"""
        # Prepare alarm
        mixer.init()
        self.sound = mixer.Sound('alarm.wav')
        self.score = 0
        self.__thickness = 2
        self.config = DrowsiDriveConfig()

        # Prepare model
        self.model = load_model(str(self.config.MODEL_PATH))

        # Classifiers
        self.face_classifier = None
        self.left_eye_classifier = None
        self.right_eye_classifier = None
Code example #4
def load_keras_model(file):
    model = load_model(file)
    # model = load_model('model_test.h5')
    # model.load_weights(file)

    encoder_inputs = model.input[0]  # input_1
    encoder_outputs, state_h_enc, state_c_enc = model.layers[
        4].output  # lstm_1
    encoder_states = [state_h_enc, state_c_enc]

    decoder_inputs = model.input[1]  # input_2
    decoder_embedding = model.layers[3].output
    decoder_lstm = model.layers[5]
    decoder_dense = model.layers[6]

    return model, encoder_inputs, encoder_states, decoder_inputs, \
           decoder_embedding, decoder_lstm, decoder_dense
Code example #5
  def test_saving_model_with_custom_object(self):
    with generic_utils.custom_object_scope(), self.cached_session():

      @generic_utils.register_keras_serializable()
      class CustomLoss(losses.MeanSquaredError):
        pass

      model = sequential.Sequential(
          [core.Dense(units=1, input_shape=(1,))])
      model.compile(optimizer='sgd', loss=CustomLoss())
      model.fit(np.zeros([10, 1]), np.zeros([10, 1]))

      temp_dir = self.get_temp_dir()
      filepath = os.path.join(temp_dir, 'saving')
      model.save(filepath)

      # Make sure the model can be correctly loaded back.
      _ = save.load_model(filepath, compile=True)
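The example above relies on register_keras_serializable(), so the custom loss deserializes without extra arguments. A minimal alternative sketch, assuming the same CustomLoss class and filepath as above: the custom object can instead be named explicitly at load time.

# Minimal sketch (assumes CustomLoss and filepath from the example above):
# skip the registration decorator and pass the custom object explicitly.
_ = save.load_model(filepath,
                    custom_objects={'CustomLoss': CustomLoss},
                    compile=True)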
Code example #6
def predict_separate(path,
                     test_path=config.TEST_PATH,
                     dat_file=config.SHAPE_PREDICTOR_DAT_PATH):
    print('Loading Model from: ', path)
    name_of_model = path.split('_')[1]
    model = load_model(path)
    images_processed = 0
    images_with_no_face = 0
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(dat_file)
    y_predProbs = np.empty((0, config.NUM_CLASSES))
    y_true = []
    for label in config.CLASS_LABEL:
        files = sorted(glob(os.path.join(test_path, label.lower()) + '/*.jpg'))
        for images in files:
            img = cv2.resize(cv2.imread(images),
                             (config.TARGET_SIZE['CV2_LANDMARKS_RESIZE'][0],
                              config.TARGET_SIZE['CV2_LANDMARKS_RESIZE'][1]))
            img = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)
            faces = detector(img)
            if len(faces) == 0:
                images_with_no_face += 1
                y_predProbs = np.vstack(
                    (y_predProbs, np.zeros(config.NUM_CLASSES)))
            else:
                row = []
                for face in faces:
                    landmarks = predictor(image=img, box=face)
                    for n in range(0, 68):
                        row.append(landmarks.part(n).x)
                        row.append(landmarks.part(n).y)
                    break
                y_predProbs = np.vstack(
                    (y_predProbs,
                     model.predict(np.array(row).reshape(1, -1), verbose=1)))
            y_true.append(label)
            images_processed += 1
            print("No of Images Processed:", images_processed)
    print('\n\nTotal Images:', images_processed, '\nFace detected in: ',
          images_processed - images_with_no_face,
          'images\nFace not detected in: ', images_with_no_face, 'images')
    return y_predProbs, y_true
Code example #7
 def test_save_load_tf_pathlib(self):
   if sys.version_info < (3, 6):
     self.skipTest('pathlib is only available for python version >= 3.6')
   path = pathlib.Path(self.get_temp_dir()) / 'model'
   save.save_model(self.model, path, save_format='tf')
   save.load_model(path)
Code example #8
 def test_save_load_tf_string(self):
   path = os.path.join(self.get_temp_dir(), 'model')
   save.save_model(self.model, path, save_format='tf')
   save.load_model(path)
Code example #9
 def test_save_load_tf_pathlib(self):
     if sys.version_info >= (3, 4):
         path = pathlib.Path(self.get_temp_dir()) / 'model'
         save.save_model(self.model, path, save_format='tf')
         save.load_model(path)
Code example #10
 def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
                         output_name):
   restored_keras_model = save.load_model(saved_dir)
   return restored_keras_model.predict(
       predict_dataset, steps=test_base.PREDICT_STEPS)
Code example #11
    def test_save_model_with_dynamic_loss_scaling(
            self, strategy_fn, h5=False, use_v1_loss_scale_optimizer=False):
        # TODO(reedwm): Support and test saving model with a mixed_[b]float16 policy
        # as well.
        strategy = strategy_fn()
        if (isinstance(strategy, mirrored_strategy.MirroredStrategy)
                and not context.executing_eagerly()):
            # TODO(b/121381184): Enable running the test in this case.
            return

        # Create and run model.
        with strategy.scope():
            x = layers.Input(shape=(2, ), batch_size=2, dtype=dtypes.float32)
            y = mp_test_util.MultiplyLayer()(x)
            model = models.Model(inputs=x, outputs=y)

            opt = gradient_descent.SGD(1.)
            if use_v1_loss_scale_optimizer:
                loss_scale = loss_scale_module.DynamicLossScale(
                    initial_loss_scale=1., increment_period=2.)
                opt = loss_scale_optimizer.LossScaleOptimizerV1(
                    opt, loss_scale)
            else:
                opt = loss_scale_optimizer.LossScaleOptimizer(
                    opt, initial_scale=1., dynamic_growth_steps=2.)
            model.compile(optimizer=opt,
                          loss='mse',
                          run_eagerly=testing_utils.should_run_eagerly())
        # Run for 3 steps (6 examples with a batch size of 2)
        model.fit(np.ones((6, 2)), np.zeros((6, 2)), batch_size=2)
        self.assertEqual(backend.get_value(opt.loss_scale), 2)
        self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
        (weight, ) = model.trainable_weights
        orig_weight = backend.get_value(weight)

        # Save model weights.
        save_path = os.path.join(self.get_temp_dir(), 'model')
        model.save(save_path, save_format='h5' if h5 else 'tf')

        # Run model again for 1 step (2 examples with a batch size of 2)
        model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
        new_weight = backend.get_value(weight)
        self.assertNotEqual(new_weight, orig_weight)
        self.assertEqual(backend.get_value(opt.loss_scale), 4)
        self.assertEqual(backend.get_value(opt.dynamic_counter), 0)

        # Load model weights and ensure loss scale weights are restored.
        model = save.load_model(
            save_path,
            custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})
        (weight, ) = model.trainable_weights
        loaded_weight = backend.get_value(weight)
        self.assertEqual(loaded_weight, orig_weight)
        # Currently the loss scale isn't always saved when the model is saved with
        # Model.save(). So we assert the loss scale either has the value when it was
        # saved, or the value it was initialized with.
        # TODO(reedwm): Always save/restore the loss scale with Model.save().
        self.assertIn(backend.get_value(model.optimizer.loss_scale), (1, 2))
        self.assertIn(backend.get_value(model.optimizer.dynamic_counter),
                      (0, 1))

        # Test optimizer attributes and type
        self.assertEqual(model.optimizer.initial_scale, 1.)
        self.assertEqual(model.optimizer.dynamic_growth_steps, 2.)
        self.assertEqual(type(model.optimizer),
                         loss_scale_optimizer.LossScaleOptimizer)
Code example #12
import tensorflow as tf
from tensorflow.python.keras.saving.save import load_model
import tensorflow_datasets as tfds
import numpy as np
import pandas as pd
from tensorflow.keras import layers
import tensorflow_hub as hub

IMAGE_SHAPE = (224, 224)

path = '<your path here>'  # placeholder kept from the source; set your own path

classifier_model = load_model(path)

classifier = tf.keras.Sequential([
    hub.KerasLayer(classifier_model, input_shape=IMAGE_SHAPE + (3,))
])

labels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())

batch_size = 64
img_height = 224
img_width = 224

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
  path,
  validation_split=0.2,
  subset="training",
  seed=123,
  image_size=(img_height, img_width),
  batch_size=batch_size)
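
The snippet above downloads the ImageNet label file but never applies it. A minimal hedged sketch, assuming the loaded model emits ImageNet-style class probabilities, of mapping predictions back to label names:

# Minimal sketch (assumes classifier, train_ds and imagenet_labels from above,
# and that the model's output classes line up with the ImageNet label file).
for image_batch, _ in train_ds.take(1):
    probs = classifier.predict(image_batch)                 # (batch, num_classes)
    predicted = imagenet_labels[np.argmax(probs, axis=-1)]  # class-name strings
    print(predicted[:5])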
Code example #13
def load_model(path,
               generator=None,
               augmenter=None,
               custom_objects=None,
               compile=True):
    """
    Load the model

    Example
    -------
    model = load_model('model.h5', augmenter)

    """
    if custom_objects:
        if isinstance(custom_objects, dict):
            base_objects = CUSTOM_LAYERS
            custom_objects = dict(
                list(base_objects.items()) + list(custom_objects.items()))
    else:
        custom_objects = CUSTOM_LAYERS

    if isinstance(path, str):
        if path.endswith(".h5") or path.endswith(".hdf5"):
            filepath = path
        else:
            raise ValueError("file must be .h5 file")
    else:
        raise TypeError("file must be type `str`")

    train_model = save.load_model(filepath,
                                  custom_objects=custom_objects,
                                  compile=compile)

    with h5py.File(filepath, "r") as h5file:
        train_generator_config = h5file.attrs.get("train_generator_config")
        if train_generator_config is None:
            raise ValueError("No data generator found in config file")
        train_generator_config = json.loads(
            train_generator_config.decode("utf-8"))["config"]

        model_config = h5file.attrs.get("pose_model_config")
        if model_config is None:
            raise ValueError("No pose model found in config file")
        model_name = json.loads(model_config.decode("utf-8"))["class_name"]
        model_config = json.loads(model_config.decode("utf-8"))["config"]

    if generator:
        signature = inspect.signature(TrainingGenerator.__init__)
        keys = [key for key in signature.parameters.keys()]
        keys.remove("self")
        keys.remove("augmenter")
        keys.remove("generator")
        kwargs = {key: train_generator_config[key] for key in keys}
        kwargs["augmenter"] = augmenter
        kwargs["generator"] = generator
        train_generator = TrainingGenerator(**kwargs)
    else:
        train_generator = None

    Model = MODELS[model_name]
    signature = inspect.signature(Model.__init__)
    keys = [key for key in signature.parameters.keys()]
    keys.remove("self")
    keys.remove("train_generator")
    if "kwargs" in keys:
        keys.remove("kwargs")
    kwargs = {key: model_config[key] for key in keys}
    kwargs["train_generator"] = train_generator

    # Pass skip_init to bypass automatic initialization and initialize manually
    kwargs["skip_init"] = True

    model = Model(**kwargs)
    model.train_model = train_model
    model.__init_train_model__()
    model.__init_input__(model_config["image_shape"])

    kwargs = {}
    kwargs["output_shape"] = model_config["output_shape"]
    kwargs["keypoints_shape"] = model_config["keypoints_shape"]
    kwargs["downsample_factor"] = model_config["downsample_factor"]
    kwargs["output_sigma"] = model_config["output_sigma"]
    model.__init_predict_model__(**kwargs)

    return model
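
The loader above expects its generator and model configurations to live as JSON strings in the HDF5 file's attributes. A minimal sketch of inspecting those attributes directly, assuming the same attribute names the function reads:

import json
import h5py

# Minimal sketch (assumes the attribute names used by load_model() above;
# h5py may return str or bytes depending on how the attribute was stored).
with h5py.File('model.h5', 'r') as h5file:
    raw = h5file.attrs.get('pose_model_config')
    if raw is not None:
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        print(json.loads(raw)['class_name'])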
Code example #14
# Imports assumed by this fragment (the source omits them)
import numpy as np
from tensorflow.keras.models import load_model, save_model
from tensorflow.keras.preprocessing import image

model.compile(loss=loss_function,
              optimizer=optimizer,
              metrics=['accuracy'])

# Start training
model.fit(
        train_datagen,
        epochs=no_epochs,
        shuffle=False)

# Save the model
filepath = './saved_model'
save_model(model, filepath)

# Save the model in h5 format
filepath = './saved_model/h5'
save_model(model, filepath, save_format='h5')

# load model
model = load_model("./saved_model/h5")

# Validate recognition
img = image.load_img('Banana.jpg', target_size = (25,25))
#img = image.load_img('apricot.jpg', target_size = (25,25))
array = image.img_to_array(img)
x = np.expand_dims(array, axis=0)
vimage = np.vstack([x])

img_classification = model.predict(vimage)
print(img_classification)
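
Example #14 passes save_format explicitly. As a hedged aside, tf.keras can also infer the format from the target path: an .h5 suffix selects the single-file HDF5 format, while a plain directory path defaults to the TF SavedModel layout.

# Minimal sketch (assumes `model` and the imports from the snippet above).
model.save('model.h5')           # HDF5 format, inferred from the .h5 suffix
model.save('./saved_model_tf')   # TF SavedModel directory (the default)
reloaded = load_model('model.h5')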
Code example #15
File: save_test.py Project: zhangzhg0508/tensorflow
 def test_save_load_hdf5_pathlib(self):
   path = pathlib.Path(self.get_temp_dir()) / 'model'
   save.save_model(self.model, path, save_format='h5')
   save.load_model(path)
Code example #16
File: agent.py Project: MarkusLund/snake
 def load_model(self, id, name):
     self.model = load_model(f'./models/{id}/{name}')
Code example #17
    # NOTE: the beginning of this snippet is missing from the source; the
    # lines below are the tail of what appears to be a train_test_split call.
                                                        y_train,
                                                        test_size=0.1,
                                                        random_state=0)

    del train

    # Cross-validation training
    train_model(x_train, y_train, k)

    del x_train, y_train

    # Load the trained model saved for each fold
    for n in range(k):
        model = load_model('models/model' + str(n) + '.h5',
                           custom_objects={
                               'score': score,
                               'fscore_keras': fscore_keras
                           })
        models.append(model)

    # Generalize model performance (average predictions across folds)
    preds = []
    for model in models:
        preds.append(model.predict(x_test))
        print(mae_over_fscore(y_test, preds[-1]))

    pred = sum(preds) / len(preds)
    print(mae_over_fscore(y_test, pred))

    print("here is rain comparision")
    # 강수량 비교( pred와 real의 강수량 차의 기본 통계량)
Code example #18
def evaluate(model, X_v, y_v):
    """Evaluate model performance."""

    loss, accuracy = model.evaluate(X_v, y_v)
    print('Loss: %.2f' % (loss))
    print('Accuracy: %.2f' % (accuracy * 100))


if __name__ == "__main__":
    sample_training = "../dataset/fv_train_sample.npy"
    sample_validation = "../dataset/fv_validation_sample.npy"
    save_path = "../result/nn_model.h5"

    X, y = load_data(sample_training)
    X_v, y_v = load_data(sample_validation)

    # model = build_feedforward_nn(X, y, save_path)
    model = load_model(save_path)
    evaluate(model, X_v, y_v)
    pre_y = model.predict(X_v)
    pre_y = np.where(pre_y >= .5, 1, 0)
    cnf_matrix = metrics.confusion_matrix(y_v, pre_y)
    print("Confusion Matrix:\n")
    print(cnf_matrix)
    print("Accuracy:", metrics.accuracy_score(y_v, pre_y))
    print("Precision:",
          metrics.precision_score(y_v, pre_y, zero_division='warn'))
    print("Recall:", metrics.recall_score(y_v, pre_y, zero_division='warn'))
    exit()