Example #1
    def load_train(self, config: BuildModelConfig, model_config_path: str = None, model_weights_path: str = None):
        custom_objects = {'ReversedLSTM': ReversedLSTM}
        if config.use_crf:
            # keras-contrib is a separate package; its import path is keras_contrib
            from keras_contrib.layers import CRF
            custom_objects['CRF'] = CRF
        with open(model_config_path, "r", encoding='utf-8') as f:
            self.train_model = model_from_yaml(f.read(), custom_objects=custom_objects)
        self.train_model.load_weights(model_weights_path)

        loss = {}
        metrics = {}
        if config.use_crf:
            out_layer_name = 'crf'
            offset = 0
            if config.use_pos_lm:
                offset += 2
            if config.use_word_lm:
                offset += 2
            loss[out_layer_name] = self.train_model.layers[-1-offset].loss_function
            metrics[out_layer_name] = self.train_model.layers[-1-offset].accuracy
        else:
            out_layer_name = 'main_pred'
            loss[out_layer_name] = 'sparse_categorical_crossentropy'
            metrics[out_layer_name] = 'accuracy'

        if config.use_pos_lm:
            prev_layer_name = 'shifted_pred_prev'
            next_layer_name = 'shifted_pred_next'
            loss[prev_layer_name] = loss[next_layer_name] = 'sparse_categorical_crossentropy'
            metrics[prev_layer_name] = metrics[next_layer_name] = 'accuracy'
        self.train_model.compile(Adam(clipnorm=5.), loss=loss, metrics=metrics)

        self.eval_model = Model(inputs=self.train_model.inputs, outputs=self.train_model.outputs[0])
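
The CRF branch above relies on a keras-contrib convention: the CRF layer
exposes its own loss and metric as attributes on the layer object. A minimal
sketch of that convention (layer sizes and names are illustrative):

from keras.models import Model
from keras.layers import Input, Embedding, Bidirectional, LSTM
from keras_contrib.layers import CRF

inputs = Input(shape=(None,))
x = Embedding(input_dim=10000, output_dim=64)(inputs)
x = Bidirectional(LSTM(64, return_sequences=True))(x)
crf = CRF(units=17, name='crf')  # 17 = number of tags, illustrative
model = Model(inputs, crf(x))
# keras-contrib puts the CRF loss/metric on the layer, which is why
# load_train reads them off train_model.layers[...] above.
model.compile('adam', loss=crf.loss_function, metrics=[crf.accuracy])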
Example #2
    def load_eval(self, config: BuildModelConfig, eval_model_config_path: str,
                  eval_model_weights_path: str) -> None:
        custom_objects = {'ReversedLSTM': ReversedLSTM}
        if config.use_crf:
            from keras_contrib.layers import CRF
            custom_objects['CRF'] = CRF
        with open(eval_model_config_path, "r", encoding='utf-8') as f:
            self.eval_model = model_from_yaml(f.read(), custom_objects=custom_objects)
        self.eval_model.load_weights(eval_model_weights_path)
        # https://github.com/keras-team/keras/issues/6462
        self.eval_model._make_predict_function()
Example #3
def load_keras_model(arch_fpath, weights_fpath, custom_objects=None, n_gpus=0):
    """Load a Keras architecture and weights

    Parameters
    ----------
    arch_fpath: str
        Architecture saved as YAML file
    weights_fpath: str
        Weights saved as h5 file
    custom_objects: dict
        Custom objects needed when loading the Keras model.
    n_gpus: int
        Number of gpus available to run prediction. Default 0.

    Returns
    -------
    model: keras.engine.training.Model
    """

    if not op.splitext(arch_fpath)[-1] == '.yaml':
        raise ValueError('Model filepath must have `.yaml` extension.')
    if not op.splitext(weights_fpath)[-1] == '.h5':
        raise ValueError('Weights filepath must have `.h5` extension.')

    with open(arch_fpath, "r") as yaml_file:
        yaml_architecture = yaml_file.read()

    if n_gpus > 1:
        # Load weights on CPU to avoid taking up GPU space
        with tf.device('/cpu:0'):
            template_model = model_from_yaml(yaml_architecture,
                                             custom_objects=custom_objects)
            template_model.load_weights(weights_fpath)

            for layer in template_model.layers:
                layer.trainable = False

        model = multi_gpu_model(template_model, gpus=n_gpus)
    # If on only 1 gpu (or cpu), train as normal
    else:
        model = model_from_yaml(yaml_architecture,
                                custom_objects=custom_objects)
        model.load_weights(weights_fpath)

        for layer in model.layers:
            layer.trainable = False

    return model
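
A minimal usage sketch for the loader above (file names are illustrative; the
.yaml/.h5 pair is assumed to come from model.to_yaml() and
model.save_weights() on the training side):

import numpy as np

model = load_keras_model('unet.yaml', 'unet.h5', n_gpus=0)
dummy = np.zeros((1,) + model.input_shape[1:])  # dummy batch matching the input
preds = model.predict(dummy)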
Example #4
def predict():
    yaml_file = open("../sensing_data/models/dnn_tf_1_1.yaml", 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    dnn_pred = model_from_yaml(loaded_model_yaml)
    # load weights into new model
    dnn_pred.load_weights("../sensing_data/models/dnn_tf_1_1.h5")
    print("Loaded model from disk")

    dnn_pred.compile(loss='categorical_crossentropy',
                     optimizer='Adam',
                     metrics=['accuracy'])

    dnn_pred.summary()

    X, y, shape = data.load_prediction(ratio=1,
                                       normalize=False,
                                       osm_roads=False,
                                       split_struct=False,
                                       army_gt=False)

    normalizer = preprocessing.Normalizer().fit(X)
    X = normalizer.transform(X)

    y_pred = dnn_pred.predict(X)
    y_pred = [np.argmax(pred) for pred in tqdm(y_pred)]

    kappa = cohen_kappa_score(y - 1, y_pred)
    print(f'Kappa: {kappa}')
    print(classification_report(y - 1, y_pred))

    y_pred = np.array(y_pred)
    yr = y_pred.reshape(shape)

    viz.createGeotiff(OUT_RASTER, yr, REF_FILE, gdal.GDT_Byte)
Example #5
    def load(self, model_config_path: str, model_weights_path: str) -> None:
        with open(model_config_path, "r", encoding='utf-8') as f:
            self.model = model_from_yaml(f.read())
        self.model.load_weights(model_weights_path)
        self.char_layer = TimeDistributed(
            Model(self.model.input_layers[0].output,
                  self.model.layers[-2].input))
Example #6
    def _fit(self, df):
        """Private fit method of the Estimator, which trains the model.
        """
        simple_rdd = df_to_simple_rdd(
            df,
            categorical=self.get_categorical_labels(),
            nb_classes=self.get_nb_classes(),
            features_col=self.getFeaturesCol(),
            label_col=self.getLabelCol())
        simple_rdd = simple_rdd.repartition(self.get_num_workers())
        keras_model = model_from_yaml(self.get_keras_model_config())
        metrics = self.get_metrics()
        loss = self.get_loss()
        optimizer = get_optimizer(self.get_optimizer_config())
        keras_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        spark_model = SparkModel(model=keras_model,
                                 mode=self.get_mode(),
                                 frequency=self.get_frequency(),
                                 num_workers=self.get_num_workers())
        spark_model.fit(simple_rdd,
                        epochs=self.get_epochs(),
                        batch_size=self.get_batch_size(),
                        verbose=self.get_verbosity(),
                        validation_split=self.get_validation_split())

        model_weights = spark_model.master_network.get_weights()
        weights = simple_rdd.ctx.broadcast(model_weights)
        return ElephasTransformer(
            labelCol=self.getLabelCol(),
            outputCol='prediction',
            keras_model_config=spark_model.master_network.to_yaml(),
            weights=weights,
            loss=loss)
Example #7
    def train(self, data_iterator):
        """Train a keras model on a worker
        """
        history = None
        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss,
                           metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        weights_before_training = self.model.get_weights()
        if x_train.shape[0] > self.train_config.get('batch_size'):
            history = self.model.fit(x_train, y_train, **self.train_config)
        weights_after_training = self.model.get_weights()
        deltas = subtract_params(weights_before_training,
                                 weights_after_training)
        if history:
            yield [deltas, history.history]
        else:
            yield [deltas, None]
Example #8
    def __init__(self, corpus_path, model_path, weights_path):
        with open(corpus_path, encoding='utf-16') as corpus_file:
            self.corpus = corpus_file.read()

        # Get a unique identifier for each char in the corpus,
        # then make some dicts to ease encoding and decoding

        self.chars = sorted(list(set(self.corpus)))
        self.encoding = {c: i for i, c in enumerate(self.chars)}
        self.decoding = {i: c for i, c in enumerate(self.chars)}

        print(len(self.chars))
        print(self.chars)

        # Some fields we'll need later
        self.num_chars = len(self.chars)
        self.sentence_length = 50
        self.corpus_length = len(self.corpus)

        # Build our network from loaded architecture and weights
        with open(model_path) as model_file:
            architecture = model_file.read()

        self.model = model_from_yaml(architecture)
        self.model.load_weights(weights_path)
        self.model.compile(loss='categorical_crossentropy', optimizer='adam')
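
The encoding/decoding dicts built above map each character to an integer id
and back. A tiny round-trip illustration (corpus text is illustrative):

chars = sorted(set("hello world"))
encoding = {c: i for i, c in enumerate(chars)}
decoding = {i: c for i, c in enumerate(chars)}
ids = [encoding[c] for c in "hello"]
assert ''.join(decoding[i] for i in ids) == "hello"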
Example #9
    def load(self):
        log.info("loading project %s ..." % self.path)

        if not self.exists():
            return "%s does not exist" % self.path

        err = self.logic.load()
        if err is not None:
            return err

        if os.path.exists(self.weights_path):
            log.debug("loading model from %s ...", self.weights_path)
            self.model = load_model(self.weights_path)
            # https://github.com/keras-team/keras/issues/6462
            self.model.make_predict_function()

        elif os.path.exists(self.model_path):
            log.debug("loading model from %s ...", self.model_path)
            with open(self.model_path, 'r') as fp:
                self.model = model_from_yaml(fp.read())

        if os.path.exists(self.history_path):
            log.debug("loading history from %s ...", self.history_path)
            with open(self.history_path, 'r') as fp:
                self.history = json.load(fp)

        if os.path.exists(self.classes_path):
            log.debug("loading classes from %s ...", self.classes_path)
            with open(self.classes_path, 'r') as fp:
                self.classes = {int(k): v for k, v in json.load(fp).items()}

        return None
Example #10
    def load_model(self):
        print("Load model...")
        # open model file.
        model_path = self.yml["Modelsetting"]["model_path"]
        if "json" in model_path:
            model = model_from_json(open(model_path).read())
        elif "yaml" in model_path:
            model = model_from_yaml(open(model_path).read())
        else:
            model = load_model(model_path)

        # Load the model weights.
        if self.yml["Modelsetting"]["retrain_model"]:
            print("Load model weight.")
            model.load_weights(self.yml["Modelsetting"]["weight_path"])

        # Freeze the model if it should not be trainable.
        if not self.yml["Modelsetting"]["trainable"]:
            model.trainable = False

        # Compile the model and print its summary.
        model.compile(loss=self.set_modelloss(self.yml["Modelsetting"]["model_loss"]),
                      optimizer=self.set_optimizers(self.yml["Modelsetting"]["optimizers"],
                                                    self.yml["Trainsetting"]["learnrate"]),
                      metrics=["accuracy"])
        print("compile ok. summmury")
        # Display the results of the compiled model.
        model.summary()

        return model
Example #11
def extract_features_and_predict(model_yaml: str, custom_objects: dict,
                                 features_col: str,
                                 model_type: ModelType, data):
    model = model_from_yaml(model_yaml, custom_objects)
    model.set_weights(weights.value)
    predict_function = determine_predict_function(model, model_type)
    return predict_function(
        np.stack([from_vector(x[features_col]) for x in data]))
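
Note that weights is not a parameter here: in Elephas this function runs
inside a Spark mapPartitions closure, where weights is a broadcast variable
holding the trained parameters. A minimal self-contained sketch of that
pattern, with illustrative names:

import numpy as np
from pyspark.sql import SparkSession

sc = SparkSession.builder.getOrCreate().sparkContext
weights = sc.broadcast(np.arange(4.0))  # stand-in for model weights

def score_partition(rows):
    w = weights.value  # read the broadcast value on the worker
    for row in rows:
        yield float(np.dot(w, row))

rdd = sc.parallelize([np.ones(4), np.full(4, 2.0)], numSlices=2)
print(rdd.mapPartitions(score_partition).collect())  # [6.0, 12.0]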
Example #12
    def load_model_from_yaml(self, file_name):
        with open(file_name, 'r') as yaml_model:
            loaded_model_yaml = yaml_model.read()
        loaded_model = model_from_yaml(loaded_model_yaml)

        return loaded_model
Example #13
def _evaluate(model, optimizer, loss, custom_objects, metrics, kwargs, data_iterator):
    model = model_from_yaml(model, custom_objects)
    model.compile(optimizer, loss, metrics)
    model.set_weights(weights.value)
    feature_iterator, label_iterator = tee(data_iterator, 2)
    x_test = np.asarray([x for x, y in feature_iterator])
    y_test = np.asarray([y for x, y in label_iterator])
    return [model.evaluate(x_test, y_test, **kwargs)]
Example #14
def loadKerasModel(filepath, string_model=None, format_export='json'):
    if string_model is None:
        with open(filepath, 'r') as file:
            string_model = file.read()
    if format_export == 'json':
        model = model_from_json(string_model)
    else:
        model = model_from_yaml(string_model)
    return model
Example #15
def loadModel(yamlPathName, h5PathName):
    with open(yamlPathName + '.yaml', 'r') as yaml_file:
        loaded_model_yaml = yaml_file.read()
        loaded_model = model_from_yaml(loaded_model_yaml)
        loaded_model.load_weights(h5PathName + '.h5')
    loaded_model.compile(loss='binary_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])
    return loaded_model
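
The loader above expects base paths without extensions; a matching save-side
sketch (function name is illustrative) that produces the .yaml/.h5 pair:

def saveModel(model, yamlPathName, h5PathName):
    # Counterpart to loadModel: write architecture and weights separately.
    with open(yamlPathName + '.yaml', 'w') as yaml_file:
        yaml_file.write(model.to_yaml())
    model.save_weights(h5PathName + '.h5')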
Example #16
    def train(self, data_iterator):
        """Train a keras model on a worker and send asynchronous updates
        to parameter server
        """
        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        if x_train.size == 0:
            return

        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=get_optimizer(self.master_optimizer),
                           loss=self.master_loss,
                           metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        epochs = self.train_config['epochs']
        batch_size = self.train_config.get('batch_size')
        nb_train_sample = x_train.shape[0]
        nb_batch = int(np.ceil(nb_train_sample / float(batch_size)))
        index_array = np.arange(nb_train_sample)
        batches = [(i * batch_size, min(nb_train_sample, (i + 1) * batch_size))
                   for i in range(0, nb_batch)]

        if self.frequency == 'epoch':
            for epoch in range(epochs):
                weights_before_training = self.client.get_parameters()
                self.model.set_weights(weights_before_training)
                self.train_config['epochs'] = 1
                if x_train.shape[0] > batch_size:
                    self.model.fit(x_train, y_train, **self.train_config)
                self.train_config['epochs'] = epochs
                weights_after_training = self.model.get_weights()
                deltas = subtract_params(weights_before_training,
                                         weights_after_training)
                self.client.update_parameters(deltas)
        elif self.frequency == 'batch':
            for epoch in range(epochs):
                if x_train.shape[0] > batch_size:
                    for (batch_start, batch_end) in batches:
                        weights_before_training = self.client.get_parameters()
                        self.model.set_weights(weights_before_training)
                        batch_ids = index_array[batch_start:batch_end]
                        x = slice_arrays(x_train, batch_ids)
                        y = slice_arrays(y_train, batch_ids)
                        self.model.train_on_batch(x, y)
                        weights_after_training = self.model.get_weights()
                        deltas = subtract_params(weights_before_training,
                                                 weights_after_training)
                        self.client.update_parameters(deltas)
        else:
            raise ValueError(
                'frequency parameter can be `epoch` or `batch`, got {}'.format(
                    self.frequency))
        yield []
Example #17
def loadModel(MODEL_NAME):
    print('Loading Model..')
    model = model_from_yaml(open(MODEL_NAME + '.yaml').read())
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.load_weights(MODEL_NAME + '.h5')
    model.summary()
    print('Done')
    return model
Example #18
def load_model():
    y_fil = os.path.join(MODEL_FOLDER, GENERATOR_YAML)
    w_fil = os.path.join(MODEL_FOLDER, GENERATOR_H5)
    with open(y_fil, 'r') as yf:
        yaml = yf.read()

    model = model_from_yaml(yaml)
    model.load_weights(w_fil)

    return model
Example #19
def load(path):

    output_dir = Path(path)
    with open(output_dir / 'model_config.yaml') as file:
        model_config = file.read()

    model = model_from_yaml(model_config)
    model.load_weights(output_dir / "model.h5")

    return model
Example #20
def load_model(input_model_path, input_json_path=None, input_yaml_path=None):
    if not Path(input_model_path).exists():
        raise FileNotFoundError('Model file `{}` does not exist.'.format(input_model_path))
    try:
        # model = keras.models.load_model(input_model_path, compile=False)
        with open(input_json_path) as json_file:
            json_config = json_file.read()
            model = tf.keras.models.model_from_json(json_config, custom_objects={'tf': tf})

            # Load weights
            model.load_weights(input_model_path)
            return model
    except FileNotFoundError as err:
        logging.error('Input model file (%s) does not exist.', input_model_path)
        raise err
    except ValueError as wrong_file_err:
        if input_json_path:
            if not Path(input_json_path).exists():
                raise FileNotFoundError(
                    'Model description json file `{}` does not exist.'.format(
                        input_json_path))
            try:
                model = model_from_json(open(str(input_json_path)).read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from json.")
                raise err
        elif input_yaml_path:
            if not Path(input_yaml_path).exists():
                raise FileNotFoundError(
                    'Model description yaml file `{}` does not exist.'.format(
                        input_yaml_path))
            try:
                model = model_from_yaml(open(str(input_yaml_path)).read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from yaml.")
                raise err
        else:
            logging.error(
                'Input file specified only holds the weights, and not '
                'the model definition. Save the model using '
                'model.save(filename.h5) which will contain the network '
                'architecture as well as its weights. '
                'If the model is saved using the '
                'model.save_weights(filename) function, either '
                'input_model_json or input_model_yaml flags should be set '
                'to import the network architecture prior to loading the '
                'weights. \n'
                'Check the keras documentation for more details '
                '(https://keras.io/getting-started/faq/)')
            raise wrong_file_err
Example #21
def load_model(path="results/"):

    yaml_file = open(path + 'model.yaml', 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    loaded_model = models.model_from_yaml(loaded_model_yaml)
    # load weights into new model
    loaded_model.load_weights(
        "machine_learning/models/pl_sb_model/model_saved/model.h5")
    print("Loaded model from disk")
    return loaded_model
Example #22
def get_model():
    print('loading model......')
    # Restore the LSTM model configuration from the YAML file
    with open('../model/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f, Loader=yaml.FullLoader)
    model = model_from_yaml(yaml_string)
    #model = build_model()
    model.load_weights('../model/lstm.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #23
def load_from_yaml_with_weights(file_path,
                                weight_path=None,
                                custom_objects=None,
                                **kwargs):
    '''Load a model from a YAML architecture file and attach its weights.

    If weight_path is None, both paths are derived from file_path via
    find_architecture_and_weight_paths.
    '''
    if weight_path is None:
        file_path, weight_path = find_architecture_and_weight_paths(file_path)
    with open(file_path, 'r') as fin:
        model = model_from_yaml(fin.read(), custom_objects=custom_objects)
    model.load_weights(weight_path)
    return model
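
A plausible implementation of the find_architecture_and_weight_paths helper
assumed above (hypothetical; the real project may derive the paths
differently):

import os.path as op

def find_architecture_and_weight_paths(file_path):
    # Hypothetical helper: 'model' or 'model.yaml' -> ('model.yaml', 'model.h5')
    base = op.splitext(file_path)[0]
    return base + '.yaml', base + '.h5'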
Example #24
def load_model(model_fpath, weights_fpath):
    """Load a model from yaml architecture and h5 weights."""
    assert model_fpath[-5:] == '.yaml'
    assert weights_fpath[-3:] == '.h5'

    with open(model_fpath, "r") as yaml_file:
        yaml_architecture = yaml_file.read()

    model = model_from_yaml(yaml_architecture)
    model.load_weights(weights_fpath)

    return model
Example #25
def extract_features_and_predict(model_yaml: str,
                                 custom_objects: dict,
                                 features_col: str,
                                 data,
                                 inference_batch_size: int = None):
    model = model_from_yaml(model_yaml, custom_objects)
    model.set_weights(weights.value)
    if inference_batch_size is not None and inference_batch_size > 0:
        return batched_prediction(data, inference_batch_size,
                                  features_col, model.predict)
    else:
        return model.predict(
            np.array([from_vector(x[features_col]) for x in data]))
Example #26
def load_model(model_fpath, weights_fpath):
    """Load a model from yaml architecture and h5 weights."""
    assert model_fpath[-5:] == '.yaml'
    assert weights_fpath[-3:] == '.h5'

    with open(model_fpath, "r") as yaml_file:
        yaml_architecture = yaml_file.read()

    model = model_from_yaml(yaml_architecture)
    # Known issue with pyyaml 5.3.1; downgrading to 3.13 works around it.
    model.load_weights(weights_fpath)

    return model
Example #27
    def load_data():
        d = load("labelencoder1_X.joblib1.dat")
        yaml_file = open('model.yaml', 'r')
        loaded_model_yaml1 = yaml_file.read()
        yaml_file.close()
        loaded_model2 = model_from_yaml(loaded_model_yaml1)
        # load weights into new model
        loaded_model2.load_weights("model.h5")
        print("Loaded model from disk")
        test_case = load("test_case1.joblib1.dat")
        test_case = array(test_case).reshape(1, 1, 6)

        return loaded_model2, test_case
Example #28
    def reload_model(self):
        K.clear_session()

        if os.path.exists(self.weights_path):
            self.model = load_model(self.weights_path)
            # https://github.com/keras-team/keras/issues/6462
            self.model._make_predict_function()
        elif os.path.exists(self.model_path):
            with open(self.model_path, 'r') as fp:
                self.model = model_from_yaml(fp.read())
        else:
            self.model = self.logic.builder(True)

        gc.collect()
Example #29
    def loadModels(cls, filename, type):
        # load json and create model
        if type == 'json':
            json_file = open(filename, 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            loaded_model = model_from_json(loaded_model_json)
        elif type == 'yaml':
            yaml_file = open(filename, 'r')
            loaded_model_yaml = yaml_file.read()
            yaml_file.close()
            loaded_model = model_from_yaml(loaded_model_yaml)
        else:
            raise ValueError("type must be 'json' or 'yaml'")

        return loaded_model
Example #30
def lstm_predict(string):
    print('loading model......')
    with open('./store/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f, Loader=yaml.FullLoader)
    model = model_from_yaml(yaml_string)

    print('loading weights......')
    model.load_weights('./store/lstm.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    data = input_transform(string)
    data = data.reshape(1, -1)
    result = model.predict_classes(data)
    return result
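
Note: model_from_yaml was removed from Keras in TensorFlow 2.6 because
loading YAML can execute arbitrary code. On current versions the same
architecture-plus-weights pattern uses JSON instead; a minimal sketch:

from tensorflow.keras.models import model_from_json

# Save side: model.to_json() replaces model.to_yaml().
with open('model.json', 'r') as f:
    model = model_from_json(f.read())
model.load_weights('model.h5')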