Example #1
def load_model_from_file(model_path):
    json_file = open(model_path, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    print("Loaded model from disk")
    return loaded_model
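For reference, the save-side counterpart that produces the two files these loaders expect is a short sketch like the one below (hypothetical paths, assuming a built Keras model named `model`):

# sketch: serialize the architecture to JSON and the weights to HDF5
with open("model.json", "w") as json_file:
    json_file.write(model.to_json())
model.save_weights("model.h5")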
Example #2
File: _trvae.py Project: theislab/trVAE
    def restore_model_config(self, compile=True):
        """
            restores model config from ``model_path``.

            Parameters
            ----------
            compile: bool
                if ``True`` will compile model after restoring its config.

            Returns
            -------
            ``True`` if the model config has been successfully restored.
            ``False`` if `model_path` is invalid or the model config couldn't be found in the specified ``model_path``.
        """
        if os.path.exists(os.path.join(self.model_path, f"{self.model_name}.json")):
            # read as text and close the handle; model_from_json expects a str
            with open(os.path.join(self.model_path, f"{self.model_name}.json"), 'r') as json_file:
                loaded_model_json = json_file.read()
            self.cvae_model = model_from_json(loaded_model_json)
            self.encoder_model = self.cvae_model.get_layer("encoder")
            self.decoder_model = self.cvae_model.get_layer("decoder")

            if compile:
                self.compile_models()

            print(f"{self.model_name}'s network's config has been successfully restored!")
            return True
        else:
            return False
Example #3
def up_down_test():
    json_file = open(model_path + 'lstm_model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    model.load_weights(model_path + "lstm_model.h5")
    print("=======Loaded model ok========")
    predicted = model.predict(test_x)

    print("===========test==========")
    # max_error = [0.2, 0.15, 0.1, 0.08, 0.05, 0.03, 0.02, 0.01]
    max_error = [0.08]
    for error_rate in max_error:
        c = 0
        a = 0
        print("===========test==========", error_rate)
        for i in range(len(predicted)):
            # use the absolute relative error; the signed version counts every under-prediction as correct
            if abs(predicted[i] - test_y[i]) / test_y[i] < error_rate:
                c += 1
            a += 1
        print("=====accuracy=====", str(c / a))
    fig = plt.figure()
    plt.plot(predicted[0:100], color='r', label='predicted_data')
    plt.plot(test_y[0:100], color='b', label='real_data')
    plt.legend()
    plt.show()
    fig.savefig('temp.png')
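The element-wise accuracy loop above can be vectorized; a sketch with NumPy over the same `predicted` and `test_y` arrays (assumes strictly positive targets):

import numpy as np

# vectorized relative-error accuracy, equivalent to the loop above
rel_error = np.abs(predicted.ravel() - test_y.ravel()) / test_y.ravel()
for error_rate in [0.08]:
    print("=====accuracy=====", np.mean(rel_error < error_rate))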
Example #4
 def __init__(self):
     """Implementation of initialization"""
     # load json and create model
     self.model = None
     if os.path.exists('Models/BiLSTM_Glove_de_identification_model.json'):
         json_file = open(
             'Models/BiLSTM_Glove_de_identification_model.json', 'r')
         loaded_model_json = json_file.read()
         json_file.close()
         self.model = model_from_json(loaded_model_json)
     self.GLOVE_DIR = "Resources/"
     if not os.path.isdir(self.GLOVE_DIR) or not os.path.isfile(
             self.GLOVE_DIR + "glove.840B.300d.txt"):
         if not os.path.exists(self.GLOVE_DIR):
             os.mkdir(self.GLOVE_DIR)
         print('Beginning file download with urllib.request...')
         url = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
         urllib.request.urlretrieve(url,
                                    self.GLOVE_DIR + 'glove.840B.300d.zip')
         with ZipFile(self.GLOVE_DIR + 'glove.840B.300d.zip',
                      'r') as zipObj:
             # Extract all the contents of zip file in current directory
             zipObj.extractall(self.GLOVE_DIR)
         os.remove(self.GLOVE_DIR + "glove.840B.300d.zip")
     if self.model is None:
         raise FileNotFoundError(
             'Models/BiLSTM_Glove_de_identification_model.json not found')
     # load weights into the restored model
     self.model.load_weights(
         "Models/BiLSTM_Glove_de_identification_model.h5")
     print("Loaded model from disk")
     self.model.compile(loss='categorical_crossentropy',
                        optimizer='rmsprop',
                        metrics=['accuracy'])
     self.word_index = pickle.load(open("Models/word_index.pkl", "rb"))
     self.MAX_SEQUENCE_LENGTH = 200
     self.EMBEDDING_DIM = 300
     self.MAX_NB_WORDS = 2200000
Example #5
def test_process():
    modelname_text = open("model.json").read()
    json_strings = modelname_text.split('##########')
    textlist = json_strings[1].replace("[",
                                       "").replace("]",
                                                   "").replace("\'",
                                                               "").split()
    model = model_from_json(json_strings[0])
    model.load_weights("last.hdf5")  # best.hdf5 で損失最小のパラメータを使用

    x_test, y_test, filenames = load_images(TEST_DIR)

    preds = model.predict(x_test, batch_size=1, verbose=0)

    collect = 0
    for i, pred in enumerate(preds):
        result = np.argmax(pred)
        if y_test[i][result] == 1:
            collect += 1
        print(filenames[i])
        print(str(pred))
        print(">> 「" + CLASSES[result] + "」")

    print(f"試験数: {len(y_test)}")
    print(f"正解数: {collect}")
    print(f"正解率: {collect / len(y_test)}")
    return result
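The counting loop in this example can also be written with `np.argmax` over the whole batch; a minimal sketch, assuming `y_test` is one-hot encoded:

import numpy as np

# predicted class indices vs. one-hot ground truth
pred_labels = np.argmax(preds, axis=1)
true_labels = np.argmax(y_test, axis=1)
accuracy = float(np.mean(pred_labels == true_labels))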
Example #6
def image2vect():
    json_file = open(datadir + 'ckplus.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    ckplus = model_from_json(loaded_model_json)

    # load weights into new model
    ckplus.load_weights(datadir + 'ckplus.h5')
    # ckplus.summary()

    layer_name = 'flatten_1'
    # Keras 2 uses the plural keyword arguments inputs/outputs
    intermediate_layer_model = Model(
        inputs=ckplus.input, outputs=ckplus.get_layer(layer_name).output)
    print(ckplus.input)
    x_fname = datadir + 'x_train.npy'
    x_train = np.load(x_fname)

    videos = []
    for i, video in enumerate(x_train):
        print(i)
        images = []
        for image in video:
            resized = (np.moveaxis(image, -1, 0)).reshape((1, 1, 48, 48))
            # print(resized.shape)
            vector = intermediate_layer_model.predict(resized)
            # print(vector.shape)
            images.append(vector.reshape(4608))
        videos.append(images)
        # print(np.copy(videos).shape)
    videos = np.copy(videos)
    print(videos.shape)

    np.save(datadir + 'x_train_vec', videos)
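Calling `predict` once per frame is slow; the frames of one video can be batched into a single call. A sketch of the inner-loop replacement (same shapes as above: frames of (48, 48, 1) in, 4608-d vectors out):

# batched replacement for the per-image loop over one video
frames = np.moveaxis(video, -1, 1)                  # (num_frames, 1, 48, 48)
vectors = intermediate_layer_model.predict(frames)  # (num_frames, 4608)
videos.append(list(vectors))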
Example #7
def load_from_file(name):
    json_file = open(name + ".json", 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(name + ".h5")
    print("Loaded model from disk")
Example #8
File: net.py Project: netflymachine/SCNET
 def load(self, model_path, type='json'):
     if type == 'json':
         # load architecture from JSON
         self.model = model_from_json(open(model_path + ".json").read())
         # model_main.trainable = False
         self.model.load_weights(model_path + ".h5")
     else:
         # load_model is the module-level Keras function, not a method on the model
         self.model = load_model(model_path + ".h5")
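When the full model (architecture, weights, optimizer state) was saved with `model.save()`, the whole round trip is a single call to the standalone function; a minimal sketch:

from keras.models import load_model

# load_model is a module-level function, not a Model method
model = load_model(model_path + ".h5")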
Example #9
def load_models():
    json_file = open('MobileNet.json', 'r')
    model = json_file.read()
    json_file.close()
    model = model_from_json(model)
    model.load_weights('DMNfullmodel.h5')
    model_emotions = load_model('distilled_model.h5', compile=False)
    return model, model_emotions
Example #10
def loadModel(name):
    json_file = open(name + '.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights(name + ".h5")

    return model
Example #11
def load_model():
    json_file = open("model.json", 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("weights.h5")
    loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return loaded_model
Example #12
def load_model():
    json_file = open('model/embedding_v3.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model/embedding_v3.h5")
    print("Loaded model from disk")
    return loaded_model
Example #13
def load_model_from_file(name):
    with open(name + ".json", 'r') as f:
        model = model_from_json(f.read())
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.load_weights(name + ".h5")
    return model
Example #14
def test(test_img_path, test_dm_path):
    test_x = gen_var_from_paths(test_img_path[:], unit_len=None)
    test_y = gen_var_from_paths(test_dm_path[:], stride=8, unit_len=None)
    test_x = norm_by_imagenet(test_x)

    dataset = 'A'
    net = 'CSRNet'

    dis_idx = 16 if dataset == 'B' else 0
    weights_dir_neo = 'D:/Crowd Web/crowd_web/upload/weights_A_MSE_bestMAE294.332_Sun-Jul-14'
    model = model_from_json(open('models/{}.json'.format(net), 'r').read())
    model.load_weights(os.path.join(weights_dir_neo, '{}_best.hdf5'.format(net)))
    ct_preds = []
    ct_gts = []

    for i in range(len(test_x[:])):
        if i % 100 == 0:
            print('{}/{}'.format(i, len(test_x)))
        test_x_display = np.squeeze(test_x[i])
        test_y_display = np.squeeze(test_y[i])
        path_test_display = test_img_path[i]
        pred = np.squeeze(model.predict(np.expand_dims(test_x_display, axis=0)))
        ct_pred = np.sum(pred)
        ct_gt = round(np.sum(test_y_display))
        ct_preds.append(ct_pred)
        ct_gts.append(ct_gt)

    plt.plot(ct_preds, 'r>')
    plt.plot(ct_gts, 'b+')
    plt.legend(['ct_preds', 'ct_gts'])
    plt.title('Pred vs GT')
    plt.show()
    error = np.array(ct_preds) - np.array(ct_gts)
    plt.plot(error)
    plt.title('Pred - GT, mean = {}, MAE={}'.format(
        str(round(np.mean(error), 3)),
        str(round(np.mean(np.abs(error)), 3))
    ))
    plt.show()
    idx_max_error = np.argsort(np.abs(error))[::-1]

    # Show the 5 worst samples
    for worst_idx in idx_max_error[:5].tolist() + [dis_idx]:
        test_x_display = np.squeeze(test_x[worst_idx])
        test_y_display = np.squeeze(test_y[worst_idx])
        path_test_display = test_img_path[worst_idx]
        pred = np.squeeze(model.predict(np.expand_dims(test_x_display, axis=0)))
        fg, (ax_x_ori, ax_y, ax_pred) = plt.subplots(1, 3, figsize=(20, 4))
        ax_x_ori.imshow(cv2.cvtColor(cv2.imread(path_test_display), cv2.COLOR_BGR2RGB))
        ax_x_ori.set_title('Original Image')
        ax_y.imshow(test_y_display, cmap=plt.cm.jet)
        ax_y.set_title('Ground_truth: ' + str(np.sum(test_y_display)))
        ax_pred.imshow(pred, cmap=plt.cm.jet)
        ax_pred.set_title('Prediction: ' + str(np.sum(pred)))
        plt.show()
Example #15
File: test_train.py Project: mysilver/PhD
def define_models(num_encoder_tokens, num_decoder_tokens):
    # Define an input sequence and process it.
    encoder_inputs = Input(
        shape=(None, num_encoder_tokens))  # num_encoder_tokens = 300
    encoder = LSTM(latent_dim, return_state=True)
    encoder_outputs, state_h, state_c = encoder(encoder_inputs)
    # We discard `encoder_outputs` and only keep the states.
    encoder_states = [state_h, state_c]

    # Set up the decoder, using `encoder_states` as initial state.
    decoder_inputs = Input(shape=(None, num_decoder_tokens))  # 3000
    # We set up our decoder to return full output sequences,
    # and to return internal states as well. We don't use the
    # return states in the training model, but we will use them in inference.
    decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
    decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                         initial_state=encoder_states)
    decoder_dense = Dense(num_decoder_tokens, activation='softmax')
    decoder_outputs = decoder_dense(decoder_outputs)

    # Define the model that will turn
    # `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)

    # Run training
    try:
        json_file = open('check-points/model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights("check-points/best-epoch.hdf5")
    except Exception:
        print('No saved model')

    try:
        model = multi_gpu_model(model, cpu_relocation=True)
        print("Training using multiple GPUs..")
    except Exception:
        print("Training using single GPU or CPU..")

    with open("check-points/model.json", "w") as json_file:
        json_file.write(model.to_json())
    # Define sampling models
    encoder_model = Model(encoder_inputs, encoder_states)

    decoder_state_input_h = Input(shape=(latent_dim, ))
    decoder_state_input_c = Input(shape=(latent_dim, ))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    decoder_outputs, state_h, state_c = decoder_lstm(
        decoder_inputs, initial_state=decoder_states_inputs)
    decoder_states = [state_h, state_c]
    decoder_outputs = decoder_dense(decoder_outputs)
    decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                          [decoder_outputs] + decoder_states)

    return model, encoder_model, decoder_model
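For context, the sampling models returned here are normally driven by a greedy decoding loop; a sketch under assumed conventions (index 0 as the start symbol; token lookup tables are hypothetical):

import numpy as np

def decode_sequence(input_seq, encoder_model, decoder_model,
                    num_decoder_tokens, max_len=50):
    # encode the source sequence into the initial decoder state
    states = encoder_model.predict(input_seq)
    # seed the decoder with a one-hot start token (index 0 assumed)
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, 0] = 1.0
    decoded = []
    for _ in range(max_len):
        output_tokens, h, c = decoder_model.predict([target_seq] + states)
        token_idx = int(np.argmax(output_tokens[0, -1, :]))
        decoded.append(token_idx)
        # feed the prediction back as the next decoder input
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, token_idx] = 1.0
        states = [h, c]
    return decoded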
Example #16
def load_model(project_path, model_name):
    # load json and create model
    json_file = open(os.path.join(project_path, '%s.json' % model_name), 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(os.path.join(project_path, '%s.h5' % model_name))
    print("Loaded model from disk")
    return loaded_model
Example #17
def load_model(path_src):
    json_file = open(path_src + ".json", 'r')
    model_json = json_file.read()
    json_file.close()
    model = model_from_json(model_json)
    model.load_weights(path_src + ".h5")
    model._make_predict_function()  # needed for multi-threaded inference in old Keras/TF1
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
Example #18
 def predict(self, test_x):
     test_x = self.getData(test_x)
     if not self.model:
         json_file = open('lstm_model.json', 'r')
         loaded_model_json = json_file.read()
         json_file.close()
         self.model = model_from_json(loaded_model_json)
         self.model.load_weights("lstm_model.h5")
     predicted = self.model.predict(test_x)
     return predicted
Example #19
def decoder_hook(d):
    if d.get('__enum__'):
        name, member = d["__enum__"].split(".")
        return getattr(PUBLIC_ENUMS[name], member)
    if d.get('__sequential__'):
        return model_from_json(d["__sequential__"])
    if d.get('__isoformat__'):
        return dp.parse(d.get('__isoformat__'))
    else:
        return d
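A hook like this is passed to `json.loads` via `object_hook`, with a matching encoder on the write side; a sketch of the round trip (`serialized` and the encoder shown are hypothetical counterparts):

import json

# hypothetical encoder counterpart for the '__sequential__' tag
def encode_model(model):
    return {"__sequential__": model.to_json()}

# the hook runs on every decoded JSON object
restored = json.loads(serialized, object_hook=decoder_hook)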
Example #20
def readModelFromJSON():
    json_file = open('./model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()

    model = model_from_json(loaded_model_json)
    model.load_weights("./model.h5")
    model.summary()
    print("Model okundu.")
    return model
Example #21
File: run.py Project: 0xballistics/wamdare
def load_model(model_name):
    save_dir = config.MODEL_SAVE_DIR
    json_file = open('{}/{}.json'.format(save_dir, model_name), 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("{}/{}.h5".format(save_dir, model_name))

    return loaded_model
Example #22
def test_lstm(x):
    json_file = open('lstm_model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    model.load_weights("lstm_model.h5")
    print("=======Loaded model ok========")
    predicted = model.predict(x)
    print("=====predict as======", predicted)
    print("=====previous =======", x[-1][-1])
Example #23
def plot_weight(crime_type):
    # load the trained models
    model_2014 = model_from_json(
        open('result/' + crime_type + '2014.json', 'r').read())
    model_2015 = model_from_json(
        open('result/' + crime_type + '2015.json', 'r').read())
    model_2016 = model_from_json(
        open('result/' + crime_type + '2016.json', 'r').read())
    model_2017 = model_from_json(
        open('result/' + crime_type + '2017.json', 'r').read())
    model_2018 = model_from_json(
        open('result/' + crime_type + '2018.json', 'r').read())
    # load the weights
    model_2014.load_weights('result/' + crime_type + '2014.hdf5')
    model_2015.load_weights('result/' + crime_type + '2015.hdf5')
    model_2016.load_weights('result/' + crime_type + '2016.hdf5')
    model_2017.load_weights('result/' + crime_type + '2017.hdf5')
    model_2018.load_weights('result/' + crime_type + '2018.hdf5')
    # extract the input-layer weights
    w_2014 = model_2014.layers[0].get_weights()[0]
    w_2015 = model_2015.layers[0].get_weights()[0]
    w_2016 = model_2016.layers[0].get_weights()[0]
    w_2017 = model_2017.layers[0].get_weights()[0]
    w_2018 = model_2018.layers[0].get_weights()[0]
    # plot the input-layer weights
    fp = FontProperties(fname=r'C:\WINDOWS\Fonts\msgothic.ttc', size=14)
    fig = pyplot.figure(figsize=(12, 6))
    pyplot.plot((w_2014**2).mean(axis=1), 'o-', label='2014')
    pyplot.plot((w_2015**2).mean(axis=1), '^-', label='2015')
    pyplot.plot((w_2016**2).mean(axis=1), 's-', label='2016')
    pyplot.plot((w_2017**2).mean(axis=1), 'x-', label='2017')
    pyplot.plot((w_2018**2).mean(axis=1), 'D-', label='2018')
    pyplot.xlabel('Input unit', fontproperties=fp)
    pyplot.ylabel('Mean weight value', fontproperties=fp)
    pyplot.legend(prop=fp)
    pyplot.show()
    fig.savefig('result/weight.svg',
                dpi=300,
                facecolor='None',
                edgecolor='None',
                transparent=True,
                format='svg')
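The five near-identical load/extract/plot blocks above could be collapsed into one loop; a sketch over the same file paths and plot markers:

# loop variant of the repeated per-year blocks
for year, marker in zip(['2014', '2015', '2016', '2017', '2018'],
                        ['o-', '^-', 's-', 'x-', 'D-']):
    m = model_from_json(open('result/' + crime_type + year + '.json', 'r').read())
    m.load_weights('result/' + crime_type + year + '.hdf5')
    w = m.layers[0].get_weights()[0]
    pyplot.plot((w**2).mean(axis=1), marker, label=year)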
Example #24
def init(model):
    json_path = 'models/bin/' + model + '.json'
    h5_path = 'models/bin/' + model + '.h5'
    json_file = open(json_path, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(h5_path)
    loaded_model.compile(loss='mean_squared_error', optimizer='adam')
    graph = get_default_graph()
    return loaded_model, graph
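The returned graph matters when the model later serves predictions from a different thread (the usual TF1 + web-server pattern); a usage sketch (`batch` is a hypothetical input array):

# inside a request-handler thread
with graph.as_default():
    prediction = loaded_model.predict(batch)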
Example #25
 def load_model(self, model_json_file, model_weights_file):
     json_file = open(model_json_file, 'r')
     loaded_model_json = json_file.read()
     json_file.close()
     loaded_model = model_from_json(loaded_model_json)
     loaded_model.load_weights(model_weights_file)
     optimizer = RMSprop(lr=0.0001, clipvalue=1.0)
     loaded_model.compile(loss='categorical_crossentropy',
                          optimizer=optimizer)
     loaded_model.summary()
     return loaded_model
Example #26
def build_model():
    # load json
    json_file = open(BACKUP_PATH + "/" + filename + '.json', 'r')
    model_json = json_file.read()
    json_file.close()

    # rebuild the model from its JSON config, then load the weights
    new_model = model_from_json(model_json)
    new_model.load_weights(BACKUP_PATH + "/" + filename + ".h5")

    return new_model
Example #27
 def load_model(self, object_class, core_width, core_height):
     file_name = "class" + str(object_class) + str(core_width) + str(
         core_height)
     json_file = open(
         'networks/' + self.data_folder + "/" + file_name + '.json', 'r')
     loaded_model_json = json_file.read()
     json_file.close()
     loaded_model = model_from_json(loaded_model_json)
     loaded_model.load_weights('networks/' + self.data_folder + "/" +
                               file_name + '.h5')
     return loaded_model
Example #28
def load_model(file_name="model"):
    # load json and create model
    json_file = open(file_name + '.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(file_name + '.h5')
    print("Loaded model from disk")

    return loaded_model
Example #29
def import_model(path="model.json", path_weights="model.h5", path_history="history.json"):
    # load json and create model
    json_file = open(path, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(path_weights)
    with open(path_history, 'r') as history_file:
        loaded_model_history = json.load(history_file)
    print("Loaded model from disk")
    return loaded_model, loaded_model_history
Example #30
 def load(self):
     model_structure_path = os.path.join(self.path_to_weights_folder, 'model.json')
     model_weights_path = os.path.join(self.path_to_weights_folder, 'weights.hd5')
     scaler_path = os.path.join(self.path_to_weights_folder, 'scaler.pkl')
     with open(model_structure_path, 'r') as f:
         model_json = f.read()  # model_from_json expects the raw JSON string, not a parsed dict
     self.model = model_from_json(model_json)
     self.model.load_weights(model_weights_path)
     self.model.compile(loss='mean_squared_error', optimizer='adam')
     with open(scaler_path, 'rb') as f:
         self.scaler = pickle.load(f)
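A matching save method would write the same three artifacts this loader reads; a minimal sketch under the same paths (hypothetical counterpart, not part of the original class):

 def save(self):
     # write the architecture as a JSON string, the weights as HDF5, the scaler as a pickle
     with open(os.path.join(self.path_to_weights_folder, 'model.json'), 'w') as f:
         f.write(self.model.to_json())
     self.model.save_weights(os.path.join(self.path_to_weights_folder, 'weights.hd5'))
     with open(os.path.join(self.path_to_weights_folder, 'scaler.pkl'), 'wb') as f:
         pickle.dump(self.scaler, f)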