Example #1
import os
from tensorflow.keras.models import model_from_json


def nn_rd_model(nndir, name):
    # ---   Load the model   ----------
    jsonfile = os.path.join(nndir, name + ".json")
    h5file = os.path.join(nndir, name + ".h5")

    # load the JSON architecture and create the model
    with open(jsonfile, 'r') as json_file:
        model_json = json_file.read()
    model = model_from_json(model_json)
    # load the weights into the new model
    model.load_weights(h5file)
    print("Loaded model from disk")

    return model
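
A minimal usage sketch (the directory and model name below are placeholders, not part of the original):

# Hypothetical usage; "saved_models" and "my_cnn" are placeholder names.
model = nn_rd_model("saved_models", "my_cnn")
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])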
    
    
Example #2
# In[276]:

#model4_json = model4.to_json()
#with open("model4.json", "w") as json_file:
#    json_file.write(model4_json)
#model4.save_weights("model4.h5")

# Reading the models again

# In[11]:

json_file = open("model1.json", "r")
loaded_model_json = json_file.read()
json_file.close()
model1_load = model_from_json(loaded_model_json)
model1_load.load_weights("model1.h5")

json_file = open("model2.json", "r")
loaded_model_json = json_file.read()
json_file.close()
model2_load = model_from_json(loaded_model_json)
model2_load.load_weights("model2.h5")

json_file = open("model3.json", "r")
loaded_model_json = json_file.read()
json_file.close()
model3_load = model_from_json(loaded_model_json)
model3_load.load_weights("model3.h5")

json_file = open("model4.json", "r")
Example #3
from tensorflow.keras.models import model_from_json

import numpy as np

json_file = open("alphabet_ASL_Model.json", "r")
loaded_json_model = json_file.read()
json_file.close()

model = model_from_json(loaded_json_model)
model.load_weights("alphabet_ASL_Model_weights.h5")

labels = list("ABCDEFGHIJ")


def image_predict(image):
    # `image` must already be a batched tensor, e.g. shape (1, H, W, C)
    return labels[np.argmax(model.predict(image))]
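
image_predict expects a batched tensor; a sketch of preparing a single image (the 64x64 grayscale input shape is an assumption; match whatever the model was trained on):

import cv2
img = cv2.imread("sign.jpg", 0)      # hypothetical file, read as grayscale
img = cv2.resize(img, (64, 64)) / 255.0
img = img.reshape(1, 64, 64, 1)      # add batch and channel dimensions
print(image_predict(img))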
Example #4
from tensorflow.keras.models import model_from_json
import os
import cv2
import numpy as np
import re

# Load model from JSON
with open("models/cnn-simple-model.json", 'r') as json_file:
    loaded_model_json = json_file.read()

model = model_from_json(loaded_model_json)

# Load weights into model
model_list = sorted(
    [model for model in os.listdir("models")
     if model.startswith("cnn-simple-model-") and model.endswith('.h5')],
    key=lambda x: int(re.search(r'\d+', x).group(0)))
print(model_list)
print("Loading model weights: {}".format(model_list[-1]))
model.load_weights("models/" + model_list[-1])

test_images_names = []
test_images = []

for test_image in sorted(os.listdir("data/test"),
                         key=lambda x: int(re.search(r'\d+', x).group(0))):
    img = cv2.imread("data/test/" + test_image, 0)
    img = cv2.resize(img, (150, 150))
    img = np.expand_dims(img, axis=2)
    test_images.append(img * 1. / 255)
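
The snippet ends before inference; the natural next step, sketched under the same shapes (not part of the original):

# Stack the preprocessed images into a batch and run the model on it.
test_batch = np.array(test_images)   # shape (N, 150, 150, 1)
predictions = model.predict(test_batch)
print(predictions.shape)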
Example #5
model = Sequential([  # opening line reconstructed; the snippet begins mid-definition
    TCN(nb_filters=12, dropout_rate=0.5, kernel_size=6, dilations=[1, 2, 4]),
    Dense(units=1, activation='sigmoid')
])

# get model as json string and save to file
model_as_json = model.to_json()
with open('model.json', "w") as json_file:
    json_file.write(model_as_json)
# save weights to file (for this format, need h5py installed)
model.save_weights('weights.h5')

# Make inference.
inputs = np.ones(shape=(1, 100))
out1 = model.predict(inputs)[0, 0]
print('*' * 80)
print('Inference after creation:', out1)

# load model from file
loaded_json = open('model.json', 'r').read()
reloaded_model = model_from_json(loaded_json, custom_objects={'TCN': TCN})

tcn_full_summary(model, expand_residual_blocks=False)

# restore weights
reloaded_model.load_weights('weights.h5')

# Make inference.
out2 = reloaded_model.predict(inputs)[0, 0]
print('*' * 80)
print('Inference after loading:', out2)
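
A natural follow-up check, not in the original, is to assert that the reloaded model reproduces the original prediction:

# Sanity check: the JSON + weights round trip should be lossless.
assert abs(out1 - out2) < 1e-6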
Example #6
scores = model.evaluate(np.array(x_test),
                        np.array(y_test),
                        batch_size=batch_size)
print("Accuracy: " + str(scores[1]))
print("Loss: " + str(scores[0]))
"""## Load the data to generate the confusion matrix"""

true_y = []  # true values
pred_y = []  # predicted values
x = np.load('mod_xtest.npy')
y = np.load('mod_ytest.npy')
json_file = open(arquivo_modelo_json, 'r')  # open the model's JSON file (read-only)
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)  # load the model saved in the JSON file
loaded_model.load_weights(arquivo_modelo)  # load the neural network's weight values
y_pred = loaded_model.predict(x)  # predict the emotions contained in the images

# Get the lists of predicted and true probabilities
yp = y_pred.tolist()
yt = y.tolist()

count = 0
for i in range(len(y)):
    yy = max(yp[i])   # highest predicted probability
    yyt = max(yt[i])  # highest true probability
    pred_y.append(yp[i].index(yy))
    true_y.append(yt[i].index(yyt))
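
With true_y and pred_y filled, the confusion matrix itself can be built; a sketch using scikit-learn (an assumption, not part of the original snippet):

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(true_y, pred_y)
print(cm)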
Example #7
#%% Model to JSON

json_string = model.to_json()

# with open('json_model.json', 'w') as outfile:
#     json.dump(json_string[0], outfile)

#%% JSON to Model
# model reconstruction from JSON:
from tensorflow.keras.models import model_from_json

# with open('json_model.json','r') as json_file:
#     json_string_new = json.load(json_file)

json_model = model_from_json(json_string)
json_model.summary()

#%% Model.save_weights()

import os.path
if os.path.isfile('models/ali_model_weights.h5') is False:
    model.save_weights('models/ali_model_weights.h5')

model2 = Sequential([
    Dense(units=16, input_shape=(1, ), activation='relu'),
    Dense(units=32, activation='relu'),
    Dense(units=2, activation='softmax')
])

model2.load_weights('models/ali_model_weights.h5')
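
load_weights succeeds here only because model2's layers match the shapes of the saved weights; a quick sanity check (a sketch, not in the original):

import numpy as np
# Every weight tensor of model2 should now equal the corresponding saved tensor.
for w_saved, w_loaded in zip(model.get_weights(), model2.get_weights()):
    assert np.allclose(w_saved, w_loaded)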
Example #8
    # parse options
    parser = argparse.ArgumentParser(description='keras-pi.')
    parser.add_argument('-m', '--model', default='./model/mnist_deep_model.json')
    parser.add_argument('-w', '--weights', default='./model/weights.99.hdf5')
    parser.add_argument('-l', '--labels', default='./model/labels.txt')
    parser.add_argument('-d', '--device', default='normal_cam') # normal_cam /jetson_nano_raspi_cam

    args = parser.parse_args()

    labels = []
    with open(args.labels,'r') as f:
        for line in f:
            labels.append(line.rstrip())
    print(labels)

    model_pred = model_from_json(open(args.model).read())
    model_pred.load_weights(args.weights)

    # model_pred.summary()
    if args.device == 'normal_cam':
        cam = cv2.VideoCapture(0)
    elif args.device == 'jetson_nano_raspi_cam':
        GST_STR = 'nvarguscamerasrc \
            ! video/x-raw(memory:NVMM), width=3280, height=2464, format=(string)NV12, framerate=(fraction)21/1 \
            ! nvvidconv ! video/x-raw, width=(int)640, height=(int)480, format=(string)BGRx \
            ! videoconvert \
            ! appsink'
        cam = cv2.VideoCapture(GST_STR, cv2.CAP_GSTREAMER) # Raspi cam
    else:
        print('wrong device')
        sys.exit()
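
The snippet stops before the capture loop; a hedged sketch of a per-frame prediction loop (the 28x28 grayscale input shape is an assumption based on the MNIST model name):

# Sketch only: preprocessing and shapes are assumptions, not from the original.
import numpy as np
while True:
    ret, frame = cam.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    x = cv2.resize(gray, (28, 28)).astype('float32').reshape(1, 28, 28, 1) / 255.0
    pred = model_pred.predict(x)
    print(labels[int(np.argmax(pred))])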
Example #9
def main():

    # give folder name for pretrained files
    folder_pretrained = "run1"

    # load all the data
    path = "../data/tweets/stemmed/*.csv"
    files = glob.glob(path)

    # load tokenizer
    print("Loading tokenizer")
    with open('results/{}/tokenizer.pickle'.format(folder_pretrained),
              'rb') as handle:
        tokenizer = pickle.load(handle)

    # load json and create model
    json_file = open('results/{}/model.json'.format(folder_pretrained), 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)

    # load weights into new model
    loaded_model.load_weights("results/{}/model.h5".format(folder_pretrained))
    print("Loaded model from disk")

    # evaluate loaded model on test data
    loaded_model.compile(loss='categorical_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])

    # save negative and positive counts in a dict
    scores = {}

    # for every file predict positive and negative tweets
    for file in files:
        print("predicting score for file: " + file)

        # read tweets
        data = pd.read_csv(file, encoding='latin-1', header=None)

        # give dataframe column names
        data = data.rename(columns={0: "tweet"})

        # make sure all tweets are strings
        data['tweet'] = data['tweet'].apply(lambda x: str(x))

        # create one hot vectors from sequence
        X = tokenizer.texts_to_sequences(data['tweet'].values)

        # pad sequences with same length as training
        X = pad_sequences(X, maxlen=40)

        # predict sentiment of tweets
        predictions = loaded_model.predict(X)

        # count negative and positive tweets
        negative, positive = count_pos_neg(predictions)

        # save counts in dict
        scores[file] = (negative, positive)

    # write to a csv
    create_csv(scores)
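
count_pos_neg and create_csv are helpers referenced but not shown; a plausible count_pos_neg for a two-column softmax output (the column order is an assumption):

import numpy as np

def count_pos_neg(predictions):
    # Assumes column 0 = negative, column 1 = positive.
    classes = np.argmax(predictions, axis=1)
    return int(np.sum(classes == 0)), int(np.sum(classes == 1))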
Example #10
import socket
import select
import numpy as np
import tkinter as tk
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
from numpy.core.fromnumeric import shape
from tensorflow.keras.models import model_from_json

with open("model1.json", "r") as f:
    model = model_from_json(f.read())
model.load_weights('weights.h5')
model.summary()
HEADER_LENGTH = 10

IP = "0.0.0.0"
PORT = 1234
x_arr, y_arr, z_arr = [], [], []
x_gry, y_gry, z_gry = [], [], []

# Create a socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Sets REUSEADDR (as a socket option) to 1 on socket
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((IP, PORT))
# This makes server listen to new connections
server_socket.listen()
# List of sockets for select.select()
sockets_list = [server_socket]
# List of connected clients - socket as a key, user header and name as data
Example #11


'''CODE FOR EMPIRICAL NTK CALCULATION'''

#%%
# net="resnet50"
net="mobilenetv2"
# net="nasnet"
# net="vgg19"
# net="densenet121"
filename = net+"_True_4_None_0000_max_gaussian_model"
json_string_filename = filename
arch_json_string = open("archs/" + filename, "r").read()
from tensorflow.keras.models import model_from_json
model = model_from_json(arch_json_string)
model.compile("sgd", loss=lambda target, pred: pred)
import tensorflow.keras.backend as K

num_layers = len(model.trainable_weights)
trainable_weights = model.trainable_weights
# num_layers
#%%
##
fs = []

num_chunks = 5
layers_per_chunk = num_layers//num_chunks
for chunk in range(num_chunks):
    print(chunk)
    grads = model.optimizer.get_gradients(model.total_loss, trainable_weights[chunk*layers_per_chunk:(chunk+1)*layers_per_chunk])
Example #12
    # save model architecture
    with open(os.path.join(model_path, 'model_architecture'),
              'w') as json_file:
        json.dump(config, json_file)

    for i in tqdm(range(start_session, training_sessions + start_session)):
        Arena.ARENA_WIDTH = 100
        Arena.ARENA_HEIGHT = 100
        if i > 0 and i % 200 == 0:
            Arena.ARENA_WIDTH += 50
            Arena.ARENA_HEIGHT += 50
        if i == start_session + 1:
            # Set player to be reinforcement player
            with open(os.path.join(model_path,
                                   'model_architecture')) as json_file:
                model = model_from_json(json.load(json_file))
                game.set_player(1, 'd', model)
        if i >= start_session + 1:
            # Set players weights to be the trained weights from last session
            game.players[1]._net.load_weights(model_path +
                                              f'/session_{i}_weights')

        rewards, num_actions = agent.train(1, None, batch_size)

        # Save all values that need to be saved
        accum_rewards = accum_rewards.append(rewards)
        agent.save_weights(os.path.join(model_path,
                                        f'session_{i + 1}_weights'))
        with open(os.path.join(model_path, 'rewards.csv'), 'w') as reward_file:
            accum_rewards.to_csv(reward_file)
Example #13
    def __init__(self, attrData, config):
        super(PredictBasicVggAction, self).__init__()

        img_paths = attrData.get('Image_Paths')
        mod_path = attrData.get('Model_Path')
        set_path = attrData.get('Settings_Path')
        dat_path = attrData.get('Image_Dir')

        # Define Hyperparameters
        INPUT_SIZE = int(attrData.get('Input_Size'))
        BATCH_SIZE = int(attrData.get('Batch_Size'))
        CLASS_MODE = attrData.get('Class_Mode')
        LOSS = attrData.get('Loss')
        OPTIMIZER = attrData.get('Optimizer')
        STEPS = int(attrData.get('Steps'))
        PREDICT_BATCH_SIZE = int(attrData.get('Predict_Batch_Size'))

        testing_data_generator = ImageDataGenerator(rescale=1. / 255)
        '''
        test_set = testing_data_generator.flow_from_directory (dat_path,
                                                     target_size=(INPUT_SIZE, INPUT_SIZE),
                                                     batch_size=BATCH_SIZE,
                                                     class_mode=CLASS_MODE)
        '''

        for img_path in img_paths:
            test_set = image.load_img(img_path,
                                      target_size=(INPUT_SIZE, INPUT_SIZE))

        json_file = open(set_path, 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)

        # load weights into new model
        loaded_model.load_weights(mod_path)
        print("Loaded model from disk")

        # evaluate loaded model on test data
        loaded_model.compile(loss=LOSS,
                             optimizer=OPTIMIZER,
                             metrics=['accuracy'])

        score = loaded_model.evaluate_generator(test_set, steps=STEPS)

        for idx, metric in enumerate(loaded_model.metrics_names):
            print("{}: {}".format(metric, score[idx]))

        # dimensions of our images
        img_width, img_height = 32, 32

        xList = []

        # predicting images
        for img_path in img_paths:
            img = image.load_img(img_path, target_size=(img_width, img_height))

            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            xList.append(x)

        images = np.vstack(xList)
        classes = loaded_model.predict(images, batch_size=PREDICT_BATCH_SIZE)

        print(classes)
Example #14
    def load_file(self, model_file, json_file):
        with open(json_file) as f:
            json_model = f.read()
        self.model = model_from_json(json_model)
        self.model.load_weights("{}".format(model_file))
Example #15
    fpr = {}
    tpr = {}
    auc1 = {}
    scores = []
    labels = []
    for f in folders:
        model_name = f.split('_')[-1]
        model_file = f + '/model.json'
        with open(model_file) as json_file:
            json_config = json_file.read()

        model = model_from_json(json_config,
                                custom_objects={
                                    'Clip': Clip,
                                    'QDense': QDense,
                                    'QConv2D': QConv2D,
                                    'QActivation': QActivation,
                                    'QBatchNormalization': QBatchNormalization
                                })
        model.load_weights(f + '/bestWeights.h5')
        # loop over each layer and get weights and biases
        plt.figure()
        if options.doProfile:
            numerical(keras_model=model, X=X_test)
        plt.savefig(f + '/profile.png')
        if options.doWeights:
            allWeightsByLayer = {}
            for layer in model.layers:
                print("----")
                print(layer.name)
                if len(layer.get_weights()) < 1: continue
Example #16
def train(data_dir,
          model_output_dir,
          epochs=100,
          name=None,
          batch_size=16,
          gpus=1,
          learning_rate=0.1,
          nb_slices=1,
          threshold=10.0,
          load_weights=None,
          initial_epoch=0,
          nb_layers_per_block=4,
          nb_blocks=4,
          nb_initial_filters=16,
          growth_rate=12,
          compression_rate=0.5,
          activation='relu',
          initializer='glorot_uniform',
          batch_norm=True):

    args = locals()

    # Set up dataset
    train_image_dir = os.path.join(data_dir, 'images/train')
    val_image_dir = os.path.join(data_dir, 'images/val')
    train_meta_file = os.path.join(data_dir, 'meta/train.csv')
    val_meta_file = os.path.join(data_dir, 'meta/val.csv')
    train_labels = pd.read_csv(train_meta_file)['ZOffset'].values
    val_labels = pd.read_csv(val_meta_file)['ZOffset'].values

    train_generator = SliceSelectionSequence(train_labels,
                                             train_image_dir,
                                             batch_size,
                                             1000,
                                             jitter=True,
                                             sigmoid_scale=threshold)
    val_generator = SliceSelectionSequence(val_labels,
                                           val_image_dir,
                                           batch_size,
                                           50,
                                           sigmoid_scale=threshold)

    # Directories and files to use
    if name is None:
        name = 'untitled_model_' + datetime.datetime.now().strftime(
            '%Y_%m_%d_%H_%M_%S')
    output_dir = os.path.join(model_output_dir, name)
    tflow_dir = os.path.join(output_dir, 'tensorboard_log')
    weights_path = os.path.join(output_dir,
                                'weights-{epoch:02d}-{val_loss:.4f}.hdf5')
    architecture_path = os.path.join(output_dir, 'architecture.json')
    tensorboard = TensorBoard(log_dir=tflow_dir,
                              histogram_freq=0,
                              write_graph=False,
                              write_images=False)

    if load_weights is None:
        os.mkdir(output_dir)
        os.mkdir(tflow_dir)

        args_path = os.path.join(output_dir, 'args.json')
        with open(args_path, 'w') as json_file:
            json.dump(args, json_file, indent=4)

        # Create the model
        print('Compiling model')
        with tf.device('/cpu:0'):
            model = DenseNet(img_dim=(256, 256, 1),
                             nb_layers_per_block=nb_layers_per_block,
                             nb_dense_block=nb_blocks,
                             growth_rate=growth_rate,
                             nb_initial_filters=nb_initial_filters,
                             compression_rate=compression_rate,
                             sigmoid_output_activation=True,
                             activation_type=activation,
                             initializer=initializer,
                             output_dimension=nb_slices,
                             batch_norm=batch_norm)

        # Save the architecture
        with open(architecture_path, 'w') as json_file:
            json_file.write(model.to_json())

    else:
        with open(architecture_path, 'r') as json_file:
            model = model_from_json(json_file.read())

        # Load the weights
        model.load_weights(load_weights)

    # Move to multi GPUs
    # Use multiple devices
    if gpus > 1:
        parallel_model = multi_gpu_model(model, gpus)
        model_checkpoint = MultiGPUModelCheckpoint(weights_path,
                                                   monitor='val_loss',
                                                   save_best_only=False)
    else:
        parallel_model = model
        model_checkpoint = ModelCheckpoint(weights_path,
                                           monitor='val_loss',
                                           save_best_only=False)

    # Set up the learning rate scheduler
    def lr_func(e):
        print("Learning Rate Update at Epoch", e)
        if e > 0.75 * epochs:
            return 0.01 * learning_rate
        elif e > 0.5 * epochs:
            return 0.1 * learning_rate
        else:
            return learning_rate

    lr_scheduler = LearningRateScheduler(lr_func)

    # Compile multi-gpu model
    loss = 'mean_absolute_error'
    parallel_model.compile(optimizer=Adam(lr=learning_rate), loss=loss)

    print('Starting training...')

    parallel_model.fit_generator(
        train_generator,
        epochs=epochs,
        shuffle=False,
        validation_data=val_generator,
        callbacks=[model_checkpoint, tensorboard, lr_scheduler],
        use_multiprocessing=True,
        workers=16,
        initial_epoch=initial_epoch)

    return model
Example #17
def main():

    # Loading the auto-encoder model:
    model_file = open("Trained_Models/Auto_Encoder_Trained_Model.json", "r")
    model = model_file.read()
    model_file.close()

    Auto_Encoder = model_from_json(model)
    Auto_Encoder.load_weights("Trained_Models/Auto_Encoder.h5")
    print("Auto_Encoder Model Loaded Successfully")

    (xTrain, yTrain), (xTest, yTest) = mnist.load_data()

    xTrain = xTrain.astype('float32') / 255.
    xTest = xTest.astype('float32') / 255.
    xTrain = np.reshape(xTrain, (len(xTrain), 28, 28, 1))
    xTest = np.reshape(xTest, (len(xTest), 28, 28, 1))

    noiseFactor = 0.5
    xTrain_noisy = xTrain + noiseFactor * np.random.normal(loc=0.0, scale=1.0, size=xTrain.shape)
    xTest_noisy = xTest + noiseFactor * np.random.normal(loc=0.0, scale=1.0, size=xTest.shape)

    xTrain_noisy = np.clip(xTrain_noisy, 0., 1.)
    xTest_noisy = np.clip(xTest_noisy, 0., 1.)

    # Creating the classifying layers:

    classifier = Sequential()
    # print(Auto_Encoder.summary())

    for layer in Auto_Encoder.layers:
        classifier.add(layer)
    for layer in classifier.layers:
        layer.trainable = False

    classifier.add(Flatten())
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dense(10, activation='softmax'))

    # Test_image = np.reshape(xTest[0], (28, 28))
    # plt.imshow(Test_image)
    # plt.show()

    print(xTest[0].shape)

    # Check whether the classifier model has already been trained: if not,
    # train it; otherwise ask the user whether to retrain it.

    ch = int(input("Do you wish to retrain the classification model? 1 for yes, 2 for no: "))

    if not (os.path.exists("./Trained_Models/Classifier_Model.h5")
            and os.path.exists("./Trained_Models/Classifier_Trained_Model.json")) or ch == 1:

        classifier.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
        history = classifier.fit(xTrain_noisy, yTrain,
                                 epochs=3,
                                 validation_split=.1)
        Classifier_model_trained = classifier.to_json()

        with open("Trained_Models/Classifier_Trained_Model.json", 'w') as json_model:
            json_model.write(Classifier_model_trained)
        classifier.save_weights("Trained_Models/Classifier.h5")
        print("\n\t Classifier Model has been trained and Saved Successfully! ")
        print("\tYou can Find Trained Models Under Trained_Model Directory")

        # Plotting curves:
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

        print(classifier.summary())

    print("Model is trained. Now Testing with 10 images:")

    # Test the model by adding noise to the input images and passing them
    # through the entire network to see whether it can still classify them.

    noise_factor = 0.5
    xTest_noisy = []
    xTest_noisy = xTest + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=xTest.shape)
    xTest_noisy = np.clip(xTest_noisy, 0., 1.)

    prediction = classifier.predict(xTest_noisy)

    for i in range(10):
        print(np.argmax(prediction[i]))

        plt.subplot(2, 1, 1)
        plt.imshow(xTest_noisy[i].reshape(28, 28))
        plt.subplot(2, 1, 2)
        plt.imshow(xTest[i].reshape(28, 28))
        plt.show()
Example #18
    def apply(self):
        self.logger.info("DnnOptimizer::apply, input size: %d", self.dim_input)

        json_file = open("%s/model_%s_nEv%d.json" % \
                         (self.dirmodel, self.suffix, self.train_events), "r")
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = \
            model_from_json(loaded_model_json, {'SymmetryPadding3d' : SymmetryPadding3d})
        loaded_model.load_weights("%s/model_%s_nEv%d.h5" % \
                                  (self.dirmodel, self.suffix, self.train_events))

        myfile = TFile.Open("%s/output_%s_nEv%d.root" % \
                            (self.dirval, self.suffix, self.train_events), "recreate")
        h_dist_all_events = TH2F(
            "%s_all_events_%s" % (self.h_dist_name, self.suffix), "", 500, -5,
            5, 500, -5, 5)
        h_deltas_all_events = TH1F(
            "%s_all_events_%s" % (self.h_deltas_name, self.suffix), "", 1000,
            -1., 1.)
        h_deltas_vs_dist_all_events = TH2F("%s_all_events_%s" % \
                                           (self.h_deltas_vs_dist_name, self.suffix),
                                           "", 500, -5.0, 5.0, 100, -0.5, 0.5)

        for iexperiment in self.partition['apply']:
            indexev = iexperiment
            inputs_, exp_outputs_ = load_train_apply(
                self.dirinput_apply, indexev, self.selopt_input,
                self.selopt_output, self.grid_r, self.grid_phi, self.grid_z,
                self.opt_train, self.opt_predout)
            inputs_single = np.empty(
                (1, self.grid_phi, self.grid_r, self.grid_z, self.dim_input))
            exp_outputs_single = np.empty(
                (1, self.grid_phi, self.grid_r, self.grid_z, self.dim_output))
            inputs_single[0, :, :, :, :] = inputs_
            exp_outputs_single[0, :, :, :, :] = exp_outputs_

            distortion_predict_group = loaded_model.predict(inputs_single)
            distortion_predict_flat_m = distortion_predict_group.reshape(-1, 1)
            distortion_predict_flat_a = distortion_predict_group.flatten()

            distortion_numeric_group = exp_outputs_single
            distortion_numeric_flat_m = distortion_numeric_group.reshape(-1, 1)
            distortion_numeric_flat_a = distortion_numeric_group.flatten()
            deltas_flat_a = (distortion_predict_flat_a -
                             distortion_numeric_flat_a)
            deltas_flat_m = (distortion_predict_flat_m -
                             distortion_numeric_flat_m)

            h_suffix = "Ev%d_Mean%d_%s" % (iexperiment[0], iexperiment[1],
                                           self.suffix)
            h_dist = TH2F("%s_%s" % (self.h_dist_name, h_suffix), "", 500, -5,
                          5, 500, -5, 5)
            h_deltas = TH1F("%s_%s" % (self.h_deltas_name, h_suffix), "", 1000,
                            -1., 1.)
            h_deltas_vs_dist = TH2F(
                "%s_%s" % (self.h_deltas_vs_dist_name, h_suffix), "", 500,
                -5.0, 5.0, 100, -0.5, 0.5)

            fill_hist(h_dist_all_events, np.concatenate((distortion_numeric_flat_m, \
                                distortion_predict_flat_m), axis=1))
            fill_hist(
                h_dist,
                np.concatenate(
                    (distortion_numeric_flat_m, distortion_predict_flat_m),
                    axis=1))
            fill_hist(h_deltas, deltas_flat_a)
            fill_hist(h_deltas_all_events, deltas_flat_a)
            fill_hist(
                h_deltas_vs_dist,
                np.concatenate((distortion_numeric_flat_m, deltas_flat_m),
                               axis=1))
            fill_hist(
                h_deltas_vs_dist_all_events,
                np.concatenate((distortion_numeric_flat_m, deltas_flat_m),
                               axis=1))

            prof = h_deltas_vs_dist.ProfileX()
            prof.SetName("%s_%s" % (self.profile_name, h_suffix))

            h_dist.Write()
            h_deltas.Write()
            h_deltas_vs_dist.Write()
            prof.Write()

            h1tmp = h_deltas_vs_dist.ProjectionX("h1tmp")
            h_std_dev = h1tmp.Clone("%s_%s" % (self.h_std_dev_name, h_suffix))
            h_std_dev.Reset()
            h_std_dev.SetXTitle("Numerical distortion fluctuation (cm)")
            h_std_dev.SetYTitle(
                "std.dev. of (Pred. - Num.) distortion fluctuation (cm)")
            nbin = int(h_std_dev.GetNbinsX())
            for ibin in range(0, nbin):
                h1diff = h_deltas_vs_dist.ProjectionY("h1diff", ibin + 1,
                                                      ibin + 1, "")
                stddev = h1diff.GetStdDev()
                stddev_err = h1diff.GetStdDevError()
                h_std_dev.SetBinContent(ibin + 1, stddev)
                h_std_dev.SetBinError(ibin + 1, stddev_err)
            h_std_dev.Write()

        h_dist_all_events.Write()
        h_deltas_all_events.Write()
        h_deltas_vs_dist_all_events.Write()
        prof_all_events = h_deltas_vs_dist_all_events.ProfileX()
        prof_all_events.SetName("%s_all_events_%s" %
                                (self.profile_name, self.suffix))
        prof_all_events.Write()

        h1tmp = h_deltas_vs_dist_all_events.ProjectionX("h1tmp")
        h_std_dev_all_events = h1tmp.Clone("%s_all_events_%s" %
                                           (self.h_std_dev_name, self.suffix))
        h_std_dev_all_events.Reset()
        h_std_dev_all_events.SetXTitle("Numerical distortion fluctuation (cm)")
        h_std_dev_all_events.SetYTitle(
            "std.dev. of (Pred. - Num.) distortion fluctuation (cm)")
        nbin = int(h_std_dev_all_events.GetNbinsX())
        for ibin in range(0, nbin):
            h1diff = h_deltas_vs_dist_all_events.ProjectionY(
                "h1diff", ibin + 1, ibin + 1, "")
            stddev = h1diff.GetStdDev()
            stddev_err = h1diff.GetStdDevError()
            h_std_dev_all_events.SetBinContent(ibin + 1, stddev)
            h_std_dev_all_events.SetBinError(ibin + 1, stddev_err)
        h_std_dev_all_events.Write()

        myfile.Close()
        self.logger.info("Done apply")
Example #19
# labels_train = data_train[:,features_train.shape[1]]
# labels_test = data_test[:,features_train.shape[1]]

# #Declaring type for any kind of normalization
# features_train = features_train.astype("float32")
# features_test = features_test.astype("float32")

# #KNN classifier
# KNN = KNeighborsClassifier(n_neighbors = 6)
# KNN.fit(features_train,labels_train)

#Loading CNN model
json_file = open("../model/CNN.json")
model_json = json_file.read()
json_file.close()
modely = model_from_json(model_json)
modely.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
modely.load_weights("../model/CNN.h5")

#Loading LogReg model
json_file = open("../model/logreg.json")
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json)
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.load_weights("../model/logreg.h5")

#Loading SVC model
foo = open('../model/svc.pkl','rb')
classy = pickle.load(foo)
foo.close()
Example #20
images = pd.read_csv('./images.csv')['link'].to_numpy()
test_images = pd.read_csv('./images.csv')[:5]
test_images = test_images['link'].to_list()

config = tf.compat.v1.ConfigProto(device_count={'GPU': 1},
                                  intra_op_parallelism_threads=1,
                                  allow_soft_placement=True)
session = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(session)

with open('./models/resnet50.json', 'r') as f:
    model_json = f.read()

knn_model = pickle.load(open('./models/knn.pkl', 'rb'))
graph = tf.compat.v1.get_default_graph()
model = model_from_json(model_json)
model.load_weights('./models/resnet50.h5')
model._make_predict_function()


def predict_model(url):

    global knn_model

    with session.as_default():
        with session.graph.as_default():
            im = Image.open(requests.get(url, stream=True).raw).resize(
                (224, 224))
            x = image.img_to_array(im)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
Example #21
def model_load(model_fn, model_weights_fn):
    with open(model_fn, 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(model_weights_fn)
    return model
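
A minimal usage sketch for model_load (the file names are placeholders, not from the original):

model = model_load("model.json", "weights.h5")   # hypothetical paths
model.compile(optimizer="adam", loss="categorical_crossentropy")  # compile before evaluating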
Example #22
minX_array = np.zeros((num_of_frames, 1), np.float32)
maxX_array = np.zeros((num_of_frames, 1), np.float32)
for i in range(num_of_frames):
    minX_array[i] = mag_spect_Frames[i, :, :].min()
    mag_spect_Frames[i, :, :] = mag_spect_Frames[i, :, :] - minX_array[i]
    maxX_array[i] = mag_spect_Frames[i, :, :].max()
    mag_spect_Frames[i, :, :] = mag_spect_Frames[i, :, :] / maxX_array[i]

#-------------------------------------------------
# Load & Use Model
#-------------------------------------------------
# Load Model
json_file = open('UNET_Model_Saves\\UNET_Model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
my_unet = model_from_json(loaded_model_json)
# load weights into the new model
my_unet.load_weights('UNET_Model_Saves\\UNET_Model_Weights.h5')

# Make predictions for every frame
mag_spect_Frames_Predicted = my_unet.predict(mag_spect_Frames)
mag_spect_Frames_Predicted = mag_spect_Frames_Predicted[:, :, :, 0]

#-------------------------------------------------
# Create Enhanced Audio
#-------------------------------------------------
# Denormalize Data
for i in range(num_of_frames):
    mag_spect_Frames_Predicted[i, :, :] = (
        mag_spect_Frames_Predicted[i, :, :] * maxX_array[i]) + minX_array[i]
Example #23
parser.add_argument('--outputLayer',dest='outputLayer',default='encoded_vector/Relu')
parser.add_argument('--outputGraph',dest='outputGraph',default='encoder')

args = parser.parse_args()

print(args.outputDir)


f_model = args.inputModel
with open(f_model, 'r') as f:
    if 'QActivation' in f.read():
        from qkeras import QDense, QConv2D, QActivation, quantized_bits, Clip
        f.seek(0)
        model = model_from_json(f.read(),
                                custom_objects={'QActivation': QActivation,
                                                'quantized_bits': quantized_bits,
                                                'QConv2D': QConv2D,
                                                'QDense': QDense,
                                                'Clip': Clip})
    else:
        f.seek(0)
        model = model_from_json(f.read())
    hdf5 = f_model.replace('json', 'hdf5')
    model.load_weights(hdf5)


print(model.summary())

## get_session is deprecated in tf2
tfsession = tfv1.keras.backend.get_session()
Example #24
def load_model():
    model = model_from_json(open('model.json').read())
    #loads the weights of the model from a HDF5 file (created by save_weights)
    model.load_weights('weights.h5')
    model.compile(optimizer='RMSprop', loss='mean_squared_error')
    return model
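
A minimal usage sketch for load_model (the input shape below is a placeholder; match the real model):

import numpy as np
model = load_model()
x = np.zeros((1, 10), dtype="float32")   # placeholder input
print(model.predict(x))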
Example #25
import numpy as np
import scipy.misc
from tensorflow import keras
from tensorflow.keras.models import model_from_json
from tensorflow.keras.optimizers import SGD
from PIL import Image
import os
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = "1"  
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1" 
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "1" 
os.environ['TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LOG_PATH'] ="./amp_log/"

# Load the model
model_architecture = 'cifar10_architecture.json'
model_weights = 'cifar_weights.h5'
model = model_from_json(open(model_architecture).read())		# load the model architecture
model.load_weights(model_weights)		# load the model weights

# Load the images
img_names = ['cat.jpg', 'deer.jpg', 'dog.jpg']
imgs_ = [np.transpose(Image.open(img_name).resize((32, 32)), (1, 0, 2)).astype('float32') for img_name in img_names]
imgs = np.array(imgs_) / 255	# normalize

# Compile the model
optim = SGD()
model.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy'])

# Predict the probability of each sample belonging to each class
print(model.predict(imgs))		# print the probabilities
print(np.argmax(model.predict(imgs), axis=1))		# print the label with the highest probability
# Original source: https://blog.csdn.net/tszupup/article/details/85275111
Example #26
# Path to the test images
test_path = './data/test'
test_batches = ImageDataGenerator().flow_from_directory(
    test_path,
    target_size=(height, length),
    classes=['dogs', 'gardens'],
    batch_size=90)

print("Carga del modelo de prediccion")

# load the model in JSON format
json_file = open(model_path, 'r')
loaded_model = json_file.read()
json_file.close()
cnn = model_from_json(loaded_model)

# load the weights
cnn.load_weights(weights_model_path)

# compile the model with:
#  categorical_crossentropy loss function
#  Adam with learning rate = 0.0005
#  accuracy metric
cnn.compile(loss='categorical_crossentropy',
            optimizer=Adam(lr=lr),
            metrics=['accuracy'])

cnn.summary()

# information for generating the matrix
Example #27
import tensorflow as tf
import pickle
import numpy as np
import tools as tools
from tensorflow.keras.models import Sequential, model_from_json, load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences


# -----  Loading the trained model   -------
# step 1 : load the stored structure
with open('model_architecture.json', 'r') as f:
    model = model_from_json(f.read())
# step 2 : Load weights into the new model
model.load_weights('model_weights.h5')

# load the tokenizer to encode the sentence
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

# enter the sentence
phrase = "So sleepy again and it's not even that late. I fail once again."#input("Enter the sentence to be tested :\n")

# prediction with the model

# 1 bag_of_words and not rnn, lstm or bi-lstm
# encodedInput = tokenizer.texts_to_matrix([phrase], mode='count')[:, 1:]

# 2 bag_of_words and cnn, rnn, lstm or bi-lstm
# encodedInput = tokenizer.texts_to_matrix([phrase], mode='count')[:, 1:]
# encodedInput = np.reshape(encodedInput, (encodedInput.shape[0], 1, encodedInput.shape[1]))
Example #28
    models = config['Training']['network'].split(',')
    folds_number = int(config['Data']['folds_number'])

    path_dataset = config['Data']['path_dataset']
    gt = config['Data']['path_image_gt']
    org = config['Data']['path_image_org']
    field = config['Data']['path_image_field']

    models_path = config['Network']['models_path']
    predict_save = config['Predict']['predict_save']
    predict_to = config['Predict']['to_predict']

    Folds = get_fold_dataset_dict(folds_number, path='./Training_data/org/')
    for fold_id in range(folds_number):
        fold = f'Fold_{fold_id}'
        for net in models:
            net = net.strip()
            model_path = f'{models_path}{net}/model_{fold}.json'
            model_weights = f'{models_path}{net}/model_weights_{fold}.h5'
            model = model_from_json(open(model_path).read())
            model.load_weights(model_weights)

            for impath in Folds[fold]['Test']:
                imname = os.path.split(impath)[-1]
                path_to_save = f'{predict_save}{fold}/{net}/{imname}'
                os.makedirs(os.path.split(path_to_save)[0], exist_ok=True)

                pred = predict_img(impath, model, patch_height, patch_width,
                                   stride_height, stride_width, batch_size)
                io.imsave(path_to_save, pred)
Example #29
def read_model(json_name, weight_name):
    json_name = "cache/" + json_name
    weight_name = "cache/" + weight_name
    model = model_from_json(open(json_name).read())
    model.load_weights(weight_name)
    return model
Example #30
"""
Created on Tue Aug 25 16:34:31 2020

@author: HP
"""

import cv2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import model_from_json
import numpy as np
import face_recognition

webcam_video_stream = cv2.VideoCapture(0)

face_exp_model = model_from_json(
    open(
        "C:/Users/HP/Desktop/Face-recognition/dataset/facial_expression_model_structure.json",
        "r").read())

face_exp_model.load_weights(
    'C:/Users/HP/Desktop/Face-recognition/dataset/facial_expression_model_weights.h5'
)

emotions_label = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise',
                  'neutral')

all_face_locations = []

while True:
    ret, current_frame = webcam_video_stream.read()

    current_frame_small = cv2.resize(current_frame, (0, 0), fx=0.25, fy=0.25)
Example #31
    def __init__(self, model_json_file, model_weights_file):
        with open(model_json_file, "r") as json_file:
            loaded_model_json = json_file.read()
            self.loaded_model = model_from_json(loaded_model_json)

        self.loaded_model.load_weights(model_weights_file)
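
For context, a hedged usage sketch; the enclosing class name and file names are assumptions, not from the original:

# Hypothetical: assume the __init__ above belongs to a class named JsonModelLoader.
loader = JsonModelLoader("model.json", "model_weights.h5")
probs = loader.loaded_model.predict(x_batch)   # x_batch: a preprocessed input batch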