Пример #1
0
def init_model_for_thread(example):
    """Prepare the TF graph for a worker thread: load the Keras model, run
    one smoke-test prediction on `example`, finalize the graph, and return
    a dict exposing the model and graph to the thread.
    """
    import tensorflow as tf
    from tensorflow.contrib.keras import models

    # everything below must run inside the (default) TF graph
    tf_graph = tf.get_default_graph()
    with tf_graph.as_default():

        session = tf.Session()  # tf.contrib.keras.backend.get_session()

        session.run(tf.global_variables_initializer())

        net = models.load_model(NN_MODEL_FILE)

        # dummy inference: pads the example to length 300 and adds a trailing
        # channel axis; this also forces graph construction before finalize()
        padded = tf.contrib.keras.preprocessing.sequence.pad_sequences(
            [example], maxlen=300, padding='post')
        padded = padded.reshape(padded.shape + (1, ))

        prediction = net.predict(padded)
        print('Testing model inference: {}'.format(prediction))

        # NOTE(review): finalizing here prevents a child process from running
        # its own tf.global_variables_initializer() on this graph
        tf_graph.finalize()

        # shared-state dict handed to the thread
        return {"NN_MODEL": net, "GRAPH": tf_graph}
Пример #2
0
def train(neurons, hidden, act, epochs=10, repetition=0, summary=False):
    """Train an MLP to approximate the Gaussian kernel on random norms.

    Args:
        neurons: units per hidden layer.
        hidden: number of hidden layers (>= 1).
        act: activation function name for the hidden layers.
        epochs: maximum number of training epochs.
        repetition: run index, used only in the checkpoint file name.
        summary: if True, print the model summary before training.

    Returns:
        The best checkpointed model (lowest validation MAPE) reloaded
        from disk.
    """
    samples = int(1e6)
    h = 1  # kernel smoothing length passed to gaussian()
    # training data: uniformly sampled norms in [0, 3) and their kernel values
    norms = np.random.uniform(0, 3, (samples, 1))
    kn = gaussian(norms, h)

    X = norms
    y = kn

    # simple MLP: `hidden` dense layers feeding one linear output unit
    inputs = layers.Input(shape=(1, ))
    x = layers.Dense(neurons, activation=act)(inputs)
    for _ in range(hidden - 1):
        x = layers.Dense(neurons, activation=act)(x)
    outputs = layers.Dense(1, activation='linear')(x)

    save_path = "models/kernel/h{}/nn_{}_{}.h5".format(hidden, neurons, repetition)
    model = models.Model(inputs=inputs, outputs=outputs)
    # stop when validation MAPE stalls for 10 epochs; keep only the best weights
    early_stop = callbacks.EarlyStopping(monitor='val_mean_absolute_percentage_error', patience=10)
    check_point = callbacks.ModelCheckpoint(save_path,
                                            monitor='val_mean_absolute_percentage_error', save_best_only=True,
                                            mode='min')
    opt = optimizers.Adam(lr=1e-3, decay=1e-5)
    model.compile(optimizer=opt,
                  loss='mean_squared_error',
                  metrics=['mean_absolute_percentage_error'])

    if summary:
        model.summary()
    # fit for its side effects (checkpointing); the History object was unused
    model.fit(X, y, epochs=epochs, batch_size=50,
              callbacks=[check_point, early_stop], validation_split=0.01)
    return models.load_model(save_path)
Пример #3
0
 def restore_model(self, model_path):
     """Reload the persisted Keras model from ``model_path``.

     The custom loss function must exist before deserialization so Keras
     can resolve it by name, so it is initialized lazily here if needed.
     """
     if not self._loss_func:
         # Have to initialize this to load the model
         self._init_loss_func()
     self.model = models.load_model(
         model_path,
         custom_objects={self._loss_func.__name__: self._loss_func})
     self._all_layers = self.model.layers
Пример #4
0
def init_model_for_process():
    """Load and return the saved Keras model for a worker process."""

    # each process imports its TF/Keras stack separately
    from tensorflow.contrib.keras import models

    return models.load_model(NN_MODEL_FILE)
 def restore_model(self, model_path, training_betas=None, num_perplexities=None):
     """Reload the persisted Keras model from ``model_path``.

     The custom loss needs the perplexity count, so both are resolved
     lazily before deserialization if the loss is not yet initialized.
     """
     if not self._loss_func:
         # loss construction depends on the number of perplexities
         self.num_perplexities = self._get_num_perplexities(training_betas, num_perplexities)
         self._init_loss_func()
     self.model = models.load_model(
         model_path,
         custom_objects={self._loss_func.__name__: self._loss_func})
     self._all_layers = self.model.layers
Пример #6
0
 def __init__(self):
     """Load all models used by the pipeline: the face model, the person
     detector, and the gender-classification VGG network."""
     self.fr = self.init_face_model()
     self.pr = PersonDetection(self.PATH_PERSON_DETECTION_MODEL)
     # custom_objects maps each metric name stored inside the saved model
     # back to its function, which Keras needs to deserialize the model.
     self.vgg = models.load_model(self.PATH_TRAINED_VGG,
                                  custom_objects={
                                      'precision_male': precision_male,
                                      "precision_female": precision_female,
                                      "precision": precision,
                                      'recall_male': recall_male,
                                      "recall_female": recall_female,
                                      "recall": recall,
                                      "f1_score_male": f1_score_male,
                                      "f1_score_female": f1_score_female,
                                      # BUG FIX: "f1_score" was mapped to
                                      # `precision`; every other key maps to its
                                      # same-named function, so map it to
                                      # `f1_score` for a correct metric.
                                      "f1_score": f1_score
                                  })
Пример #7
0
def constantize(fname):
    """Load the Keras model saved at `fname` and return a frozen GraphDef
    with all variables converted to constants.

    Resets the Keras/TF session state before loading and clears it again
    afterwards, so the caller gets only the frozen graph.
    """
    K.clear_session()
    tf.reset_default_graph()
    K.set_learning_phase(False)  # freeze in inference mode
    mod = models.load_model(fname)
    outputs = mod.output
    # mod.output is a single tensor for single-output models; normalize to a list.
    # BUG FIX: collections.Sequence was removed in Python 3.10 — the ABC lives
    # in collections.abc.
    if not isinstance(outputs, collections.abc.Sequence):
        outputs = [outputs]
    # freezing wants op names, not tensor names ("op:0" -> "op")
    output_names = [output.name.split(':')[0] for output in outputs]
    sess = K.get_session()
    cg = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(add_shapes=True), output_names)
    K.clear_session()
    return cg
Пример #8
0
def init_model_for_process(example):
    """Load the saved Keras model for a worker process running in eager
    mode, run one test prediction on `example`, and return a dict exposing
    the model under the "NN_MODEL" key.
    """
    # restore under eager execution
    # https://github.com/keras-team/keras/issues/8136
    net = models.load_model(NN_MODEL_FILE)

    # A compile step is required to make the restored model usable, and only
    # TF-native optimizers are supported in eager mode.
    net.compile(optimizer=tf.train.GradientDescentOptimizer(0.01),
                loss='mean_squared_error')

    # shared-state dict handed to the process
    model = {"NN_MODEL": net}

    # Architecture of the restored network, for reference:
    #   gru_1 (GRU)                 (None, 300, 50)   7800 params
    #   gru_2 (GRU)                 (None, 1)          156 params
    #   activation_1 (Activation)   (None, 1)            0 params
    #   Total params: 7,956 (all trainable)

    # Smoke test: pad the example to length 300, add a trailing channel axis,
    # and cast to float (eager predict expects float input here).
    padded = tf.contrib.keras.preprocessing.sequence.pad_sequences(
        [example], maxlen=300, padding='post')
    padded = np.array(padded.reshape(padded.shape + (1, )), dtype=float)

    prediction = net.predict(padded)
    print('Testing model inference: {}'.format(prediction))

    return model
Пример #9
0
def train(neurons, hidden=1, act='relu', epochs=10, repetition=0):
    """Train an MLP to approximate the continuity term from (norm, veldiff).

    Args:
        neurons: units per hidden layer.
        hidden: number of hidden layers (>= 1).
        act: activation function name for the hidden layers.
        epochs: maximum number of training epochs.
        repetition: run index, used only in the checkpoint file name.

    Returns:
        The best checkpointed model (lowest validation loss) reloaded
        from disk.
    """
    samples = int(1e6)
    # sample kernel norms and velocity differences, compute the target value
    norms = np.random.uniform(0, 3, samples)
    veldiffs = np.random.uniform(0, 1, samples)
    dkn = dgaussian(norms, 1)
    cont = continuity(veldiffs, dkn)

    # features: normalized norm in column 0, velocity difference in column 1
    X = np.zeros((samples, 2))
    X[:, 0] = norms / 3
    X[:, 1] = veldiffs
    y = cont

    # simple MLP: `hidden` dense layers feeding one linear output unit
    inputs = layers.Input(shape=(2, ))
    x = layers.Dense(neurons, activation=act)(inputs)
    for _ in range(hidden - 1):
        x = layers.Dense(neurons, activation=act)(x)
    outputs = layers.Dense(1, activation='linear')(x)

    save_path = "models/continuity/h{}/nn_{}_{}.h5".format(
        hidden, neurons, repetition)
    model = models.Model(inputs=inputs, outputs=outputs)
    # stop when validation loss stalls for 10 epochs; keep only the best weights
    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=10)
    check_point = callbacks.ModelCheckpoint(save_path,
                                            monitor='val_loss',
                                            save_best_only=True,
                                            mode='min')
    opt = optimizers.Adam(lr=1e-3, decay=1e-5)
    model.compile(optimizer=opt,
                  loss='mean_squared_error',
                  metrics=['mean_absolute_percentage_error'])

    # fit for its side effects (checkpointing); the History object was unused
    model.fit(X,
              y,
              epochs=epochs,
              batch_size=100,
              callbacks=[early_stop, check_point],
              validation_split=0.01)
    return models.load_model(save_path)
Пример #10
0
def dice_loss(y_true, y_pred):
    """Dice loss: one minus the Dice coefficient of prediction vs. truth."""
    return 1 - dice_coeff(y_true, y_pred)


def bce_dice_loss(y_true, y_pred):
    """Combined loss: binary cross-entropy plus Dice loss."""
    bce = losses.binary_crossentropy(y_true, y_pred)
    return bce + dice_loss(y_true, y_pred)

# imports data from directory and returns numpy arrays input and labels with size [512, 512, num_slices]

# load trained model; the custom loss functions must be passed so Keras can
# deserialize a model that was compiled with them
print("Loading model")
save_model_path = './temp/weights.hdf5'
model = models.load_model(save_model_path, custom_objects={'bce_dice_loss': bce_dice_loss, 'dice_loss': dice_loss})

# NOTE(review): `directoryOfFiles`, `os`, `np` and `nib` (nibabel) are assumed
# to be defined/imported earlier in the file — confirm.
files = os.listdir(directoryOfFiles)
input_size = len(files)

for filename in files:
    # start with an empty [512, 512, 0] volume and append slices along axis 2
    input_shape = [512, 512]
    input_matrix = np.zeros((input_shape[0], input_shape[1], 0))

    print("Start importing data from: " + filename)
    directory = directoryOfFiles + filename
    # only process files whose path contains "normalized"
    if directory.find("normalized") >= 0:
        current_image = nib.load(directory).get_fdata()
        input_matrix = np.concatenate((input_matrix, current_image), axis=2)

        print("Import done, Input Size:")
        # NOTE(review): the loop body appears truncated at this chunk boundary
from models import kerasmodels
from data import dataload
from tasking import general
from tensorflow.contrib.keras import models
import numpy as np
import pandas as pd

# number of example rows to display (not used in the visible part of this file)
display = 5

# load 100x100 test patches: images, labels, and the originating file names
X_test, y_test, filenames = dataload.load_general_patch(
    resolution=100, path='C:/Dataset/img/Test', input_size=10000)

# reshape to the network's input layout and normalize pixel values
X_test_reshape = general.simple_reshape(X_test, 100)
X_test_reshape = general.simple_norm(X_test_reshape)

model = models.load_model('repo/DogCat100.h5')
predictions = model.predict(X_test_reshape)

# flatten (n, 1) predictions into a 1-D vector
predictions = np.array(predictions).reshape(len(predictions), )
predictions = [round(a, 4) for a in predictions
               ]  # round predictions to 4 decimal places — no more precision is needed

# collect labels, file names and predictions side by side for inspection
ds = pd.DataFrame()
ds['index'] = range(len(y_test))
ds['filename'] = pd.Series(filenames)
ds['y_test'] = pd.Series(y_test)
ds['predictions'] = pd.Series(predictions)

######################################################################################
# few most correct dogs
# indexes,predict = dogcat.most_correct_dogs(ds)
from tensorflow.contrib.keras import models
import matplotlib.pyplot as plt
from generate_data import retrieve_data
import numpy as np
import tables

# model = models.load_model('model.hdf5')
model = models.load_model('1000_epochs_600_samples.hdf5')

# only the time axis `t` is needed here; the other returns are discarded
_, t, _, _, _, _ = retrieve_data(False, False)


def test_sample(index):
    """Predict the complex field E for sample `index` and plot it.

    NOTE(review): `E_real`, `E_imag` and `frog` are module-level arrays
    assumed to be loaded elsewhere in this file — confirm. The function
    body also appears truncated at this chunk boundary (no show/return
    visible).
    """

    index_test = index

    # reconstruct the complex ground-truth field from its real/imag parts
    E_actual = E_real[index_test] + 1j * E_imag[index_test]

    # the network takes a single (58, 106, 1) FROG-trace image
    pred = model.predict(frog[index_test].reshape(1, 58, 106, 1))

    # the prediction packs 128 real samples followed by 128 imaginary samples
    E_imag_pred = pred[0][128:]
    E_real_pred = pred[0][:128]
    E_pred = E_real_pred + 1j * E_imag_pred

    # plot magnitude (dashed), real and imaginary parts of the prediction
    fig, ax = plt.subplots(2, 2)
    # ax[0][0].pcolormesh(frog[index_test].reshape(58, 106), cmap='jet')
    ax[1][1].plot(t, np.abs(E_pred), color='blue', linestyle='dashed')
    ax[1][1].plot(t, np.real(E_pred), color='blue')
    ax[1][1].plot(t, np.imag(E_pred), color='red')

    ax[0][0].pcolormesh(frog[index_test].reshape(58, 106), cmap='jet')
Пример #13
0
# To avoid starting from scratch you can load a saved model into `model`,
# but the epoch counter still restarts from 0.
# model = tf.keras.models.load_model('saved_models/transfer_learning_epoch_03_0.9185.h5')
# START TRAINING
history = model.fit_generator(
    generator=batch_generator(train_files, BATCH_SIZE),
    epochs=NUM_EPOCHS,
    steps_per_epoch=STEPS_PER_EPOCH,
    class_weight=cl_weight,
    validation_data=batch_generator(val_files, BATCH_SIZE),
    validation_steps=VAL_STEPS,
    callbacks=callbacks_list,
)

# From here on, run prediction to build the confusion matrix.
# Load a saved model, possibly skipping the training phase above.
model = models.load_model(
    filepath='saved_models/transfer_learning_epoch_03_0.9185.h5')

TEST_STEPS = len(test_files) // BATCH_SIZE

# run predictions over the whole test set, batch by batch
pred_probs = model.predict_generator(generator=batch_generator(
    test_files, BATCH_SIZE),
                                     steps=TEST_STEPS)
# predicted class = index of the highest probability per sample
pred = np.argmax(pred_probs, axis=-1)

from sklearn.metrics import confusion_matrix, accuracy_score
import itertools


def plot_confusion_matrix(cm,
                          classes,
Пример #14
0
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.contrib.keras import models
from ann.equations import dgaussian, continuity
from sph.tools import check_dir

save_path = "../models/continuity/"
check_dir(save_path)

# trained networks with 1, 2 and 3 hidden layers (250 neurons each)
model1 = models.load_model(save_path + "h1/nn_250_0.h5")
model2 = models.load_model(save_path + "h2/nn_250_0.h5")
model3 = models.load_model(save_path + "h3/nn_250_0.h5")

# evaluation grid over norm (0..3) and velocity difference (0..1),
# flattened to column vectors for the networks
X = np.linspace(0, 3, 250)
V = np.linspace(0, 1, 250)
X, V = np.meshgrid(X, V)
X = X.reshape((-1, 1))
V = V.reshape((-1, 1))
# analytic reference: continuity computed from the Gaussian kernel derivative
dkn = dgaussian(X, 1)
y = continuity(V, dkn).ravel()
# network predictions on the same (norm, veldiff) input pairs
y_pred1 = model1.predict(np.concatenate((X, V), axis=-1)).ravel()
y_pred2 = model2.predict(np.concatenate((X, V), axis=-1)).ravel()
y_pred3 = model3.predict(np.concatenate((X, V), axis=-1)).ravel()

plt.plot(X, y)
plt.plot(X, y_pred3)
plt.show()

# relative error of each network vs. the analytic value
# (labels are Spanish: 'capas' = layers)
plt.plot(y, abs(y - y_pred3.ravel()) / y, 'g.', label='3 capas')
plt.plot(y, abs(y - y_pred2.ravel()) / y, 'c.', label='2 capas')
plt.plot(y, abs(y - y_pred1.ravel()) / y, 'm.', label='1 capa')
        frogtrace, tau_frog, w_frog = plot_frog(E=E_pred, t=t, w=w, dt=dt, w0=w0, plot=False)
        ax4 = plt.subplot2grid((4, 2), (3, 0), rowspan=1, colspan=2)
        ax4.pcolormesh(frogtrace, cmap="jet")
        ax4.set_title("Network Reconstructed FROG trace")
        ax4.set_xticks([])
        ax4.set_yticks([])

        # add y labels on left side of graph only
        if j == 0:
            ax1.set_ylabel('Actual E(t)')
            ax2.set_ylabel('FROG trace')
            ax3.set_ylabel('Retrieved E(t)')
            ax4.set_ylabel("Reconstructed FROG trace")

# load model
model = models.load_model("./model.hdf5")

# load test data; arrays are copied out of the HDF5 file before it is closed
hdf5_file = tables.open_file('frogtestdata.hdf5', mode='r')
E_real = hdf5_file.root.E_real[:, :]
E_imag = hdf5_file.root.E_imag[:, :]
frog = hdf5_file.root.frog[:, :]
# print((frog[1].reshape(58, 106)))
# image = frog[1].reshape(58, 106)
# plt.imshow(image)
# plt.show()
# targets: real and imaginary field samples concatenated along axis 1
E_apended = np.concatenate((E_real, E_imag), 1)
hdf5_file.close()


# evaluate on all FROG traces reshaped to the network input (n, 58, 106, 1)
results = model.evaluate(frog.reshape(-1, 58, 106, 1), E_apended)
Пример #16
0
 def __init__(self, isVideo=False):
     """Initialize detector state and load the pretrained VGG model.

     Args:
         isVideo: True when frames come from a video stream, which enables
             the tracking-related counters below.
     """
     # load the model first so a missing file fails fast
     self.model = models.load_model('vgg_pretrained.h5')
     self.isVideo = isVideo
     # tracking state: last detections and frame bookkeeping counters
     self.prev_detections = None
     self.frame_count = 0
     self.frames_since_full_search = 0
Пример #17
0
import tensorflow as tf
from tensorflow.contrib.keras import models
import numpy as np

tf.enable_eager_execution()

# grab one IMDB review (integer-encoded, vocabulary capped at 100 words)
from keras.datasets import imdb
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=100)

example = X_test[1]

# restore in eager execution
# https://github.com/keras-team/keras/issues/8136

NN_MODEL = models.load_model("model_without_opt.h5")

# Need to add some optimizer to make the model compiled.
# Only TF-native optimizers are supported in eager mode.
NN_MODEL.compile(optimizer=tf.train.GradientDescentOptimizer(0.01),
                 loss='mean_squared_error')

# pad the example to length 300 and add a trailing channel axis
x = tf.contrib.keras.preprocessing.sequence.pad_sequences([example],
                                                          maxlen=300,
                                                          padding='post')
# predict expects float input here for some reason
x = np.array(x.reshape(x.shape + (1, )), dtype=float)

# returns numpy, strangely — not a tensor
y = NN_MODEL.predict(x)
print('Testing model inference: {}'.format(y))  # [[0.49816433]]
'''
def loadFileModel(path):
    """Load and return the Keras model saved at ``path``."""
    return models.load_model(path)