Example #1
                    validation_split=.3,        # hold out the last 30% of the data for validation
                    epochs=2500,                # train for up to 2500 epochs unless stopped early
                    callbacks=callbacks_list)   # callbacks defined above run at the end of each epoch


# ############################## Predictions vs Actual #######################################

best = pd.Series(history.history['val_accuracy']).idxmax()
print(np.array(history.history['val_accuracy']).max())

# 2
print('')
print('train accuracy',history.history['accuracy'][best])
print('test accuracy',history.history['val_accuracy'][best])
print('\n')

my_model = load_model(filepath)

predictions = my_model.predict(features)

result_dict = {'prediction': list(predictions),
               'actual': list(target)}
df_result = pd.DataFrame(result_dict)

df_result['Name'] = corners_data['Name']
df_result['Draft Year'] = corners_data['Draft Year']
df_result['Games'] = corners_data['Games']
df_result['Draft Pos'] = corners_data['Draft Pos']

print(df_result.head(50))
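# Example #1 assumes a `callbacks_list` and checkpoint `filepath` defined earlier
# (not shown in this excerpt). A minimal sketch of what they typically contain,
# assuming the best model by validation accuracy is checkpointed to `filepath`
# and training stops early once validation accuracy plateaus:
from keras.callbacks import ModelCheckpoint, EarlyStopping

filepath = 'best_model.h5'   # hypothetical checkpoint path
callbacks_list = [
    ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=True, mode='max'),
    EarlyStopping(monitor='val_accuracy', patience=50, mode='max'),
]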
Example #2
 def build_encoder_decoder_inference_from_file(self, model_path, sentence_encoder, include_sentence_encoder=True,
                                               attention=False):
     if attention:
         model = load_model(model_path, custom_objects={'AttentionGRU': AttentionGRU})
     else:
         model = load_model(model_path)
     return self.build_encoder_decoder_inference(model, sentence_encoder, include_sentence_encoder, attention)
Example #3
def predict():
    model1_dir = G.MOD + 'deep_nodense/temp_model.hdf5'
    model2_dir = G.MOD + 'deep_dropout_full/temp_model.hdf5'
    model3_dir = G.MOD + 'deep_nodense_padded/temp_model.hdf5'
    model4_dir = G.MOD + 'deep_nodense_square/temp_model.hdf5'
    folder_dir = G.TST

    model1 = load_model(model1_dir)
    model2 = load_model(model2_dir)
    model3 = load_model(model3_dir)
    model4 = load_model(model4_dir)
    
    breed_folders = sorted(os.listdir(folder_dir))
    img_pred_list = []

    for idx, breed in enumerate(breed_folders):
        print(breed)
        img_dir = folder_dir + breed + '/'
        img_list = os.listdir(img_dir)
        for img_loc in img_list:
            img_path = img_dir + img_loc
            img = Image.open(img_path)
            if img.mode != 'RGB':
                continue
            img = img.resize((model1.input_shape[3], model1.input_shape[2]))
            img = np.asarray(img, dtype='float32')
            img /= 255.
            img = np.transpose(img, (2,0,1))
            img = np.array([img])
            try:
                class_pred1 = model1.predict(img)
                class_pred2 = model2.predict(img)
                class_pred3 = model3.predict(img)
                class_pred4 = model4.predict(img)
            except Exception:
                print("Pic didn't work")
                continue

            class_pred1 = class_pred1.flatten()
            class_pred2 = class_pred2.flatten()
            class_pred3 = class_pred3.flatten()
            class_pred4 = class_pred4.flatten()

            class_pred = class_pred1 + class_pred2 + class_pred3 + class_pred4
            sort_pred = np.argsort(class_pred)[:-6:-1]

            if idx == sort_pred[0]:
                ens1_1 = True
            else:
                ens1_1 = False
            if idx in sort_pred:
                ens5_1 = True
            else:
                ens5_1 = False
            
            img_pred_list.append([ens1_1, ens5_1])

    results_df = pd.DataFrame(img_pred_list)
    
    return results_df.mean()
Example #4
def test_model_saving_to_pre_created_h5py_file():
    model, x = _get_sample_model_and_input()

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    with h5py.File(fname, mode='r+') as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test non-default options in h5
    with h5py.File('does not matter', driver='core',
                   backing_store=False) as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    with h5py.File(fname, mode='r+') as h5file:
        g = h5file.create_group('model')
        save_model(model, g)
        loaded_model = load_model(g)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
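# Example #4 relies on a `_get_sample_model_and_input()` helper that is not shown.
# Judging from Example #11 below, which inlines the same setup, it is roughly the
# following (assuming the same imports as the tests: Input, Dense, Model, losses,
# optimizers, metrics from keras and numpy as np):
def _get_sample_model_and_input():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)
    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    return model, x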
Example #5
def test_functional_model_saving():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    out = model.predict(x)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    new_model_disk = load_model(fname)
    os.remove(fname)

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(filename=fname)
        save_model(model, gcs_filepath)
        file_io_proxy.assert_exists(gcs_filepath)
        new_model_gcs = load_model(gcs_filepath)
        file_io_proxy.delete_file(gcs_filepath)  # cleanup

    for new_model in [new_model_disk, new_model_gcs]:
        new_out = new_model.predict(x)
        assert_allclose(out, new_out, atol=1e-05)
Example #6
def test_saving_overwrite_option_gcs():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_saving_overwrite_option_gcs.h5')
        # we should not use same filename in several tests to allow for parallel
        # execution
        save_model(model, gcs_filepath)
        model.set_weights(new_weights)

        with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
            ask.return_value = False
            save_model(model, gcs_filepath, overwrite=False)
            ask.assert_called_once()
            new_model = load_model(gcs_filepath)
            for w, org_w in zip(new_model.get_weights(), org_weights):
                assert_allclose(w, org_w)

            ask.return_value = True
            save_model(model, gcs_filepath, overwrite=False)
            assert ask.call_count == 2
            new_model = load_model(gcs_filepath)
            for w, new_w in zip(new_model.get_weights(), new_weights):
                assert_allclose(w, new_w)

        file_io_proxy.delete_file(gcs_filepath)  # cleanup
Example #7
def test_saving_overwrite_option():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model.set_weights(new_weights)

    with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
        ask.return_value = False
        save_model(model, fname, overwrite=False)
        ask.assert_called_once()
        new_model = load_model(fname)
        for w, org_w in zip(new_model.get_weights(), org_weights):
            assert_allclose(w, org_w)

        ask.return_value = True
        save_model(model, fname, overwrite=False)
        assert ask.call_count == 2
        new_model = load_model(fname)
        for w, new_w in zip(new_model.get_weights(), new_weights):
            assert_allclose(w, new_w)

    os.remove(fname)
Example #8
def cnn_models():
	cnn_modi = load_model(os.path.join(way,"models/cnn_modi.h5"))
	cnn_kejriwal = load_model(os.path.join(way,"models/cnn_kejriwal.h5"))
	cnn_modi.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
	cnn_kejriwal.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
	graph = tf.get_default_graph()
	return cnn_kejriwal, cnn_modi, graph
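# A hedged usage sketch (not part of the original snippet): the default graph is
# returned because, with TF1-style Keras, predictions made from another thread
# (for example a Flask request handler) must run inside the graph the models were
# loaded in. `face_batch` below is a hypothetical preprocessed numpy batch that
# matches the models' input shape.
cnn_kejriwal, cnn_modi, graph = cnn_models()
with graph.as_default():
    modi_scores = cnn_modi.predict(face_batch)
    kejriwal_scores = cnn_kejriwal.predict(face_batch)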
Example #9
def test_sequential_model_saving_2():
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(Dense(3))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    out = model.predict(x)

    load_kwargs = {'custom_objects': {'custom_opt': custom_opt,
                                      'custom_loss': custom_loss}}
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    new_model_disk = load_model(fname, **load_kwargs)
    os.remove(fname)

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(filename=fname)
        save_model(model, gcs_filepath)
        file_io_proxy.assert_exists(gcs_filepath)
        new_model_gcs = load_model(gcs_filepath, **load_kwargs)
        file_io_proxy.delete_file(gcs_filepath)  # cleanup

    for new_model in [new_model_disk, new_model_gcs]:
        new_out = new_model.predict(x)
        assert_allclose(out, new_out, atol=1e-05)
Example #10
def predict(images):
    if len(images) == 4:
        size = [120, 120]
        model = load_model('test2.h5')
    elif len(images) == 16:
        size = [60, 60]
        model = load_model('test4.h5')
    elif len(images) == 25:
        size = [60, 60]
        model = load_model('test5.h5')

    images = np.array(images).astype('float32') / 255
    #
    # mean = 130
    # std = 80
    # images = (np.array(images) - mean) / (std + 1e-7)

    x = []
    for i in range(images.shape[0]):
        x.append(cv2.resize(images[i], (size[0], size[1]), interpolation=cv2.INTER_AREA)[np.newaxis, :, :, np.newaxis])

    test_preds = model.predict(x)
    test_preds = np.argmax(test_preds, axis=2)
    labels = list(test_preds)
    return labels
Example #11
def test_model_saving_to_pre_created_h5py_file():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    with h5py.File(fname, mode='r+') as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test non-default options in h5
    with h5py.File('does not matter', driver='core',
                   backing_store=False) as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example #12
File: agent.py  Project: awentzonline/yarp
 def try_load_model(self):
     filename = self.model_filename
     if os.path.exists(filename):
         self.model = load_model(filename, custom_objects=self.model_custom_objects())
         self.target_model = load_model(filename, custom_objects=self.model_custom_objects())
         return True
     return False
Example #13
def test_export_keras_model(_, _1):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 1
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25, 25, 1)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25, 25, 1)
    clean_dir(TEST_TEMP_DIR)
    clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score <= 1.0

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x, train_y)
    assert score <= 1.0
    before = model.graph
    model.fit(train_x, train_y, train_x, train_y)
    assert model.graph == before
    clean_dir(TEST_TEMP_DIR)

    clf = ImageRegressor(path=TEST_TEMP_DIR, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score >= 0.0

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name

    model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x, train_y)
    assert score >= 0.0
    clean_dir(TEST_TEMP_DIR)
Example #14
def robust_load_model(filepath, custom_objects=None):
    try:
        model = load_model(filepath, custom_objects=custom_objects)
    except ValueError:
        # load_model can fail with a ValueError when the saved optimizer state is
        # incompatible; drop the optimizer weights from the HDF5 file and retry.
        import h5py
        f = h5py.File(filepath, 'r+')
        del f['optimizer_weights']
        f.close()
        model = load_model(filepath, custom_objects=custom_objects)
    return model
Example #15
    def __init__(self,srccode="opencv"):
        self.fd=FaceDetection(srccode)
        # parameters
        emotion_model_path = os.path.join(gnu_code_path,'trained_models/emotion_models/simple_CNN.530-0.65.hdf5')
        gender_model_path = os.path.join(gnu_code_path,'trained_models/gender_models/simple_CNN.81-0.96.hdf5')

        # loading models

        self.emotion_classifier = load_model(emotion_model_path)
        self.gender_classifier = load_model(gender_model_path)
Example #16
def detect(seq,irrWindowSz):
	irrigationRegionRegressor = load_model(pIrrRegionRegressorCNN)
	seqPP = preprocessData(seq)
	irrTimepoints = irrigationRegionRegressor.predict(seqPP)
	seqIrr = cropSequenceWRTIrrigations(seq, irrTimepoints, irrWindowSz)
	irrigationDetector = load_model(pIrrDetectorCNN)
	seqIrr2 = preprocessDataForIrrDetector(seqIrr)
	seqIrr2 = seqIrr2[..., np.newaxis]
	irrMask = irrigationDetector.predict(seqIrr2)
	return irrTimepoints, seqIrr, irrMask
Example #17
def test_saving_unused_layers_is_ok():
    a = Input(shape=(256, 512, 6))
    b = Input(shape=(256, 512, 1))
    c = Lambda(lambda x: x[:, :, :, :1])(a)

    model = Model(inputs=[a, b], outputs=c)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    load_model(fname)
    os.remove(fname)
Example #18
 def load_model(self, epoch=None):
     '''
     Loads a saved model. If epoch id is provided, will load the corresponding model. Or else,
     will load the best model.
     '''
     if not epoch:
         self.model = load_model("%s.model" % self.model_name_prefix,
                                 custom_objects=self.custom_objects)
     else:
         self.model = load_model("%s_%d.model" % (self.model_name_prefix, epoch),
                                 custom_objects=self.custom_objects)
     self.model.summary()
     self.data_processor = pickle.load(open("%s.dataproc" % self.model_name_prefix, "rb"))
Example #19
  def __init__(self):

    print("Load registry")
    self.registry       = pickle.load(open('../models/facerecog/registry.pkl','rb'))
    self.class_labels = pickle.load(open('../models/facerecog/class_labels.pkl','rb'))
    print("Load Facerecog model")
    self.model         =   load_model('../models/facerecog/final_classifier.h5')
    print("Extract Siamese model")
    full_model       = load_model('../models/facerecog/facerecog_2.h5',custom_objects={'triplet_loss': triplet_loss})
    print("Load inception model")
    self.frmodel = full_model.get_layer('model_1')
    self.image_w     = 96
    self.image_h      = 96
    self.detector      = dlib.get_frontal_face_detector()
    self.align_dlib = AlignDlib()
Example #20
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example #21
    def classify(self, examples, output, model=None, finishBeforeReturn=False, replaceRemoteFiles=True):
        print("Predicting devel examples", file=sys.stderr)
        output = os.path.abspath(output)
        # Return a new classifier instance for following the training process and using the model
        classifier = copy.copy(self)
        
        if model is None:
            classifier.model = model = self.model
        model = os.path.abspath(model)
        model = self.connection.upload(model, uncompress=True, replace=replaceRemoteFiles)
        classifier.predictions = self.connection.getRemotePath(output, True)
        examples = self.getExampleFile(examples, replaceRemote=replaceRemoteFiles)
        classifier._filesToRelease = [examples]
        
        self.kerasModel = load_model(model)
        numFeatures = self.kerasModel.layers[0].get_input_shape_at(0)[1]
        
        features, classes = datasets.load_svmlight_file(examples, numFeatures)
        #features = features.toarray()
        #predictions = self.kerasModel.predict(features, 128, 1)
        predictions = self.kerasModel.predict_generator(predict_batch_generator(features, 1), features.shape[0] / 1)
        predClasses = predictions.argmax(axis=-1)

        predictionsPath = self.connection.getRemotePath(output, False)
        with open(predictionsPath, "wt") as f:
            for i in range(predictions.shape[0]):
                f.write(str(predClasses[i] + 1) + " " + " ".join([str(x) for x in  predictions[i]]) + "\n")                
Example #22
def main():
    """Generate different test models and save them to the given directory."""
    if len(sys.argv) != 3:
        print('usage: [model name] [destination file path]')
        sys.exit(1)
    else:
        model_name = sys.argv[1]
        dest_path = sys.argv[2]

        get_model_functions = {
            'small': get_test_model_small,
            'sequential': get_test_model_sequential,
            'full': get_test_model_full
        }

        if model_name not in get_model_functions:
            print('unknown model name: ', model_name)
            sys.exit(2)

        assert K.backend() == "tensorflow"
        assert K.floatx() == "float32"
        assert K.image_data_format() == 'channels_last'

        np.random.seed(0)

        model_func = get_model_functions[model_name]
        model = model_func()
        model.save(dest_path, include_optimizer=False)

        # Make sure models can be loaded again,
        # see https://github.com/fchollet/keras/issues/7682
        model = load_model(dest_path)
        print(model.summary())
Example #23
def test_saving_multiple_metrics_outputs():
    inputs = Input(shape=(5,))
    x = Dense(5)(inputs)
    output1 = Dense(1, name='output1')(x)
    output2 = Dense(1, name='output2')(x)

    model = Model(inputs=inputs, outputs=[output1, output2])

    metrics = {'output1': ['mse', 'binary_accuracy'],
               'output2': ['mse', 'binary_accuracy']
               }
    loss = {'output1': 'mse', 'output2': 'mse'}

    model.compile(loss=loss, optimizer='sgd', metrics=metrics)

    # assure that model is working
    x = np.array([[1, 1, 1, 1, 1]])
    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example #24
def test_sequential_model_saving_2():
    # test with funkier config
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example #25
def testModel():
	windowSz = 300
	
	model = load_model('dpa_beta_0.38.h5')
	pTrainingData = "trainDoubleExpHeating_60Hz.h5"
	print("loading synthetic data " + pTrainingData)
	with h5py.File(pTrainingData, 'r') as hf:
		x_test = np.array(hf.get('x_test'))
		y_test = np.array(hf.get('y_test'))
	x_test = x_test[..., np.newaxis]
	y_ai = model.predict(x_test)
	print('ai' + str(y_ai[0,:]) + '\ngt' + str(y_test[0,:]) + '\n')
	print('ai' + str(y_ai[1,:]) + '\ngt' + str(y_test[1,:]) + '\n')
	print('ai' + str(y_ai[2,:]) + '\ngt' + str(y_test[2,:]) + '\n')
	
	print("loading io. data ")
	pTrainingData="../Data/irr_dr_training_new.mat"
	f = h5py.File(pTrainingData, "r")
	bg = f["seqBackground"]
	fg = f["seqSpuelung"]
	x_fg = ThermographicDataHandler.slidingWindowPartitioning(np.array(fg.value).T, windowSz, 150)
	x_bg = ThermographicDataHandler.slidingWindowPartitioning(np.array(bg.value).T, windowSz, 150)
	
#	x_ai = model.predict(encoder.predict(scaleData(x_fg[:,0,:])))
#	x_ai_bg = model.predict(encoder.predict(scaleData(x_bg[:,0,:])))
	x_fg = x_fg[..., np.newaxis]
	x_bg = x_bg[..., np.newaxis]
	x_ai = model.predict((scaleData(x_fg[:,0,:,:])))
	x_ai_bg = model.predict((scaleData(x_bg[:,0,:,:])))
	print("fg: " +  str(x_ai[0,]))
	print("bg_1: " + str(x_ai_bg[0,]))
	print("bg_2: " + str(x_ai_bg[1,]))
Example #26
File: Model.py  Project: shayanpr/Enhance
def autoEncoderGen(path, input_shape=theShape):
    if os.path.exists(path):
        print('loading: ' + str(sorted(os.listdir('models/'))[-1]))
        autoencoder = load_model(os.path.join('models', sorted(os.listdir('models/'))[-1]))
        name = str(sorted(os.listdir('models/'))[-1])
        print('loaded: ' + name)
    else:
        print('No previous model found.')
        print('Building a new model.')
        input_img = Input(shape=input_shape)  # adapt this if using `channels_first` image data format

        x = UpSampling2D((2, 2))(input_img)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2,2))(x)
        x = Conv2D(8, (5, 5), activation='relu', padding='same')(x)
        x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        encoded = x

        x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
        x = Conv2D(8, (5, 5), activation='relu', padding='same')(x)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        decoded = Conv2D(3, (3, 3), activation='relu', padding='same')(x)

        autoencoder = Model(input_img, decoded)
        autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
        autoencoder.save('models/autoencoder.h5')
        name = 'autoencoderV.h5'
    eye_temp = name.replace('.h5', '')
    try:
        eye = int(eye_temp.replace('autoencoderV', '')) - 10000000
    except ValueError:
        eye = 0

    return autoencoder, name, eye
Example #27
def load_params():
    X_test = os.listdir('./test-jpg')
    X_test = [fn.replace('.jpg', '') for fn in X_test]
    model = load_model('model_amazon6.h5', custom_objects={'fbeta': train_keras.fbeta})
    with open('tag_columns.txt', 'r') as f:
        tag_columns = f.read().split('\n')
    return X_test, model, tag_columns
Example #28
 def load(self, file, compile=False):
     try:
         del self.network
     except Exception:
         pass
     self.network = load_model(file, custom_objects={"objective_function_for_policy":AIPlayer.objective_function_for_policy,
                                                     "objective_function_for_value":AIPlayer.objective_function_for_value}, compile=compile)
Example #29
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test load_weights on model file
    model.load_weights(fname)
    os.remove(fname)
Example #30
def load(path, opts, vars):
    try:
        print('\nLoading model\nCreating session and graph')   
        server = tf.train.Server.create_local_server()
        sess = tf.Session(server.target) 
        graph = tf.get_default_graph()
        backend.set_session(sess) 
        
        model_path = path + '.' + opts['network'] + '.h5'
        print('Loading model from {}'.format(model_path))
        model = load_model(model_path)

        
        print('Create prediction function')

        model._make_predict_function()
        with graph.as_default():
            with sess.as_default():
                input_shape = list(model.layers[0].input_shape)
                input_shape[0] = 1
                model.predict(np.zeros(tuple(input_shape)))

        vars['graph'] = graph
        vars['session'] = sess
        vars['model'] = model
    except Exception as e:
        print_exception(e, 'load')
        sys.exit()
Example #31
    def __init__(self, nb_classes, model, seq_length,
                 saved_model=None, features_length=2048):
        """
        `model` = one of:
            lstm
            lrcn
            mlp
            conv_3d
            c3d
        `nb_classes` = the number of classes to predict
        `seq_length` = the length of our video sequences
        `saved_model` = the path to a saved Keras model to load
        """

        # Set defaults.
        self.seq_length = seq_length
        self.load_model = load_model
        self.saved_model = saved_model
        self.nb_classes = nb_classes
        self.feature_queue = deque()

        # Set the metrics. Only use top k if there's a need.
        metrics = ['accuracy']
        if self.nb_classes >= 10:
            metrics.append('top_k_categorical_accuracy')

        # Get the appropriate model.
        if self.saved_model is not None:
            print("Loading model %s" % self.saved_model)
            self.model = load_model(self.saved_model)
        elif model == 'lstm':
            print("Loading LSTM model.")
            self.input_shape = (seq_length, features_length)
            self.model = self.lstm()
        elif model == 'lrcn':
            print("Loading CNN-LSTM model.")
            self.input_shape = (seq_length, 80, 80, 3)
            self.model = self.lrcn()
        elif model == 'mlp':
            print("Loading simple MLP.")
            self.input_shape = (seq_length, features_length)
            self.model = self.mlp()
        elif model == 'conv_3d':
            print("Loading Conv3D")
            self.input_shape = (seq_length, 80, 80, 3)
            self.model = self.conv_3d()
        elif model == 'c3d':
            print("Loading C3D")
            self.input_shape = (seq_length, 80, 80, 3)
            self.model = self.c3d()
        else:
            print("Unknown network.")
            sys.exit()

        # Now compile the network.
        optimizer = Adam(lr=1e-5, decay=1e-6)
        self.model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                           metrics=metrics)

        print(self.model.summary())
Example #32
vel = vel.astype('float32') 

feq = np.asarray([feq]*Re.shape[0])
fnet = np.append(feq,np.zeros((feq.shape[0],1,feq.shape[-2],feq.shape[-1])),axis=1)

Re_test = Re_test/Re_max; Re_test = Re_test.astype('float32')
fnet_test = np.empty((1,fnet.shape[1],fnet.shape[-2],fnet.shape[-1]))
vel_test = np.empty((1,vel.shape[1],vel.shape[-2],vel.shape[-1]))

fnet_test[0,:,:,:] = fnet[0,:,:,:]
fnet_test[0,-1,:,:] = Re_test

for i in np.arange(feq.shape[0]):
    fnet[i,-1,:,:] = Re[i]  

modelx = load_model('cnn2_x.h5')
print('MODELX SUMMARY: ')
print(modelx.summary())
print('------------------------------------------------------')
print('MODELY SUMMARY: ')
modely = load_model('cnn2_y.h5')
print(modely.summary())

velx = modelx.predict(fnet_test)   
vely = modely.predict(fnet_test)

print(velx.shape)
print(vely.shape)
print(vel_test.shape)

Example #33
import tensorflow
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from os import walk
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions

model_path = './model/image_classifier_model.h5'
model_weights_path = './model/image_classifier_model_weights.h5'
model = load_model(model_path)
model.load_weights(model_weights_path)

test_path = 'Dataset/test_set'
training_path = 'Dataset/training_set'

labels = list()

for (dirpath, dirnames, filenames) in walk(training_path):
    for dirname in dirnames:
        labels.append(dirname)

labels = sorted(labels)
print(labels)
picture_array = list()

for (dirpath, dirnames, filenames) in walk(test_path):
    for filename in filenames:
        picture_array.append(test_path + "/" + filename)
    break
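# A hedged continuation sketch (not from the original source): classify each test
# picture and map the argmax back to the folder-derived labels. The 64x64 target
# size is an assumption; use whatever size the classifier was trained on.
for picture in picture_array:
    img = load_img(picture, target_size=(64, 64))
    x = img_to_array(img)
    x = preprocess_input(np.expand_dims(x, axis=0))
    probs = model.predict(x)
    print(picture, labels[int(np.argmax(probs[0]))])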
Example #34
        conversation_name = args.video.split('/')[-1].split('.')[0]

    out_file = args.out_dir + conversation_name

    if os.path.isfile(out_file + ".pkl") and pd.read_pickle(
            out_file +
            ".pkl").shape[0] > 0:  # and os.path.isfile (out_file + ".png"):
        print("Video already processed")
        exit(1)

    # hyper-parameters for bounding boxes shape
    emotion_offsets = (20, 40)

    # loading models
    #face_detector = dlib.get_frontal_face_detector()
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    video_capture = cv2.VideoCapture(args.video)

    # Frames frequence : fps frame per seconde
    fps = video_capture.get(cv2.CAP_PROP_FPS)
    length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))

    emotions_states = {
Example #35
from keras.backend import clear_session
import numpy as np
from keras.models import load_model
import tensorflow as tf
clear_session()
np.set_printoptions(suppress=True)
input_graph_name = "dogbreed.h5"
output_graph_name = input_graph_name[:-3] + '.tflite'
model = load_model(input_graph_name)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.post_training_quantize = True
tflite_model = converter.convert()
open(output_graph_name, "wb").write(tflite_model)
print ("generate:",output_graph_name)
Example #36
def attentions_layer(x):
  from keras import backend as K
  x1 = x[:,:,1:]
  x2 = x[:,:,0:1]
  x2 = K.softmax(x2)
#  x2 = keras.backend.print_tensor(x2, str(x2))
#  x1 = keras.backend.print_tensor(x1, str(x1))
  x=x1*x2
#  x = keras.backend.print_tensor(x, str(x))
  return x
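# A hedged usage note (not from the original): attentions_layer is normally wrapped
# in a Lambda layer so the softmaxed score in channel 0 gates the remaining
# channels, e.g. weighted = Lambda(attentions_layer)(recurrent_outputs)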

hidden_size = args.hidden_size

if args.pretrained_name is not None:
  from keras.models import load_model
  model = load_model(args.pretrained_name)
  print("loaded model",model.layers[0].input_shape[1])
#  ml = model.layers[0].input_shape[1]
#  if (ml != max_length):
#    print("model length",ml,"different from data length",max_length)
#    max_length = ml
else:
#  model = Sequential()
#  model.add(Embedding(len(vocab), len(vocab), embeddings_initializer='identity', trainable=False, input_shape=(max_length,)))
#  model.add(LSTM_use(hidden_size, return_sequences=True))
#  model.add(LSTM_use(max_output + 1, return_sequences=False))
#  model.add(Dense(max_output +1))
#  model.add(Activation('softmax'))
  
  inputs = Input(shape=(None,None))
  print("k",inputs.shape)
Example #37
                              callbacks=callbacks_list,
                              verbose=1)
    # --------------------------Logs of loss and acc---------------------------------
    train_loss=hist.history['loss']
    val_loss=hist.history['val_loss']
    train_acc=hist.history['acc']
    val_acc=hist.history['val_acc']
    xc=range(epochs)
    max_acc_val = max(val_acc)
    min_loss_val =min(val_loss)
    print('best_acc',max(val_acc), 'min loss', min_loss_val)
    #-------------------inference zone

    #Delete existing last model
    del model
    model = load_model(filename+'_'+h5_name)

    print ('Evaluation Best Model')
    test_loss, test_acc=model.evaluate(x=x_test, y=y_test, batch_size=batch_size, verbose=1, sample_weight=None, steps=None)
    print('Test_acc',test_acc)
    #------------report result CSV
    row_n=[str(job_id),script_name,str(round(max_acc_val,5)),str(round(min_loss_val,5)), str(round(test_acc,5)),str(round(test_loss,5)),str(lr),str(s),str(mw),str(multi),str(sig),h5_name,time.strftime("%h/%d/%m/%Y")]

    with open(dest_path+'cifar_monogenic_haze'+'.csv', 'a') as f:
            writer = csv.writer(f, delimiter='\t',lineterminator='\n')
            writer.writerow(row_n)
            f.close()

end_time = time.monotonic()
#end_time = time.time()
print(end_time)
Example #38
from keras.preprocessing import image
from PIL import Image

# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer

# Define a flask app
app = Flask(__name__)

# Model saved with Keras model.save()
MODEL_PATH = 'model/malariaModel.h5'

# Load your trained model
model = load_model(MODEL_PATH)
# model._make_predict_function()  # Necessary
print('Model loaded. Start serving...')

print('Model loaded. Check http://127.0.0.1:5000/')


def model_predict(img_path, model):
    img = image.load_img(img_path, target_size=(130, 130))

    # Preprocessing the image
    x = image.img_to_array(img)
    # x = np.true_divide(x, 255)
    x = np.expand_dims(x, axis=0)

    # Be careful how your trained model deals with the input
Example #39
    # Saving evolution history of epochs in this fold
    f = open("%s/history_fold_%d.txt" % (directory, fold), 'w')
    f.write("best_epoch: %d\n" % best_epoch)
    f.write(
        "epoch,training_accuracy,training_loss,validation_accuracy,validation_loss\n"
    )
    for i in range(MAX_EPOCHS):
        f.write("%d,%f,%f,%f,%f\n" %
                (i, epoch_history['acc'][i], epoch_history['loss'][i],
                 epoch_history['val_acc'][i], epoch_history['val_loss'][i]))
    f.close()

    # load the best model saved on disk
    del model
    model = load_model("%s/model_fold_%d.h5" % (directory, fold))

    evaluation = model.evaluate(x=x_test, y=y_test)
    logging.info("Accuracy: %f" % evaluation[1])

    prediction = model.predict(x_test)

    # save predictions to disk
    test_indexes = test_indexes.reshape(test_indexes.shape[0], 1)
    tweet_ids = data_set[:, DATA_SET_USER_ID][test_indexes]
    true_labels = np.asarray(y_corpus_raw, dtype=int)[test_indexes]
    class_1 = prediction[:, 2]
    class_2 = prediction[:, 1]
    class_3 = prediction[:, 0]
    output = np.append(tweet_ids, true_labels, axis=1)
    output = np.append(output,
Example #40
import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.utils import np_utils
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten,Dropout
from keras.optimizers import Adam
import cv2
model = Sequential()
model = load_model('my_model.h5')


X_test = []
y_test = []
fp = open('little/test.txt', "r")

line = fp.readline()
while line:
    lab = np.zeros(15,dtype = np.float32)
    mid = line.find(".jpg")+4
    lab[int(line[mid:len(line)])] = 1
    img = cv2.imread(line[0:mid])
    img = cv2.resize(img,dsize=(160,120))
    X_test.append(img)
    y_test.append(lab)
    line = fp.readline()
X_test = np.asarray(X_test,dtype=np.float32)
y_test = np.asarray(y_test)

X_test = X_test.reshape(-1, 3,160, 120)/255.
y_test = y_test.reshape(-1, 15)
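# A hedged continuation sketch (not part of the original): score the reloaded model
# on the test arrays built above, assuming it was compiled with an accuracy metric.
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print('test accuracy:', acc)
pred_classes = np.argmax(model.predict(X_test), axis=1)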
Example #41
 def load_model(self):
     self.online_model = load_model(self.name)
Example #42
filelist = []
labels = []

for file in listdir('./encoded/NORMAL'):
    filelist.append('./encoded/NORMAL/{}'.format(file))
    labels.append(0)

for file in listdir('./encoded/PNEUMONIA'):
    filelist.append('./encoded/PNEUMONIA/{}'.format(file))
    labels.append(1)

generator = DataGenerator(filelist, labels)

model = load_model('./model.h5',
                   custom_objects={
                       'myDense': myDense,
                       'myConv2d': myConv2d,
                       'maxpool': maxpool
                   })

probs = model.evaluate_generator(generator)

print("The model had an accuracy of {}% with a loss of {}.".format(
    probs[1] * 100, probs[0]))

yhat_probs = model.predict_generator(generator, verbose=0)

yhat_probs = yhat_probs[:, 0]

yhat_probs = yhat_probs.round().astype(int)

cm = ConfusionMatrix(labels, yhat_probs)
Example #43
LEARNING_RATE = 0.001
eps = float(sys.argv[1])
directions = [Direction.North, Direction.South, Direction.East, Direction.West, Direction.Still]
actions = np.array([0,1,2,3,4])

ecart_type = 12
reward_gaussian_weight = np.zeros(128)
for i in range(128):
    reward_gaussian_weight[i] = 3*exp(-i**2/(2*ecart_type**2))


os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)

if loadmodel:
   model = load_model('my_model.h5', custom_objects={'huber_loss': huber_loss, 'tf': tf})
else:
   model = create_model(443, 5, LEARNING_RATE, 'my_model.h5')

secondDropoff = False
total_halite = 0
total_reward = 0
counter = 0
hash_dim_turn = {32:400, 40:425, 48:450, 56:475, 64:500}
np.random.seed()

# This game object contains the initial game state.
game = hlt.Game()
game.ready("DQN-PER-Para3-GoodMove")
""" <<<Game Loop>>> """
while True:
Example #44
File: model.py  Project: g-e0s/recsys
 def load(self, path):
     self.model = m.load_model(path)
Example #45
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.models import load_model
import random as r
import pickle as p
import numpy as np
from PIL import Image


model = load_model('Spiral_Model-84.38-1020053228.h5')
data = p.load(open("train_model/picarray.pickle", "rb"))
repeats = int(input("How many tests? "))
randpics=[]
for repeat in range(1,repeats+1):
    randpics.append(data[r.randint(0, len(data)-1)])
predictions = model.predict(np.array([pic[0] for pic in randpics]))
#randpics = [[[value*255 for value in row] for row in randpic[0]] for randpic in randpics]]
guesses=[]
guesscorrect = 0

for i in range(len(randpics)):
    print ("\nTEST",i+1,"/",repeats)
    img = Image.fromarray(np.uint8(randpics[i][0][0]*255), mode="L")#(randpics[0][0])*255)
    img.show()
    guess = True if int(input("Parkinsons? ")) else False
Example #46
    empty_ids_df = pd.DataFrame(most_senti,
                                index=phrase_id_list,
                                columns=["Sentiment"])
    submission_df = submission_df.append(empty_ids_df)
    submission_df.index.name = "PhraseId"
    submission_df.sort_index(inplace=True)
    # submission_df.to_csv("../data/output/submissions/lstm_submission_matrix_fill.csv")
    submission_df.to_csv(
        "../data/output/submissions/sk_rf_submission_matrix_fill.csv")


if __name__ == "__main__":
    # For reproducibility
    np.random.seed(2)
    tf.set_random_seed(2)

    # X_train, X_val, X_test, X_test_id, y_train, y_val = gen_train_val_test_data()  # vector
    X_train, X_val, X_test, y_train, y_val = gen_train_val_test_matrix(
    )  # matrix
    print(
        f"X_train.shape:{X_train.shape}\nX_val.shape:{X_val.shape}\nX_test.shape:{X_test.shape}\ny_train.shape:{y_train.shape}\ny_val.shape:{y_val.shape}\n"
    )
    """
    model_train_val(X_train, X_val, y_train, y_val)
    # plot_hist()
    """

    model_name = "best_model_05_0.05.hdf5"
    model = load_model(f"../data/output/models/{model_name}")
    model_predict(model, X_test, X_val, y_val)
Example #47
                                                         self.xai_rnn.real_sp,
                                                         1]

        return test_data, P_test_1, P_test_2


parser = argparse.ArgumentParser(usage="it's usage tip.",
                                 description="help info.")
parser.add_argument("--f",
                    type=int,
                    default=5,
                    required=True,
                    help="the select feature number.")
args = parser.parse_args()
print('[Load model...]')
model = load_model('model/O1_Bi_Rnn.h5')
PATH_TEST_DATA = 'data/elf_x86_32_gcc_O1_test.pkl'
n_fea_select = args.f
#PATH_TEST_DATA = 'elf_x86_32_gcc_O0_test.pkl'
print('[Load data...]')
data = pickle.load(open(PATH_TEST_DATA, 'rb'))
print("len(data):", len(data))
data_num = len(data[0])
print('Data_num:', data_num)
seq_len = len(data[0][0])
print('Sequence length:', seq_len)

### Padding sequence ....
x_test = pad_sequences(data[0],
                       maxlen=seq_len,
                       dtype='int32',
Example #48
plt.legend()
plt.show()

# TRAINING AND VALIDATION LOSS PLOT
plt.plot(epochs, loss, 'b', label='Treino')
plt.plot(epochs, val_loss, 'r', label='Validação')
plt.title('Perda por época')
plt.xlabel('épocas')
plt.ylabel('porcentagem')
plt.grid(True)
plt.legend()
plt.show()

# LOAD THE TRAINED MODEL

model = load_model('modelo.h5')

# SHOW HOW THE NETWORK IS CONFIGURED
sumario_modelo = model.summary()

# DIRECTORY OF THE IMAGES TO USE AS TEST
imagens_teste = [
    'C:\\Users\\Brennus\\Downloads\\Dataset_Teste\\{}'.format(i)
    for i in os.listdir(diretorio_teste)
]

# NUMBER OF IMAGES INSIDE imagens_teste
ImagensParaAvaliar = 40

# NUMBER OF COLUMNS WHEN PLOTTING THE TEST IMAGES
coluna_imagens_teste = 5
Example #49
import nltk, pickle, json, random
from nltk.stem import WordNetLemmatizer
from keras.models import load_model
import numpy as np

model = load_model('chatbotmodel.h5')
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))


def clean_up_sentence(sentence):
    # tokenize the pattern - split words into array
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word - create short form for word
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words

# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words - matrix of N words, vocabulary matrix
    bag = [0]*len(words)  
    for s in sentence_words:
        for i,w in enumerate(words):
            if w == s: 
                # assign 1 if current word is in the vocabulary position
                bag[i] = 1
                if show_details:
Example #50
#IMPORT LIBRARIES
import numpy as np
import cv2
from keras.models import load_model
import imutils
from imutils.contours import sort_contours

model = load_model('./model_jd_96.h5')


def prep_labels():
  labelNames = "0123456789"
  labelNames += "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
  #labelNames += "abcdefghijklmnopqrstuvwxyz"
  labelNames = [l for l in labelNames]
  return labelNames


def get_image_predict(image_path):
  image = cv2.imread(image_path)
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  blurred = cv2.GaussianBlur(gray, (5, 5), 0)
  # perform edge detection, find contours in the edge map, and sort the
  # resulting contours from left-to-right
  edged = cv2.Canny(blurred, 30, 150)
  cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
  cnts = imutils.grab_contours(cnts)
  cnts = sort_contours(cnts, method="left-to-right")[0]
  # initialize the list of contour bounding boxes and associated
  # characters that we'll be OCR'ing
Example #51
keras_model = best_dnn.model_
history = keras_model.fit(X_train_ori,
                          y_train_ori,
                          epochs=all_epochs,
                          batch_size=all_batches,
                          verbose=0)
keras_model.save(model_name)
with open(model_name + ".pickle", 'wb') as file_pi:
    pickle.dump(history.history, file_pi)
# Delete the existing model.
del keras_model

# TESTING PHASE
X_test = np.load(X_test_name)
y_test = np.load(y_test_name)
model = load_model(model_name)
y_pred = 1 * (model.predict(X_test) > 0.5)
y_pred = y_pred.flatten()
conf_matrix = confusion_matrix(y_test, y_pred)
accuracy = accuracy_score(y_test, y_pred)
print("\n Accuracy on test set: " + str(accuracy))
print(conf_matrix)

# Testing on training to check learning
y_pred = 1 * (model.predict(X_train) > 0.5)
y_pred = y_pred.flatten()
conf_matrix = confusion_matrix(y_train, y_pred)
accuracy = accuracy_score(y_train, y_pred)
print("\nAccuracy on train set: " + str(accuracy))
print(conf_matrix)
Example #52
NUM_CLASSES = test_segs.shape[1] + 1                 # update for required number of classes

# boundary names should be a list of strings with length = NUM_CLASSES - 1
# class names should be a list of strings with length = NUM_CLASSES
AREA_NAMES = ["area_" + str(i) for i in range(NUM_CLASSES)]
BOUNDARY_NAMES = ["boundary_" + str(i) for i in range(NUM_CLASSES - 1)]
PATCH_CLASS_NAMES = ["BG"]
for i in range(len(BOUNDARY_NAMES)):
    PATCH_CLASS_NAMES.append(BOUNDARY_NAMES[i])

GSGRAD = 1
CUSTOM_OBJECTS = dict(list(custom_losses.custom_loss_objects.items()) +
                      list(custom_metrics.custom_metric_objects.items()))

eval_imdb = imdb.ImageDatabase(images=test_images, labels=None, patch_labels=test_patch_labels, segs=test_segs, image_names=test_image_names,
                               boundary_names=BOUNDARY_NAMES, area_names=AREA_NAMES,
                               fullsize_class_names=AREA_NAMES, patch_class_names=PATCH_CLASS_NAMES, num_classes=NUM_CLASSES, name=TEST_DATA_NAME, filename=TEST_DATA_NAME, mode_type='fullsize')

batch_size = 992    # CURRENTLY THIS NEEDS TO BE CHOSEN AS A VALUE WHICH IS A FACTOR OF THE AREA (IN PIXELS) OF THE FULL IMAGE (i.e. 992 is a factor of a 761856 (1536x496) pixel image [992 x 768 = 761856])
network_folder = parameters.RESULTS_LOCATION + "\\2021-04-21 14_35_20 Complex CNN 32x32 myexampledata\\" # name of network folder for which to evaluate model
model_name = "model_epoch06.hdf5"   # name of model file inside network folder to evaluate

loaded_model = load_model(network_folder + "/" + model_name, custom_objects=CUSTOM_OBJECTS)

aug_fn_arg = (aug.no_aug, {})

eval_helper.evaluate_network(eval_imdb, model_name, network_folder,
                             batch_size, save_parameters.SaveParameters(pngimages=True, raw_image=True, temp_extra=True, boundary_maps=True, area_maps=True, comb_area_maps=True, seg_plot=True),
                             gsgrad=GSGRAD, aug_fn_arg=aug_fn_arg, eval_mode='both', boundaries=True, boundary_errors=True, dice_errors=False, col_error_range=None, normalise_input=True, transpose=False)

Example #53
		print (test_image.shape)
	else:
		test_image= np.expand_dims(test_image, axis=0)
		print (test_image.shape)
#%%
print((model.predict(test_image)))
print(model.predict_classes(test_image))
#%%
#saving
from keras.models import model_from_json
from keras.models import load_model

model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)

model.save_weights("model.h5")
print("Saved model to disk")


json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)

loaded_model.load_weights("model.h5")
print("Loaded model from disk")

model.save('model.hdf5')
loaded_model=load_model('model.hdf5')
Example #54
def Single_main(Base_dir,
                Docx,
                DocY,
                epoch=3000,
                batch_size=50,
                TF=False,
                mode=None):
    in_shape = (None, None, 1)
    #    Four_InputX=Docx['4by4_data']
    #    Four_InputY=DocY['4by4_data']
    Five_InputX = Docx['5by5_data']
    Five_InputY = DocY['5by5_data']
    #    Six_InputX=Docx['6by6_data']
    #    Six_InputY=DocY['6by6_data']

    if TF == False:
        print("here")
        Network = Sequential()
        print("here1")
        ModelBuild(Network, in_shape)
        print("here11")
        configure(Network)
    else:
        H5_file = Base_dir + '/predict_h5file/5by5_ConcatNet.h5'
        Network = load_model(H5_file)

    timer = ElapsedTimer()

    history = LossHistory()
    print('/*******************************************************/\n')
    print(' Now we begin to train this model.\n')
    print('/*******************************************************/\n')
    if mode == '4by4':
        Network.fit(Four_InputX,
                    Four_InputY,
                    epochs=epoch,
                    batch_size=batch_size,
                    validation_split=0.1,
                    shuffle=True)
    elif mode == '5by5':
        Network.fit(Five_InputX,
                    Five_InputY,
                    epochs=epoch,
                    batch_size=batch_size,
                    validation_split=0.1,
                    shuffle=True,
                    callbacks=[history])
    elif mode == '6by6':
        Network.fit(Six_InputX,
                    Six_InputY,
                    epochs=epoch,
                    batch_size=batch_size,
                    validation_split=0.1,
                    shuffle=True)
# =============================================================================
    print('/*******************************************************/')
    print('         finished!!  ')
    timer.elapsed_time()

    print('/*******************************************************/\n')
    return Network, history
Example #55
def remove_noisy(x_test_noisy):
    autoencoder = load_model('autoencoder.h5')
    decoded_imgs = autoencoder.predict(x_test_noisy)
    return decoded_imgs
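# A hedged usage sketch (not from the original): corrupt clean test images with
# Gaussian noise and denoise them with the saved autoencoder. Assumes `x_test`
# holds images scaled to [0, 1] in the shape the autoencoder was trained on.
import numpy as np

noise_factor = 0.5
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_test_noisy = np.clip(x_test_noisy, 0.0, 1.0)
denoised = remove_noisy(x_test_noisy)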
Example #56
def main(Base_dir, Docx, DocY, epoch=3000, batch_size=50, TF=False):
    in_shape = (None, None, 1)
    History_4by4 = []
    History_5by5 = []
    History_6by6 = []
    #    Iteration_num=int(len(Docx['6by6_data'])/batch_size);
    temp_len = max(len(Docx['4by4_data']), len(Docx['5by5_data']),
                   len(Docx['6by6_data']))
    Iteration_num = int(temp_len // batch_size)
    Four_num = int(len(Docx['4by4_data']) // batch_size)
    Five_num = int(len(Docx['5by5_data']) // batch_size)
    Six_num = int(len(Docx['6by6_data']) // batch_size)
    Four_InputX = Docx['4by4_data']
    Four_InputY = DocY['4by4_data']
    Five_InputX = Docx['5by5_data']
    Five_InputY = DocY['5by5_data']
    Six_InputX = Docx['6by6_data']
    Six_InputY = DocY['6by6_data']
    ###########################################################################################################
    if TF == False:
        Network = Sequential()
        ModelBuild(Network, in_shape)
        configure(Network)
    else:
        H5_file = Base_dir + '/predict_h5file/5by5-1_CNN.h5'
        Network = load_model(H5_file)
###########################################################################################################
    timer = ElapsedTimer()
    print('/*******************************************************/\n')
    print(' Now we begin to train this model.\n')
    print('/*******************************************************/\n')
    ###########################################################################################################

    for i in range(epoch):
        for j in range(Iteration_num):
            if (j + 1) * batch_size > len(Docx['4by4_data']):
                j0 = j % Four_num
                History_4by4.append(
                    Network.train_on_batch(
                        Four_InputX[j0 * batch_size:(j0 + 1) *
                                    batch_size, :, :, :],
                        Four_InputY[j0 * batch_size:(j0 + 1) * batch_size, :]))
            else:
                History_4by4.append(
                    Network.train_on_batch(
                        Four_InputX[j * batch_size:(j + 1) *
                                    batch_size, :, :, :],
                        Four_InputY[j * batch_size:(j + 1) * batch_size, :]))
            if (j + 1) * batch_size > len(Docx['5by5_data']):
                j0 = j % Five_num
                History_5by5.append(
                    Network.train_on_batch(
                        Five_InputX[j0 * batch_size:(j0 + 1) *
                                    batch_size, :, :, :],
                        Five_InputY[j0 * batch_size:(j0 + 1) * batch_size, :]))
            else:
                History_5by5.append(
                    Network.train_on_batch(
                        Five_InputX[j * batch_size:(j + 1) *
                                    batch_size, :, :, :],
                        Five_InputY[j * batch_size:(j + 1) * batch_size, :]))
            if (j + 1) * batch_size > len(Docx['6by6_data']):
                j0 = j % Six_num
                History_6by6.append(
                    Network.train_on_batch(
                        Six_InputX[j0 * batch_size:(j0 + 1) *
                                   batch_size, :, :, :],
                        Six_InputY[j0 * batch_size:(j0 + 1) * batch_size, :]))
            else:
                History_6by6.append(
                    Network.train_on_batch(
                        Six_InputX[j * batch_size:(j + 1) *
                                   batch_size, :, :, :],
                        Six_InputY[j * batch_size:(j + 1) * batch_size, :]))
        if (i % 20 == 0):
            print('In iteration ' + str(i) +
                  ', The Training detail is :  4by4: ' + str(History_4by4[i]))
            print('In iteration ' + str(i) +
                  ', The Training detail is :  5by5: ' + str(History_5by5[i]))
            print('In iteration ' + str(i) +
                  ', The Training detail is :  6by6: ' + str(History_6by6[i]) +
                  '\n')
    if TF == True:
        h5_dir = Base_dir + '/predict_h5file/total_TF_VCN.h5'
    elif TF == False:
        h5_dir = Base_dir + '/predict_h5file/total_Non-TF_VCN.h5'
    Network.save(h5_dir)
    timer.elapsed_time()
    print('/*******************************************************/')
    print('         finished!!  ')
    print('/*******************************************************/\n')
    return Network
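
# A minimal, hedged sketch (not part of the original function): the network
# saved above can later be restored for inference with load_model. It assumes
# Base_dir is the same module-level path used inside the function.
restored = load_model(Base_dir + '/predict_h5file/total_TF_VCN.h5')
restored.summary()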
예제 #57
0
from io import BytesIO
import sys

import numpy as np
import matplotlib.pyplot as plt

from keras.preprocessing import image
from keras.models import load_model
from keras.applications.inception_v3 import preprocess_input, decode_predictions


def out_to_file():
    file = open("out.txt", 'w')
    sys.stdout = file  # redirect stdout to the file
# model = load_model(r"C:\Users\zbh11\zhongkong_ft.h5")  

out_to_file()
model = load_model(r"C:/Users/江淼/Desktop/2019train/model/zhongkong_ft_2.h5")


def predic(i):
    # Classify one numbered test image with the fine-tuned InceptionV3 model
    img_path = "C:/Users/江淼/Desktop/2019train/data/test/class" + str(i) + ".jpg"
    # img_path = "C:/Users/江淼/Desktop/2019train/data/test/class2.jpg"
    # img_path = "F:\\SuperMarket\\SuperMarker2.0\\" + str(i) + ".jpg"
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    preds = preds.tolist()
    return preds
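
# A hedged usage sketch (not from the original script): classify a few of the
# numbered test images and print the index of the highest-scoring class.
if __name__ == '__main__':
    for i in range(5):  # assumes class0.jpg .. class4.jpg exist
        preds = predic(i)
        print('class{}.jpg -> predicted class index {}'.format(i, int(np.argmax(preds[0]))))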
예제 #58
0
import pickle
import time

import cv2
import imutils
import matplotlib.pyplot as plt
import numpy as np
from mtcnn.mtcnn import MTCNN
from PIL import Image
from keras.models import load_model
from sklearn.preprocessing import Normalizer, LabelEncoder

# Load the image
image = Image.open('test.jpg')
img = cv2.imread('test.jpg')
# Convert to an RGB image if necessary
image = image.convert('RGB')
# Turn the image data into an array
pixels = np.asarray(image)
# Create the face detector
detector = MTCNN()
# Load the FaceNet embedding model
model = load_model('facenet_keras.h5')
# Load the trained classifier and label encoder
clf = pickle.loads(open('output/clf.pkl', "rb").read())
lb = pickle.loads(open('output/lb.pkl', "rb").read())


# L2-normalise an embedding vector
def normalization(vector):
    # Accumulate the Euclidean norm incrementally: after the first loop,
    # a == sqrt(sum(vector[i] ** 2)).
    a = 0
    for i in range(len(vector)):
        a = np.sqrt(a**2 + vector[i]**2)
    for i in range(len(vector)):
        vector[i] = vector[i] / a
    return vector
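
# A minimal, hedged sketch (not from the original script) of how the loaded
# pieces are typically combined: detect one face with MTCNN, embed it with
# FaceNet (assuming the usual 160x160 input and standardised pixels), then
# classify the embedding with the pickled classifier and label encoder.
results = detector.detect_faces(pixels)
if results:
    x1, y1, w, h = results[0]['box']
    x1, y1 = abs(x1), abs(y1)
    face = pixels[y1:y1 + h, x1:x1 + w]
    face = np.asarray(Image.fromarray(face).resize((160, 160)), dtype='float32')
    face = (face - face.mean()) / face.std()
    embedding = model.predict(np.expand_dims(face, axis=0))[0]
    embedding = normalization(embedding)
    pred = clf.predict([embedding])
    print('Predicted identity:', lb.inverse_transform(pred)[0])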
예제 #59
0
def train_model():
    print('Start: Training Model')
    root_path = os.getcwd()

    train_path = os.path.join(root_path, *TRAINING_SPLIT_DATA_PATH)
    test_path = os.path.join(root_path, *TEST_SPLIT_DATA_PATH)

    if len(VALIDATION_SPLIT_DATA_PATH) > 0:
        val_path = os.path.join(root_path, *VALIDATION_SPLIT_DATA_PATH)

    history_name = '{}_ep{}_bs{}.csv'.format(HISTORY_PREFIX, NUM_EPOCHS, BATCH_SIZE)

    history_file = os.path.join(root_path, *HISTORY_TRAINING_PATH)
    history_file = os.path.join(history_file, history_name)

    print('Preparing Data')

    # Get Training Data
    print('Retrieving Data for Training')
    train_features = None
    train_label = None
    data_retrieved = False
    for file_name in os.listdir(train_path):
        file_path = os.path.join(train_path, file_name)
        if os.path.isfile(file_path):
            if DATA_FILE == file_name[:-6]:
                data_retrieved = True
                np_file = np.load(file_path)

                if train_features is None:
                    train_features = np_file['dataset']
                    train_label = np_file['labels']
                else:
                    train_features = np.concatenate((train_features, np_file['dataset']), axis=0)
                    train_label = np.concatenate((train_label, np_file['labels']), axis=0)

    if not data_retrieved:
        print('Retrieved No Training Data')

    if len(VALIDATION_SPLIT_DATA_PATH) == 0:
        print('Splitting Training Dataset Into Training and Validation')
        x_train, x_val, y_train, y_val = data_manager.train_val_split(train_features, train_label,
                                                                      val_ratio=VALIDATION_RATIO)
    else:
        print('Retrieving Data for Validation')
        x_train = train_features
        y_train = train_label

        x_val = None
        y_val = None
        data_retrieved = False
        for file_name in os.listdir(val_path):
            file_path = os.path.join(val_path, file_name)
            if os.path.isfile(file_path):
                if DATA_FILE == file_name[:-6]:
                    data_retrieved = True
                    np_file = np.load(file_path)

                    if x_val is None:
                        x_val = np_file['dataset']
                        y_val = np_file['labels']
                    else:
                        x_val = np.concatenate((x_val, np_file['dataset']), axis=0)
                        y_val = np.concatenate((y_val, np_file['labels']), axis=0)

        if not data_retrieved:
            print('Retrieved No Validation Data')

    # Get Testing Dataset
    print('Retrieving Data for Testing')
    test_features = None
    test_label = None
    data_retrieved = False
    for file_name in os.listdir(test_path):
        file_path = os.path.join(test_path, file_name)
        if os.path.isfile(file_path):
            if DATA_FILE == file_name[:-6]:
                data_retrieved = True
                np_file = np.load(file_path)
                if test_features is None:
                    test_features = np_file['dataset']
                    test_label = np_file['labels']
                else:
                    test_features = np.concatenate((test_features, np_file['dataset']), axis=0)
                    test_label = np.concatenate((test_label, np_file['labels']), axis=0)

    if not data_retrieved:
        print('Retrieved No Testing Data')

    # Use the held-out test split for final evaluation
    x_test = test_features
    y_test = test_label

    print('Preparing Model')

    model_path = os.path.join(root_path, *MODELS_PATH)
    model_temp_path = os.path.join(model_path, TEMP_MODEL_DIR)

    if not os.path.exists(model_temp_path):
        os.makedirs(model_temp_path)

    if len(PRE_TRAINED_MODEL) == 0:
        print('Creating New Model')
        model_input = Input(INPUT_SHAPE)
        x = inception.build_inception_v4(model_input, enable_reduction=ENABLE_MODEL_REDUCTION)
        model = Model(model_input, x, name='inception_v4')
        if len(LOAD_WEIGHTS_NEW_MODEL) > 0:
            print('Loading Weights From Prior Training Error')
            weights2load = os.path.join(root_path, *LOAD_WEIGHTS_NEW_MODEL)
            model.load_weights(weights2load)
    else:
        print('Loading Existing Model')
        pre_model_path = os.path.join(root_path, *PRE_TRAINED_MODEL)
        cus_obj = None
        if LOSS_FUNCTION == 'root_mean_squared_error':
            cus_obj = {'root_mean_squared_error': root_mean_squared_error}
        model = load_model(pre_model_path, custom_objects=cus_obj)

    if LOSS_FUNCTION == 'root_mean_squared_error':
        model.compile(loss=root_mean_squared_error, optimizer=LEARNING_OPTIMIZER, metrics=['mae'])
    else:
        model.compile(loss=LOSS_FUNCTION, optimizer=LEARNING_OPTIMIZER, metrics=['mae'])

    print(model.summary())

    print('Evaluating Model')

    # Write Prediction Log
    def log_preds(y, y_, file):
        with codecs.open(file, "w", encoding="utf-8") as pred_csv:
            pred_csv.write('y_true,y_pred\n')
            for l in range(len(y)):
                pred_csv.write('{},{}\n'.format(y[l], y_[l]))

    # Write Summary Log
    log_file = os.path.join(root_path, *SUMMARY_TRAINING_LOG)
    if not os.path.exists(log_file):
        with codecs.open(log_file, "a", encoding="utf-8") as log_csv:
            log_csv.write(
                'model_version,i,nb_epochs,batch_size,nb_rotations,mirror_vertical,mirror_horizontal,model_reduction,test_rmse,test_pk\n')

    # Create Directory to save all top k models
    save_model_path = os.path.join(
        model_path,
        'top_models-test_only_ep({})-bs({})-r({})-rm({})-rd({})'.format(
            NUM_EPOCHS, BATCH_SIZE, NB_ROTATION,
            ENABLE_HORIZONTAL_MIRROR or ENABLE_VERTICAL_MIRROR,
            ENABLE_MODEL_REDUCTION))
    if not os.path.exists(save_model_path):
        os.makedirs(save_model_path)

    # Create Directory to save all prediction logs

    pred_path = os.path.join(root_path, *HISTORY_TRAINING_PATH)
    pred_path = os.path.join(
        pred_path,
        '{}_test_only_ep{}_bs{}_r({})_rm({})_mr({})'.format(
            PREDICTION_LOG, NUM_EPOCHS, BATCH_SIZE, NB_ROTATION,
            ENABLE_HORIZONTAL_MIRROR or ENABLE_VERTICAL_MIRROR,
            ENABLE_MODEL_REDUCTION))
    if not os.path.exists(pred_path):
        os.makedirs(pred_path)

    model_path = os.path.join(root_path, *MODELS_PATH)
    model_temp_path = os.path.join(model_path, TEMP_MODEL_DIR)

    if not os.path.exists(model_temp_path):
        os.makedirs(model_temp_path)

    # Evaluate Best Validation Models
    for file_name in os.listdir(model_temp_path):
        file_path = os.path.join(model_temp_path, file_name)
        if os.path.isfile(file_path):

            if 'rmse' in file_name:
                # File names look like temp_best_rmse_weights<i>.hdf5, so character 22 is the iteration index
                itr = int(file_name[22])
                temp_best_val_weights = os.path.join(model_temp_path, 'temp_best_rmse_weights{}.hdf5'.format(itr))
                model.load_weights(temp_best_val_weights)
                val_preds = model.predict(x_test)
                val_preds = val_preds.reshape(val_preds.shape[0])
                val_preds = np.nan_to_num(val_preds)
                p_k = prediction_probability.predprob(y_test[:, 2], val_preds)
                test_rmse_val = sqrt(mean_squared_error(y_test[:, 2], val_preds))
                print('Test P_K Score (Val Model {}):'.format(itr))
                val_str_pk = '{:.6f}'.format(p_k)
                print(val_str_pk)

                best_model_name = 'model_rmse__e({})_bs({})_mr({})_i({})_test-rmse({:.6f})_test-pk({}).hdf5'.format(
                    NUM_EPOCHS,
                    BATCH_SIZE,
                    ENABLE_MODEL_REDUCTION,
                    itr,
                    test_rmse_val,
                    val_str_pk)

                model.save(os.path.join(save_model_path, best_model_name))

                pred_name_file = '{}_val_ep{}_bs{}_mr({})_i({}).csv'.format(PREDICTION_LOG, NUM_EPOCHS, BATCH_SIZE,
                                                                            ENABLE_MODEL_REDUCTION, itr)
                pred_file = os.path.join(pred_path, pred_name_file)
                log_preds(y_test[:, 2], val_preds, pred_file)

                with codecs.open(log_file, "a", encoding="utf-8") as log_csv:
                    # Column order follows the summary header: model_version,i,nb_epochs,batch_size,...
                    log_csv.write(
                        'rmse,{},{},{},{},{},{},{},{:.6f},{}\n'.format(
                            itr, NUM_EPOCHS, BATCH_SIZE, NB_ROTATION,
                            ENABLE_VERTICAL_MIRROR, ENABLE_HORIZONTAL_MIRROR,
                            ENABLE_MODEL_REDUCTION, test_rmse_val, val_str_pk))
            elif 'avrg' in file_name:
                itr = int(file_name[22])
                temp_best_avg_weights = os.path.join(model_temp_path,
                                                     'temp_best_avrg_weights{}.hdf5'.format(itr))
                model.load_weights(temp_best_avg_weights)
                avg_preds = model.predict(x_test)
                avg_preds = avg_preds.reshape(avg_preds.shape[0])
                avg_preds = np.nan_to_num(avg_preds)
                p_k = prediction_probability.predprob(y_test[:, 2], avg_preds)
                test_rmse_avg = sqrt(mean_squared_error(y_test[:, 2], avg_preds))
                print('Test P_K Score (Avg Model {}):'.format(itr))
                avg_str_pk = '{:.6f}'.format(p_k)
                print(avg_str_pk)

                best_model_name = 'model_average_e({})_bs({})_mr({})_i({})_test-rmse({:.6f})_test-pk({}).hdf5'.format(
                    NUM_EPOCHS,
                    BATCH_SIZE,
                    ENABLE_MODEL_REDUCTION,
                    itr,
                    test_rmse_avg,
                    avg_str_pk,)

                model.save(os.path.join(save_model_path, best_model_name))

                pred_name_file = '{}_avg_ep{}_bs{}_mr({})_i({}).csv'.format(PREDICTION_LOG, NUM_EPOCHS, BATCH_SIZE,
                                                                            ENABLE_MODEL_REDUCTION, itr)
                pred_file = os.path.join(pred_path, pred_name_file)
                log_preds(y_test[:, 2], avg_preds, pred_file)

                with codecs.open(log_file, "a", encoding="utf-8") as log_csv:
                    # Column order follows the summary header: model_version,i,nb_epochs,batch_size,...
                    log_csv.write(
                        'average,{},{},{},{},{},{},{},{:.6f},{}\n'.format(
                            itr, NUM_EPOCHS, BATCH_SIZE, NB_ROTATION,
                            ENABLE_VERTICAL_MIRROR, ENABLE_HORIZONTAL_MIRROR,
                            ENABLE_MODEL_REDUCTION, test_rmse_avg, avg_str_pk))

    print('Done: Training Model')
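
# A minimal usage sketch (an assumption, not part of the original example):
# run the whole training pipeline when the file is executed directly.
if __name__ == '__main__':
    train_model()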
예제 #60
0
import numpy as np
from keras import backend as K
from keras.models import load_model, model_from_json
from keras.optimizers import Adam


def dice_coef(y_true, y_pred):
    # Dice coefficient between ground-truth and predicted segmentation masks
    smooth = 1.0
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) +
                                           smooth)
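

# A common companion to the coefficient above (an assumption, not taken from
# the original snippet): segmentation models of this kind are often trained by
# minimising 1 - Dice.
def dice_coef_loss(y_true, y_pred):
    return 1.0 - dice_coef(y_true, y_pred)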


img_train = np.load('train_data.npy')
mask_train = np.load('train_mask.npy')

img_test = np.load('test_data.npy')
mask_test = np.load('test_mask.npy')

print("Shape of Image Test")
print(img_test.shape)

# If the model was saved with dice_coef as its loss or metric, load_model would
# also need custom_objects={'dice_coef': dice_coef}.
model = load_model('model-lv_segmentation2.h5')
model.save_weights('t_lv.h5')
with open('model_architecture.json', 'w') as f:
    f.write(model.to_json())

with open('model_architecture.json', 'r') as f:
    model_type1 = model_from_json(f.read())

with open('model_architecture.json', 'r') as f:
    model_type2 = model_from_json(f.read())

model_type1.load_weights('t_lv.h5')
model_type2.load_weights('t_my.h5')

model_type1.compile(optimizer=Adam(lr=1e-5),
                    loss='binary_crossentropy',