def visualise_with_quiver(model,
                          input_images='../data/',
                          class_type='age',
                          port=5000):
    # classes = np.array(['1-15', '16-20', '21-25', '26-30', '31-35', '36-40', '40-45', '46-50', '51-55', '56-100'])
    # if class_type != 'age':
    #   classes = np.array(['female', 'male'])
    classes = [
        '1-15', '16-20', '21-25', '26-30', '31-35', '36-40', '40-45', '46-50',
        '51-55', '56-100'
    ]
    if class_type != 'age':
        classes = ['female', 'male']

    print("Starting Quiver on port %i" % port)

    from quiver_engine import server
    server.launch(
        model,  # a Keras Model
        classes,  # list of output classes from the model to present (if not specified 1000 ImageNet classes will be used)
        10,  # number of top predictions to show in the gui (default 5)

        # where to store temporary files generated by quiver (e.g. image files of layers)
        temp_folder='./tmp',

        # a folder where input images are stored
        input_folder=input_images,

        # the localhost port the dashboard is to be served on
        port=port)
Example No. 2
def main(model_path, temp_folder_path, input_folder_path, classes, num_predictions):
    model = get_model(model_path)

    server.launch(
        model=model,  # a Keras Model
        classes=classes,
        top=num_predictions,  # number of top predictions to show in the gui (default 5)
        temp_folder=temp_folder_path,
        input_folder=input_folder_path,
        port=5000 # the localhost port the dashboard is to be served on
    )
Example No. 3
    def start_server(self):
        server.launch(
            self.model,  # a Keras Model

            # where to store temporary files generated by quiver (e.g. image files of layers)
            temp_folder='./tmp',

            # a folder where input images are stored
            input_folder='../../data',

            # the localhost port the dashboard is to be served on
            port=5001)
Example No. 4
def explore_run(args):
    import tempfile
    from quiver_engine import server
    from . import predict

    model, meta = predict.load(args.modeldir)
    server.launch(
        model,
        classes=meta["classes"],
        input_folder=args.imagedir,
        temp_folder=tempfile.mkdtemp(prefix="mila_quiver_"),
        std=[255, 255, 255],
    )  # This is a bit of a hack to make quiver scale the images
Example No. 5
def main():
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", dest="model")
    parser.add_argument("-q", "--quiver", action="store_true", dest="quiver")
    args = parser.parse_args()

    model_type = ""
    if args.model:
        model_type = args.model

    model = retrieve_option_model(model_type)
    model.load_model()

    if args.quiver:
        server.launch(
            model.model,
            input_folder="./examples/test_cut",
        )
Example No. 6
def runserver(model_path, input_images_path):
    try:
        model = keras.models.load_model(model_path)
    except ValueError:
        new_model_path = os.path.abspath(model_path) + ".edited"
        shutil.copy(model_path, new_model_path)

        f = h5py.File(new_model_path, 'r+')
        del f['optimizer_weights']
        f.close()

        model = keras.models.load_model(new_model_path)

    server.launch(
        model,  # a Keras Model
        classes='regression',  # list of output classes from the model to present (if not specified 1000 ImageNet classes will be used)

        # a folder where input images are stored
        input_folder=input_images_path,

        # the localhost port the dashboard is to be served on
        port=5000)
Example No. 7
# -*- coding: utf-8 -*-
from keras.applications.vgg16 import VGG16
from quiver_engine import server
model = VGG16()

server.launch(model,
              input_folder='./sample_images', temp_folder='./tmp', port=8000)
Example No. 8
    def launch_server(self, imgFolder="GeneratedImages_Bin2"):
        names = ['bc', 'bj', 'bn', 'sa', 'bry', 'brb', 'cm', 'cst', 'cso', 'sl',
                 'cbp', 'brc', 'cd', 'ds', 'brp', 'sf1', 'sii1', 'siv1', 'sp1', 'sv1',
                 'cca', 'cch', 'cgr', 'cme', 'cpe', 'ahy', 'apacc', 'apr', 'apo', 'are']
        server.launch(self.model, classes=names, input_folder=imgFolder)
Example No. 9
from models.model2_6_9172 import first_model
from quiver_engine import server

input_shape = (64, 64, 3)
model = first_model(input_shape, 6)
model.load_weights(
    'save_weights/model2_6/trash-model-weight-ep-176-val_loss-0.33-val_acc-0.92.h5'
)
server.launch(
    model,
    classes=['glass', 'cardboared', 'metai', 'paper', 'plasic', 'trash'],
    input_folder='hhh')
server.launch(model)
Example No. 10
import numpy as np
import time
from keras.preprocessing.image import save_img
from keras.applications.vgg16 import VGG16
from keras.models import load_model
from keras import backend as K

from dataset_loader import DatasetLoader
from constants import *

# Dimensions of the generated pictures for each filter.
img_width = 32
img_height = 32

# Load dataset
# Comment/uncomment to select dataset to use
dataset = DatasetLoader()

# CK Extended no resize
dataset.ck_extended_load_from_save()
classes = CK_EXTENDED_EMOTIONS

# Load model
model = load_model(MODELS_PATH + 'model_ck_extended_inception_v3_1.h5')
print ('[+] Model loaded')
print (model.summary())

from quiver_engine.server import launch
launch(model, classes=classes, input_folder='./imgs')
model = None

Example No. 11
import argparse

import h5py
from keras import __version__ as keras_version  # local Keras version, compared against the model file below
from keras.models import load_model
from quiver_engine import server

parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
    'model',
    type=str,
    help='Path to model h5 file. Model should be on the same path.')
parser.add_argument(
    'image_folder',
    type=str,
    nargs='?',
    default='',
    help=
    'Path to image folder. This is where the images from the run will be saved.'
)
args = parser.parse_args()
#print(args)
# check that model Keras version is same as local Keras version
f = h5py.File(args.model, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')

if model_version != keras_version:
    print('You are using Keras version ', keras_version,
          ', but the model was built using ', model_version)

model = load_model(args.model)

server.launch(model, temp_folder='features', input_folder='img', port=5000)
Example No. 12
# quiver engine: https://keplr-io.github.io/quiver/

from keras.applications.vgg16 import VGG16
model = VGG16(weights='imagenet')

from quiver_engine.server import launch
launch(model, input_folder='./img', port=7000)

type(model)
Example No. 13
import tensorflow as tf

tf.python.control_flow_ops = tf  # compatibility workaround for older Keras/TensorFlow combinations

from keras.models import Sequential, model_from_json, load_model
from keras.optimizers import Adam, SGD
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.preprocessing.image import *

from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt
import tables
import sys

from quiver_engine import server

# -------------------------------------
# Compile and train the model
# -------------------------------------
model = load_model('model.h5')
#opt = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.summary()

# -------------------------------------
# Evaluate the trained model
# -------------------------------------
server.launch(model, input_folder='./quiver_img/IMG')
Example No. 14
model.add(
    Convolution2D(nb_filters,
                  kernel_size[0],
                  kernel_size[1],
                  border_mode='valid',
                  activation='relu',
                  input_shape=input_shape))
model.add(
    Convolution2D(nb_filters,
                  kernel_size[0],
                  kernel_size[1],
                  activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

model.load_weights('MNIST_weights.h5')

# launching the visualization server
from quiver_engine import server
server.launch(model, temp_folder='./tmp', input_folder='./', port=7777)
# NOTES for running:
# Must run: pip install quiver-engine
# This script runs well from a Jupyter (.ipynb) notebook; it was put into a .py file for the Git repo.
# The model must be trained on .jpg images, and only .jpg files will load in the Quiver visualization (a conversion sketch follows below).
#
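# Since only .jpg files show up in Quiver, a small conversion pass can be run first.
# The block below is only a sketch using Pillow; './input_images' is a placeholder folder name.
import glob
import os

from PIL import Image

for path in glob.glob('./input_images/*.png'):
    img = Image.open(path).convert('RGB')                   # drop any alpha channel before saving as JPEG
    img.save(os.path.splitext(path)[0] + '.jpg', 'JPEG')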

Example No. 15
import tensorflow as tf
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.models import load_model
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.utils.vis_utils import plot_model
import keras.backend

from quiver_engine import server

model = load_model("senator_model.h5")
model.summary()

server.launch(model, temp_folder='./tmp', input_folder='./Images/Senators/')
Example No. 16
# pip install quiver_engine
from quiver_engine import server

server.launch(model)  # localhost:5000
Example No. 17
# Test quiver-time
import keras as ks
from quiver_engine import server

# Load model
model = ks.models.load_model('models/my_bestmodel-win.h5')

# Launch server
server.launch(model, classes=range(7), input_folder='input/')
Example No. 18
#!/usr/bin/env python

import os
from quiver_engine import server
from squeezenet import SqueezeNet

classes = [
    'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight',
    'Nine'
]

input_shape = (67, 67, 3)
weights_file = './mnist_weights.h5'
model = SqueezeNet(len(classes), input_shape=(input_shape))
model.output
model.compile(loss="categorical_crossentropy",
              optimizer='adadelta',
              metrics=['accuracy'])
if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)
server.launch(model=model,
              classes=classes,
              temp_folder='/tmp/quiver',
              input_folder='/mnt/data/Development/ros/catkin_ws/images')
Example No. 19
model = Sequential()
model.add(
    Convolution2D(32,
                  kernel_size=(3, 3),
                  padding='same',
                  input_shape=(3, 100, 100)))
model.add(Activation('relu'))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(5))
model.add(Activation('sigmoid'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=False)

model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.load_weights("weights.hdf5")

server.launch(model, input_folder='./', temp_folder='./filters')
Example No. 20
def visualize_feature_map(model_name):
    model, input_size = model_builder(model_name)

    hook_list = register_hook(model)

    server.launch(model, hook_list, input_folder="./data/Cat", image_size=input_size, use_gpu=False)
Example No. 21
def launch_quiver(model_path):
    model = load_model(model_path)
    server.launch(model,
                  port=8015,
                  input_folder='./imgs',
                  classes=['pos', 'neg'])
Example No. 22
#
# Using Quiver for interactive visualization of a VGG16 model
#

from quiver_engine import server
from keras.applications import VGG16
global model

model = VGG16(weights="imagenet")
model.summary()

# Structure of the model
print(model.to_json())

# Launch the Quiver dashboard
server.launch(model,
              top=10,
              temp_folder="./tmp",
              input_folder='./imgs',
              port=12345)
Example No. 23
model = Sequential()
model.add(Convolution2D(32, kernel_size=(3, 3), padding='same', input_shape=(3, 100, 100)))
model.add(Activation('relu'))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(5))
model.add(Activation('sigmoid'))


# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=False)

model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.load_weights("weights.hdf5")

server.launch(model, input_folder='./', temp_folder='./filters')
Example No. 24
# Test quiver-time
import keras as ks
from quiver_engine import server
import sys

# Load model
if sys.version_info.major == 2:
    model = ks.models.load_model('models/my_bestmodel.h5')
elif sys.version_info.major == 3:
    model = ks.models.load_model('models/my_bestmodel_py3.h5')

# Names of classes of PAMAP2
classes = [
    'lying', 'sitting', 'standing', 'walking', 'cycling', 'vacccuum', 'ironing'
]

# Launch server
server.launch(model, classes=classes, top=7, input_folder='input/')
Example No. 25
import keras.applications as apps
from quiver_engine.server import launch

model = apps.vgg16.VGG16()

launch(model, input_folder=".")
Example No. 26
    model_disk = load_model('model/2017-07-09-14-53-loss-decrease-171-0.89.hdf5',
                            custom_objects={'my_hinge': my_hinge, 'new_smooth': new_smooth})
    weights_disk = model_disk.get_weights()
    multitask_model.set_weights(weights_disk)
    """

    # multitask_model = load_model('model/2017-07-09-14-53-loss-decrease-171-0.89.hdf5',
    #                            custom_objects={'my_hinge': my_hinge, 'new_smooth': new_smooth})

    multitask_model = load_model(
        'model/2017-07-17-18-43-epoch-16-loss-6.78-saved-all-model.hdf5',
        custom_objects={
            'my_hinge': my_hinge,
            'new_smooth': new_smooth
        })
    launch(multitask_model, input_folder='./img', port=5000)

    # all_imgs = glob.glob('/home/yuquanjie/Documents/shumei_crop_center_test/' + '*.jpg')
    all_imgs = glob.glob('/home/yuquanjie/Documents/icdar2017_crop_center/' +
                         '*.jpg')
    # python generator
    data_gen_pred = get_pred_img(all_imgs)
    while True:
        X, img_data = data_gen_pred.next()
        # predict
        X = np.expand_dims(X, axis=0)
        predict_all = multitask_model.predict_on_batch(1 / 255.0 * X)
        # 1) classification result
        predict_cls = predict_all[0]
        # reduce dimension from (1, 80, 80, 1) to (80, 80)
        predict_cls = np.sum(predict_cls, axis=-1)
Example No. 27
import numpy as np
from quiver_engine import server
from quiver_engine.model_utils import register_hook
from torchvision import models

if __name__ == "__main__":
    model = models.resnet18()

    hook_list = register_hook(model)

    server.launch(model,
                  hook_list,
                  input_folder="./data/Cat",
                  image_size=[250, 250],
                  use_gpu=False)
Example No. 28
                weights_path = get_file(
                    'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
                    TF_WEIGHTS_PATH,
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    TF_WEIGHTS_PATH_NO_TOP,
                    cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model


model = VGG19(include_top=True, weights='imagenet')

from quiver_engine import server

server.launch(
    model,  # a Keras Model

    # where to store temporary files generated by quiver (e.g. image files of layers)
    temp_folder='./cache',

    # a folder where input images are stored
    input_folder='./',

    # the localhost port the dashboard is to be served on
    port=5000)
Example No. 29
checkpoint = ModelCheckpoint(
    filepath=os.path.join(MODEL_DIR, "model-{epoch:02d}.h5"))
model.fit(Xtrain, Ytrain, batch_size=BATCH_SIZE, nb_epoch=NUM_EPOCHS,
          validation_split=0.1, callbacks=[checkpoint])



Using TensorBoard and Keras

Keras provides a callback for saving your training and test metrics, as well as activation histograms
for the different layers in your model:
keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0,
                            write_graph=True, write_images=False)
Saved data can then be visualized with TensorBoard launched at the command line:
tensorboard --logdir=/full_path_to_your_logs
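
A minimal sketch of wiring the callback into training (the model and data names are carried over from the checkpoint example above, so they are assumptions here) looks like this:
from keras.callbacks import TensorBoard

tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,
                          write_graph=True, write_images=False)
model.fit(Xtrain, Ytrain, batch_size=BATCH_SIZE, nb_epoch=NUM_EPOCHS,
          validation_split=0.1, callbacks=[tensorboard])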


Using Quiver and Keras

In Chapter 3, Deep Learning with ConvNets, we will discuss ConvNets, which are an advanced deep
learning technique for dealing with images. Here we give a preview of Quiver (for more information,
refer to https://github.com/jakebian/quiver), a tool useful for visualizing ConvNet features in an
interactive way. The installation is pretty simple, and after that Quiver can be used with a single line:
pip install quiver_engine
from quiver_engine import server
server.launch(model)
This will launch the visualization at localhost:5000.
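
The examples collected above pass further optional arguments; the following is only a sketch (the class labels and folder names are placeholders) showing the most common ones:
from quiver_engine import server

server.launch(
    model,                    # a Keras model
    classes=['cat', 'dog'],   # labels to display for the top predictions (placeholders)
    top=5,                    # number of top predictions to show in the GUI
    temp_folder='./tmp',      # where Quiver stores generated layer images
    input_folder='./imgs',    # folder containing the images to browse
    port=5000)                # localhost port the dashboard is served on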




Example No. 30
import os
import argparse

from keras.applications.vgg16 import VGG16
from quiver_engine import server

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('--img_folder',
                        type=str,
                        help='directory with your images')

    parser.add_argument('--port', type=int, help='port to serve your quiver')

    args = parser.parse_args()

    model = VGG16(include_top=True)
    tmp_dir = os.path.join(args.img_folder, '.tmp')
    if not os.path.exists(tmp_dir): os.makedirs(tmp_dir)
    server.launch(model,
                  input_folder=args.img_folder,
                  temp_folder=tmp_dir,
                  port=args.port)
Example No. 31
from quiver_engine import server
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import CustomObjectScope
from tensorflow.keras.initializers import glorot_uniform
from keras.preprocessing import image
import numpy as np

model = load_model('C:/Users/jenni/Downloads/artifacts_model.h5')
model.summary()

# temp_folder is not working - it creates a \tmp folder in the project directory instead (delete that folder on error); see the workaround sketch below the launch call
server.launch(model,
              classes=['Accepted', 'Crack', 'Chip'],
              temp_folder='C:/Data/temp',
              input_folder='C:/Data/catdog')
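
# Given the temp_folder issue noted above, one possible workaround (a sketch only, reusing
# the same paths from this example) is to create the folder up front and pass an absolute path:
import os

temp_dir = os.path.abspath('C:/Data/temp')
os.makedirs(temp_dir, exist_ok=True)       # ensure the folder exists before Quiver writes to it
server.launch(model,
              classes=['Accepted', 'Crack', 'Chip'],
              temp_folder=temp_dir,
              input_folder='C:/Data/catdog')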