Example No. 1
 def __init__(self, model_path='./model.h5', encoder_path='encoder.pkl'):
     self.model_path = model_path
     gdd.download_file_from_google_drive(
         file_id='1642JgezyxVSlowH9kiTuB6xCWr6KleEb',
         dest_path=model_path,
         unzip=True)
     self.model = load_model(model_path)
     self.encoder = pickle.load(open(encoder_path, 'rb'))
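The excerpt above omits its module-level imports; a minimal sketch of what they would likely be, given the names it uses (pickle, load_model, gdd):

# Imports assumed by the snippet above (not shown in the excerpt)
import pickle
from keras.models import load_model
from google_drive_downloader import GoogleDriveDownloader as gdd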
Example No. 2
 def download_model(
     self,
     model_url='https://drive.google.com/file/d/1642JgezyxVSlowH9kiTuB6xCWr6KleEb/view?usp=sharing'
 ):
     # load_model() expects a path to a saved model file, not raw response bytes,
     # so write the download to disk first. A Google Drive "view" link may need to
     # be converted to a direct-download URL for this to work.
     import urllib.request
     local_path = 'model.h5'
     urllib.request.urlretrieve(model_url, local_path)
     return load_model(local_path)
     """
def infer_with_model(model_filename, input_string, max_length, max_features):
    """evaluate the saved model against the test set
  """
    from keras.model import load_model

    model = load_model(model_filename)
    pred_string = project_text_to_imdb_data(input_string, max_features,
                                            max_length)
    print("predicting on '%s'" % str(input_string))
    print("predicting on '%s'" % str(pred_string))
    pred = model.predict(pred_string, batch_size=1)
    print("got: '%s'" % str(pred))
    return pred
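project_text_to_imdb_data is defined elsewhere in that project; as a rough, hypothetical sketch of what such a helper usually does (the use of Keras' IMDB word index and every name below are assumptions, not the original code):

# Hypothetical text-to-IMDB-indices helper, NOT the original project_text_to_imdb_data
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences

def project_text_to_imdb_data_sketch(text, max_features, max_length):
    # Map words to IMDB vocabulary indices (offset 3: 0=pad, 1=start, 2=unknown)
    word_index = imdb.get_word_index()
    ids = [word_index[w] + 3 for w in text.lower().split() if w in word_index]
    ids = [i for i in ids if i < max_features]
    # Pad/truncate to the fixed sequence length the model was trained with
    return pad_sequences([ids], maxlen=max_length)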
Example No. 4
import cv2
import scipy.misc
import random
from scipy import pi

img = cv2.imread('steering_wheel_image.jpg',0)
rows,cols = img.shape

smoothed_angle = 0

i = 0
from subprocess import call


from keras.models import load_model

model = load_model("Best_model_One/mymodel_best_model.ckpt")

while(cv2.waitKey(10) != ord('q')):
    full_image = scipy.misc.imread("driving_dataset/driving_dataset/" + str(i) + ".jpg", mode="RGB")
    image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0
    degrees = model.predict(image[None,...])[0][0] * 180.0 / scipy.pi
    #call("clear")
    print("Predicted steering angle: " + str(degrees) + " degrees")
    cv2.imshow("frame", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))
    #make smooth angle transitions by turning the steering wheel based on the difference of the current angle
    #and the predicted angle
    smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (degrees - smoothed_angle) / abs(degrees - smoothed_angle)
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-smoothed_angle,1)
    dst = cv2.warpAffine(img,M,(cols,rows))
    cv2.imshow("steering wheel", dst)
    i += 1
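scipy.misc.imread and scipy.misc.imresize were removed in newer SciPy releases, so on a current stack the frame loading inside the loop above can be done with OpenCV instead; a minimal sketch of an equivalent (same bottom-crop, 66x200 resize and [0, 1] scaling):

    # Equivalent frame loading without the removed scipy.misc helpers
    frame_bgr = cv2.imread("driving_dataset/driving_dataset/" + str(i) + ".jpg")
    full_image = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    image = cv2.resize(full_image[-150:], (200, 66)) / 255.0  # cv2.resize takes (width, height)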
Example No. 5
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
# yolo_head and yolo_eval (used below) are assumed to come from the project's YOLO helper modules

sess = K.get_session()

# Defining classes, anchors and image shape

class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)

# Loading a pretrained model

yolo_model = load_model("model_data/yolo.h5")

yolo_model.summary()

# Convert output of the model to usable bounding box tensors

yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

# Filtering boxes

scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

# Run the graph on an image


def predict(sess, image_file):
Example No. 6
def get_model():
    global model
    model = load_model('VGG16_cats_and_dogs.h5')
    print(" * Model loaded!")
Example No. 7
        scores.extend(s)

    precision = []
    recall = []
    for t, p in zip(true_rec, predict_rec):
        if len(t) != 0 and len(p) != 0:
            precision.append(get_precision_rectangle(t, p))
            recall.append(get_recall_rectangle(t, p))

    precision = np.mean(np.array(precision))
    recall = np.mean(np.array(recall))

    y_true = get_ground_truth(true_rec, predict_rec, 0.2)
    plot_roc(y_true, scores, "roc.png", True)

    print("precision", precision)
    print("recall", recall)


if __name__ == "__main__":

    # model = train_model(SIZE)

    model = load_model("./modele/train_2000_vgg16.h5")

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    test_model(model, 20)
Example No. 8
import numpy as np
import cv2
from keras.models import load_model
from flask import Flask, render_template, Response
import tensorflow as tf
global graph
global writer
from skimage.transform import resize

graph = tf.get_default_graph()
writer = None

model = load_model("/content/drive/MyDrive/Colab Notebooks/iceberg_model.h5")

app = Flask(__name__)

print("[INFO] accessing video stream")
vs = cv2.VideoCapture("")

pred = ""


def detect(frame):
    img = resize(frame, (75, 75))
    img = np.expand_dims(img, axis=0)
    if (np.max(img) > 1):
        img = img / 255.0
    with graph.as_default():
        prediction = model.predict_classes(img)
    pred = prediction[0][0]
    if not pred:
Example No. 9
from keras.models import load_model

model = load_model('model.h5')
#model.predict()
Example No. 10
                (str(ishape) for ishape in layer.input_shapes))
        else:
            inputlabels = 'multiple'
        label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels,
                                                       outputlabels)
        node = pydot.Node(layer_id, label=label)
        dot.add_node(node)
    for layer in layers:
        # Get the id of the layer object
        layer_id = str(id(layer))
        for i, node in enumerate(layer._inbound_nodes):
            # Nodes owned by a layer are also named with the following format
            node_key = layer.name + '_ib-' + str(i)
            if node_key in model._network_nodes:
                for inbound_layer in node.inbound_layers:
                    inbound_layer_id = str(id(inbound_layer))
                    # Check that the inbound_layer is registered in the graph
                    assert dot.get_node(inbound_layer_id)
                    assert dot.get_node(layer_id)
                    # Register the edge in the graph
                    dot.add_edge(pydot.Edge(inbound_layer_id, layer_id))
    return dot


def set_position(nodes, edges):
    pass


if __name__ == '__main__':
    model = load_model('./models/model.h5')
Example No. 11
import cv2
import scipy.misc
import random
from scipy import pi

img = cv2.imread('steering_wheel_image.jpg',0)
rows,cols = img.shape

smoothed_angle = 0

i = 0
from subprocess import call


from keras.models import load_model

model = load_model("Best_model_Two/mymodel_best_model.ckpt")

while(cv2.waitKey(10) != ord('q')):
    full_image = scipy.misc.imread("driving_dataset/driving_dataset/" + str(i) + ".jpg", mode="RGB")
    image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0
    degrees = model.predict(image[None,...])[0][0] * 180.0 / scipy.pi
    #call("clear")
    print("Predicted steering angle: " + str(degrees) + " degrees")
    cv2.imshow("frame", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))
    #make smooth angle transitions by turning the steering wheel based on the difference of the current angle
    #and the predicted angle
    smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (degrees - smoothed_angle) / abs(degrees - smoothed_angle)
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-smoothed_angle,1)
    dst = cv2.warpAffine(img,M,(cols,rows))
    cv2.imshow("steering wheel", dst)
    i += 1
Example No. 12
import json
import tensorflow as tf
from tensorflow import Graph, Session

from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
from keras.models import load_model
from keras.preprocessing import image

img_height, img_width = 224, 224
with open('filepath.json') as f:
    labelInfo = f.read()

labelInfo = json.loads(labelInfo)
# model = load_model('./models/modelfile.h5')
model_graph = Graph()
with model_graph.as_default():
    tf_session = Session()

    with tf_session.as_default():
        model = load_model('./models/modelfile.h5')


# Create your views here.
def index(request):
    return render(request, 'index.html')


def prediction(request):
    fileobj = request.FILES['image']
    fs = FileSystemStorage()
    saved_name = fs.save(fileobj.name, fileobj)
    image_name = fileobj.name.split('.')[0]
    testimage = '.' + fs.url(saved_name)

    img = image.load_img(testimage, target_size=(img_height, img_width))
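    # The view above is cut off at load_img; a hedged sketch of how the prediction
    # step is usually completed under the graph/session set up earlier (the array
    # preprocessing, the argmax-over-labelInfo lookup and the template context are assumptions):
    x = image.img_to_array(img) / 255.0          # requires: import numpy as np
    x = x.reshape(1, img_height, img_width, 3)
    with model_graph.as_default():
        with tf_session.as_default():
            pred = model.predict(x)
    predicted_label = labelInfo[str(np.argmax(pred[0]))]  # assumes labelInfo maps index -> name
    return render(request, 'index.html', {'predicted_label': predicted_label})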
Example No. 13
import numpy as np
import pandas as pd
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator

# img_width, img_height and batch_size are assumed to be set during training setup
test_data_dir = 'test'
test_datagen = ImageDataGenerator(rescale=1. / 255)

#make the test data generator
test_generator = test_datagen.flow_from_directory(directory=test_data_dir,
                                                  target_size=(img_width,
                                                               img_height),
                                                  batch_size=batch_size,
                                                  color_mode="grayscale",
                                                  shuffle=False,
                                                  class_mode=None)
test_samples = test_generator.n

#make the predictions using the model
model = load_model("hand_gestures.h5")
test_generator.reset()
pred = model.predict_generator(test_generator,
                               steps=test_samples // batch_size,
                               verbose=1)
predicted_class_indices = np.argmax(pred, axis=1)

#labeling the predicted output
labels = train_generator.class_indices  # train_generator is assumed to exist from the training phase
labels = dict((v, k) for k, v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]

#storing the result in csv file
filenames = test_generator.filenames
results = pd.DataFrame({"Filename": filenames, "Predictions": predictions})
results.to_csv("results.csv", index=False)
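If the training generator is not in scope when this script runs, the same index-to-label mapping can be rebuilt from the training images; a small sketch, assuming they live in class subfolders under a hypothetical 'train' directory:

# Hypothetical way to recover class_indices without the original train_generator
train_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(directory='train',
                                                    target_size=(img_width, img_height),
                                                    color_mode="grayscale",
                                                    class_mode="categorical")
labels = dict((v, k) for k, v in train_generator.class_indices.items())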
Example No. 14
import glob
import json
import os

from keras.models import load_model
from utils.losses import compute_test_distribution

models = glob.glob('results/model-architecture-*/best_model*.h5')
for model_path in models:
    print(model_path)
    model = load_model(model_path)
    test_distribution = compute_test_distribution(model)
    with open(
            model_path.replace(
                model_path.split('/')[-1], 'training_distribution.json'),
            'w+') as fp:
        json.dump(test_distribution, fp, sort_keys=True, indent=4)
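Because these models come from a project with custom losses (utils.losses), load_model inside the loop above may need those objects registered in order to deserialize the saved files; a minimal sketch using the custom_objects parameter (the custom_loss name is a hypothetical placeholder):

# Sketch: registering a hypothetical custom loss so load_model can deserialize it
from utils.losses import custom_loss  # hypothetical name, not confirmed by the snippet

model = load_model(model_path, custom_objects={'custom_loss': custom_loss})
# Or skip compiling entirely if the model is only used for inference:
model = load_model(model_path, compile=False)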