Example #1
    def load(cls, path):
        # type: (Text) -> KerasPolicy
        from tensorflow.keras.models import load_model

        if os.path.exists(path):
            featurizer = TrackerFeaturizer.load(path)
            meta_path = os.path.join(path, "keras_policy.json")
            if os.path.isfile(meta_path):
                with io.open(meta_path) as f:
                    meta = json.loads(f.read())

                model_file = os.path.join(path, meta["model"])

                graph = tf.Graph()
                with graph.as_default():
                    session = tf.Session()
                    with session.as_default():
                        model = load_model(model_file)

                return cls(featurizer=featurizer,
                           model=model,
                           graph=graph,
                           session=session,
                           current_epoch=meta["epochs"])
            else:
                return cls(featurizer=featurizer)
        else:
            raise Exception("Failed to load dialogue model. Path {} "
                            "doesn't exist".format(os.path.abspath(path)))
            faces.append(face)
            locs.append((startX, startY, endX, endY))

    # make sure preds is defined even if no faces were detected
    preds = []
    if len(faces) > 0:
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    return (locs, preds)


prototxtPath = r"face_detector\deploy.prototxt"
weightsPath = r"face_detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

maskNet = load_model("mask_detector.model")

print("[INFO] starting video stream...")
vs = cv2.VideoCapture(0)

while True:

    ret, frame = vs.read()
    frame = imutils.resize(frame, width=800)

    (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)

    for (box, pred) in zip(locs, preds):

        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred
from tensorflow.keras.models import load_model
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import RMSprop
import pandas as pd
import numpy as np

import config

model = load_model(os.path.join(config.models_path, "my_model.h5"))
model.summary()

#Training Data
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   fill_mode = 'nearest')

train_generator = train_datagen.flow_from_directory(config.final_train_dir,
                                                    target_size = (84, 84), 
                                                    batch_size = 100,
                                                    class_mode = 'categorical')

test_datagen = ImageDataGenerator(rescale = 1./255,
                                  fill_mode = 'nearest')

test_generator = test_datagen.flow_from_directory(config.test_dir,
                                                  target_size = (84, 84),
                                                  batch_size = 1,
                                                  class_mode = None,    #Only data, no labels
                                                  shuffle = False)      #Keep data in same order as labels

model.compile(loss='categorical_crossentropy',
#     print('input_image')
#     input_image1 = image.img_to_array(input_image)
#     input_image2 = np.expand_dims(input_image1, axis=0)
#     classes = np.array(['DAMAGED', 'WHOLE'])
#     class_ind = model.predict_classes(input_image2)
#     return classes[class_ind][0]


app = Flask(__name__)
# Configuring a secret SECRET_KEY
app.config['SECRET_KEY'] = 'mysecretkey'

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

# Loading trained Model
car_model = load_model("FindingDamagedCar.h5")


@app.route('/', methods=["GET", "POST"])
def index():
    if request.method == "POST":
        global item
        global imageresults
        global pathtohtml
        if request.files:
            # save the file to temp folder
            item = request.files["image"]
            filename = item.filename
            imgfolder = os.getcwd()+'\\static\\image'
            savefilepath = os.path.join(imgfolder,filename)
            pathtohtml = "http://127.0.0.1:5000/static/image/"+filename
Example #5
model_dict = get_model_dict()

jpeg_denoises = [
    'jpeg-none', 'jpeg-auto', 'jpeg-verylow', 'jpeg-low', 'jpeg-medium',
    'jpeg-high', 'jpeg-veryhigh'
]
scales = ['1x', '2x', '4x', '8x']
denoises = [
    'denoise-none', 'denoise-light', 'denoise-medium', 'denoise-strong'
]

models = {}

for func, info in model_dict.items():
    models[func] = load_model(info['path'])

app = flask.Flask(__name__)


@app.route('/', methods=['GET'])
def index():
    return flask.render_template('index.html')


@app.route('/maxres', methods=['POST'])
def maxres():
    scale = flask.request.form.get('scale')
    jpeg_denoise = flask.request.form.get('jpeg')
    denoise = flask.request.form.get('denoise')
    img = None
from tensorflow import Graph
from tensorflow.compat.v1 import Session
from io import BytesIO
from six.moves import urllib

img_height, img_width = 224, 224
with open('./models/modelo.json', 'r') as f:
    labelInfo = f.read()

labelInfo = json.loads(labelInfo)

model_graph = Graph()
with model_graph.as_default():
    tf_session = Session()
    with tf_session.as_default():
        model = load_model('./models/pesos.h5')


def index(request):
    context = {'a': 1}
    return render(request, 'index.html', context)


def predictImage(request):
    print(request)
    print(request.POST.dict())
    fileObj = request.FILES['filePath']
    fs = FileSystemStorage()
    filePathName = fs.save(fileObj.name, fileObj)
    filePathName = fs.url(filePathName)
    print(filePathName)
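    # Hedged sketch (not part of the original): the uploaded image would
    # typically be preprocessed and classified inside the graph/session the
    # model was loaded into above (required for TF1-style sessions).
    from tensorflow.keras.preprocessing import image  # assumed helper import
    img = image.load_img('.' + filePathName, target_size=(img_height, img_width))
    x = image.img_to_array(img).reshape(1, img_height, img_width, 3) / 255.0
    with model_graph.as_default():
        with tf_session.as_default():
            predictions = model.predict(x)
    # ... a render()/JsonResponse with the prediction would typically follow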
Example #7
def detect_values(src):

	model_path = utils.resource_path("trained_MNIST_model.h5")

	model = load_model(model_path)

	img = src
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	blurred = cv2.GaussianBlur(gray, (5, 5), 0)

	edged = cv2.Canny(blurred, 30, 150)
	cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	cnts = sort_contours(cnts, method="left-to-right")[0]

	chars = []

	for c in cnts:
		(x, y, w, h) = cv2.boundingRect(c)

		if (w >= 3 and w <= 50) and (h >= 15 and h <= 50):
			
			roi = gray[y:y + h, x:x + w]			

			thresh = cv2.threshold(roi, 0, 255,
				cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
			(tH, tW) = thresh.shape

			if tW > tH:
				thresh = imutils.resize(thresh, width=32)
			# otherwise, resize along the height
			else:
				thresh = imutils.resize(thresh, height=32)

			(tH, tW) = thresh.shape
			dX = int(max(0, 32 - tW) / 2.0)
			dY = int(max(0, 32 - tH) / 2.0)
			# pad the image and force 32x32 dimensions
			padded = cv2.copyMakeBorder(thresh, top=dY, bottom=dY,
				left=dX, right=dX, borderType=cv2.BORDER_CONSTANT,
				value=(0, 0, 0))
			padded = cv2.resize(padded, (32, 32))
			# prepare the padded image for classification via our
			# handwriting OCR model
			padded = padded.astype("float32") / 255.0
			padded = np.expand_dims(padded, axis=-1)
			# update our list of characters that will be OCR'd
			chars.append((padded, (x, y, w, h)))

	boxes = [b[1] for b in chars]
	chars = np.array([c[0] for c in chars], dtype="float32")
	# OCR the characters using our handwriting recognition model
	preds = model.predict(chars)
	# define the list of label names
	labelNames = "0123456789"
	labelNames = [l for l in labelNames]

	boxes_val = []

	for (pred, (x, y, w, h)) in zip(preds, boxes):
		# find the index of the label with the largest corresponding
		# probability, then extract the probability and label
		i = np.argmax(pred)
		prob = pred[i]
		label = labelNames[i]

		# cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
		# cv2.putText(img, label, (x - 10, y - 10),
		# 	cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)

		boxes_val.append([(x,y,w,h), int(label)])

	# print(boxes_val)
	# cv2.imshow('res', img)
	# cv2.waitKey(0)
	# cv2.destroyAllWindows()

	return boxes_val
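# Hedged usage sketch (not in the original; the image filename is hypothetical):
if __name__ == "__main__":
	import cv2
	results = detect_values(cv2.imread("digits.png"))
	for (x, y, w, h), value in results:
		print(value, (x, y, w, h))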
Example #8
              epochs=10,
              validation_data=(X_test, y_cat_test),
              callbacks=[early_stop],
              batch_size=4)

    # save the model
    model.save('model to predict animals.h5')

    # save the history of the model
    models_history = pd.DataFrame(model.history.history)
    models_history.to_csv('history of the model.csv', index=False)

    # load the model
    from tensorflow.keras.models import load_model

    my_model = load_model('model to predict animals.h5')

    # load the history of the model
    models_history = pd.read_csv('history of the model.csv')

    # plot the validation loss and accuracy
    models_history.plot()
    plt.show()

    # print the history of the model: loss and accuracy
    print(models_history)

    # plot only the accuracy
    models_history[['accuracy', 'val_accuracy']].plot()
    plt.show()
Example #9
 def load_model(self, path):
     self.model = load_model(path)
if reminder != 0.:
    raise ValueError('Hop size must be a divider of annotation hop (640)')
else:
    frames_per_annotation = int(frames_per_annotation)
'''

#custom loss function
batch_size=32
def batch_CCC(y_true, y_pred):
    CCC = uf.CCC(y_true, y_pred)
    CCC = CCC /float(batch_size)
    return CCC

MODEL = '../models/audio_model_reg_512_dense_512_reshapegru512_256_128_64_32_reg1_seq100.hdf5'
#load classification model and latent extractor
valence_model = load_model(MODEL, custom_objects={'CCC':uf.CCC,'batch_CCC':batch_CCC})

# latent_extractor = K.function(inputs=[valence_model.input], outputs=[valence_model.get_layer('flatten_1').output])

#load datasets rescaling
reference_predictors = np.load(REFERENCE_PREDICTORS_LOAD)
ref_mean = np.mean(reference_predictors)
ref_std = np.std(reference_predictors)
predictors = np.load(EVALUATION_PREDICTORS_LOAD)
target = np.load(EVALUATION_TARGET_LOAD)

print ""
print "using model: " + MODEL


def predict_datapoint(input_sound, input_annotation):
Example #11
 def load(self):
     return load_model(self.h5_path)
Example #12
import cv2
from cv2 import *
import numpy as np
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
import keras
from tensorflow.keras.models import load_model
import streamlit as st
from PIL import Image

tf.keras.backend.clear_session()

model = load_model("DS2_ResNet_3_Tensorflow_2point2_Histo_Erode_Dilate.h5",
                   compile=False)


def predict_func(img):
    print(img.shape)
    IMG_SIZE = 150
    img_path = "Test/yes/1.jpeg"
    kernel = np.ones((4, 4), np.uint8)

    norm = cvtColor(img, COLOR_BGR2GRAY)
    th1 = equalizeHist(norm)
    eroded = cv2.erode(th1, kernel)
    dilate = cv2.dilate(eroded, kernel)
    imwrite(img_path, dilate)

    VAL_DIR = "Test/"
    val_datagen = ImageDataGenerator(rescale=1 / 255)
    val_generator = val_datagen.flow_from_directory(VAL_DIR,
                                                    target_size=(IMG_SIZE,
def Load_Model(m_path, **kwargs):
    """
    Load a Keras .h5 format model.
    A wrapper around Keras' load_model; the parameters are defined the same way.
    """
    return load_model(m_path, **kwargs)

# Testing
if __name__ == '__main__':
    model_name = "0228_train_2_DDQN"

    # Path where the Keras .h5 format weight files are stored
    Model_Path = os.path.dirname(os.getcwd()) + \
        '\\' + myParams.AI_Foldername
    if not os.path.isdir(Model_Path):
        os.mkdir(Model_Path)
    f_path = Model_Path + '\\' + model_name + ".h5"
    pydot_path = Model_Path + '\\' + model_name + ".png"

    # Check the loaded file
    dnn = load_model(f_path)
    print("Summary of the keras model")
    print("file path: ", f_path)
    dnn.summary()  # Show the model summary
    Print_Model_Struct(dnn)
    # Export a description of the model architecture
    # plot_model(dnn, to_file = pydot_path, show_shapes = True, \
    #            show_layer_names = True)
    """
    Export_Model_Struct(dnn, to_file = pydot_path, \
                        show_shapes = True, \
                        show_layer_names = True) # wrapper of plot_model
    """

    # Visualize the network
    vis_path = Model_Path + '\\' + model_name + "_network.png"
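    # Hedged sketch (not in the original): the Load_Model wrapper above simply
    # forwards keyword arguments to keras' load_model, e.g. to skip compiling
    # when the model is only needed for inference. dnn_infer is a hypothetical name.
    dnn_infer = Load_Model(f_path, compile=False)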
Example #15
def make_forecast(df_provided=None, start=None, end=None, a=None):

    import pandas as pd
    import numpy as np
    import datetime

    start = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M')
    end = datetime.datetime.strptime(end, '%Y-%m-%d %H:%M')

    if end < start:
        dummy = start
        start = end
        end = dummy

    df = pd.read_csv(r'basic/data/megawatts.csv')

    fest = pd.read_csv(r'basic/data/festival.csv')

    df['datetime'] = pd.to_datetime(df['datetime'])
    fest['date'] = pd.to_datetime(fest['date'])

    if df_provided is None:
        no_of_predictions = int(((end - start).total_seconds() / 900) + 1)

        df_more = pd.DataFrame(np.zeros(
            (no_of_predictions))).rename(columns={0: 'load'})
        df_more['datetime'] = pd.date_range(start, end, freq='0.25H')

        df_copy = pd.concat([df, df_more], sort=False, axis=0)
        df_new = mach3(df_copy, fest, [-1, -2, -3, 0, 1, 2, 3], 12, 12)
        df_new = df_new.drop(columns=['load'], axis=1)

    else:
        df_new = mach3(df, fest, [-1, -2, -3, 0, 1, 2, 3], 12, 12)
        df_copy = df_new.reset_index(drop=True)
        df_copy['datetime'] = df_new.index
        df_new = df_new.drop(columns=['load'], axis=1)

    from sklearn.preprocessing import MinMaxScaler
    scaler = MinMaxScaler()
    scaler.fit(df_new)
    X = scaler.transform(df_new.loc[start:end])
    X = X.reshape(X.shape[0], 6, 7, 4)

    from tensorflow.keras.models import load_model
    model = load_model('basic/data/sE19_R83.hdf5')
    Y = model.predict(X)

    target_scaler = MinMaxScaler()
    target_scaler.fit(np.array(df['load']).reshape(-1, 1))
    Y_scaled_back = target_scaler.inverse_transform(Y)

    df_copy.set_index('datetime', inplace=True, drop=True)
    df_copy.loc[start:end, 'load'] = Y_scaled_back.reshape(-1, )

    if a == 3:
        result = df_copy.loc[start:end, 'load'].to_frame()
        result_foo = result.rename(columns={'load': 'predicted load'})
        result_foo['actual load'] = df.set_index('datetime').loc[start:end,
                                                                 'load']
        result_foo.to_csv("basic/static/basic/data/result.csv")
        return plot(result, start, end, 3)

    else:
        result = df_copy.loc[start:end, 'load']
        result.to_csv("basic/static/basic/data/result.csv")
        return plot(result, start, end, 1)
Example #16
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import os


args = {'face': 'face_detector', 'model': 'my_mask_detector', 'confidence': 0.5}
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model(args["model"])

def detect_and_predict_mask(frame, faceNet, maskNet, args):
    # grab the dimensions of the frame and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                 (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = []
def predict_traffic_file():
    #K.clear_session()

    #BELOW docstring lines are required to support swagger documentation
    """ Endpoint returning traffic sign image prediction
    ---
    parameters:
        - name: input_file
          in: formData
          type: file
          required: true
    """
    # Get the input file from the http request
    #read image file string data
    filestr = request.files['input_file'].read()
    #convert string data to numpy array
    npimg = np.frombuffer(filestr, np.uint8)
    # convert numpy array to image
    img = cv2.imdecode(npimg, cv2.IMREAD_UNCHANGED)
    in_image = cv2.resize(img, (32, 32), interpolation = cv2.INTER_AREA)
    
    in_image = np.expand_dims(in_image, axis = 0)

    # Load the saved traffic sign keras model
    model_filename = "model.h5"

    # Load model from file - read mode  
    traffic_model = load_model(model_filename, compile=False)

    # Make prediction using the input image file
    ## check dimensions before and after of the numpy array to see 2D, 3D, 4D
    result = traffic_model.predict(in_image)

    def sign(i):
        switcher={
                0:'Maximum speed limit (10 km/h)',
                1:'Maximum speed limit (30 km/h)',
                2:'Maximum speed limit (50 km/h)',
                3:'Maximum speed limit (60 km/h)',
                4:'Maximum speed limit (70 km/h)',
                5:'Maximum speed limit (80 km/h)',
                6:'End 80 km/h speed limit',
                7:'Maximum speed limit (100 km/h)',
                8:'Maximum speed limit (120 km/h)',
                9:'No Overtaking',
                10:'No overtaking by trucks/heavy goods vehicles',
                11:'Crossroads ahead with a minor road',
                12:'Priority road',
                13:'Give way',
                14:'Stop',
                15:'Road closed to all traffic',
                16:'No trucks/heavy goods vehicles',
                17:'No Entry',
                18:'Attention: Other Dangers!',
                19:'Curve to the left',
                20:'Curve to the right',
                21:'Series of curves, first to the left',
                22:'Uneven road surface',
                23:'Slippery road surface',
                24:'Road narrows on the right',
                25:'Roadworks',
                26:'Traffic signals ahead',
                27:'Pedestrian crossing ahead',
                28:'Watch for children',
                29:'Watch for cyclists',
                30:'Risk of ice',
                31:'Watch for wild animals',
                32:'End all previously signed restrictions or prohibitions',
                33:'Turn right only',
                34:'Turn left only',
                35:'Proceed straight ahead only',
                36:'Proceed straight or turn right',
                37:'Proceed straight or turn left',
                38:'Keep right',
                39:'Keep left',
                40:'Roundabout',
                41:'End overtaking prohibition',
                42:'End overtaking prohibition for trucks/heavy goods vehicles',
        }
        return switcher.get(i,"Unknown")
		
    print(sign(np.argmax(result)))

    K.clear_session()

    # Send the prediction as response
    return str(sign(np.argmax(result)))
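# Hedged sketch (not part of the original): a common variant loads the model
# once at module level and reuses it in the handler, instead of calling
# load_model() (and K.clear_session()) on every request. The names below are
# hypothetical.
TRAFFIC_MODEL = load_model("model.h5", compile=False)

def predict_traffic_array(in_image):
    # in_image: a (1, 32, 32, 3) array prepared as in predict_traffic_file above
    return TRAFFIC_MODEL.predict(in_image)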
def load_saved_model(model_dir, model_name):
    model_path = model_dir + model_name
    return load_model(model_path)
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import CosineSimilarity
import sys
import numpy as np
import json

args = sys.argv[1:]
name = args[0].replace('_',' ')
N = int(args[1])

int_to_card = json.load(open('ml_files/recommender_id_map.json','r'))
int_to_card = {int(k):v for k,v in int_to_card.items()}
card_to_int = {v:k for k,v in int_to_card.items()}

num_cards = len(int_to_card)

model = load_model('ml_files/high_req')

cards = np.zeros((num_cards,num_cards))
np.fill_diagonal(cards,1)

dist_f = CosineSimilarity()

embs = model.encoder(cards)
idx = card_to_int[name]

dists = np.array([
    dist_f(embs[idx],x).numpy() for x in embs
])

ranked = dists.argsort()
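# Hedged sketch (not in the original): Keras' CosineSimilarity loss returns
# negative values for similar vectors (-1 for identical ones), so the ascending
# argsort above puts the closest cards first; index 0 is the query card itself,
# hence the slice starts at 1.
for i in ranked[1:N + 1]:
    print(int_to_card[int(i)])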
Example #20
def get_sintetic_data(
    saved_path='C:/Users/56979/PycharmProjects/TimeGAN/tensorflow2_implementation/TimeGAN900/experiment_00/synthetic_data'
):
    # Recreate exactly the same model from the file alone
    new_model = load_model(saved_path)
    return new_model
 def load(filepath, custom_objects=None, _compile=True):
     from tensorflow.keras.models import load_model
     return load_model(filepath, custom_objects, _compile)
Example #22
import cv2
import os
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
import numpy as np

# haar cascade used to identify faces
cascPath = os.path.dirname(
    cv2.__file__) + "/data/haarcascade_frontalface_alt2.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

# model trained in model.py
model = load_model("mask_classification_model.h5")

video_capture = cv2.VideoCapture(0)

while True:
    # capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(60, 60),
                                         flags=cv2.CASCADE_SCALE_IMAGE)

    faces_list = []
    predictions = []
    for (x, y, w, h) in faces:
Example #23
def diagnosis(symptoms):

    sorted_df = symptoms_df.sort_values('colName', ascending=True)

    model_input = []
    for val in sorted_df['colName']:
        if val in symptoms:
            model_input.append(1)

        else:
            if val != 'prognosis':
                model_input.append(0)

    print(len(model_input))

    user_symptoms = pd.Series(symptoms)
    user_json = user_symptoms.to_json()

    model = load_model('model')

    model_array = np.array(model_input).reshape(-1, 132)

    result = model.predict(model_array)[0]
    print(result)

    decoder_df = pd.read_csv('encoder.csv')

    found = []
    locations = []
    for index, row in decoder_df.iterrows():
        if row['Prognosis'] not in found:
            locations.append(row['Unnamed: 0'])
            found.append(row['Prognosis'])

    slim_df = decoder_df.iloc[locations]
    decoder_df = slim_df

    strings = []
    count = 0
    for i in decoder_df['y']:
        strings.append("")
        undesirables = ['[', ']', '\n']
        for x in i:
            if x not in undesirables:
                strings[count] = strings[count] + x
        count += 1
    strings
    y = []

    for x in strings:
        temp = x.split('.')
        final = []
        for i in temp:
            if i != '':
                final.append(int(i.strip()))
        y.append(final)

    final_df = decoder_df['Prognosis'].to_frame()
    final_df['y'] = y

    indices = []
    for x in y:
        indices.append(x.index(1))

    final_df['Index'] = indices

    probability = []
    for x in final_df['Index']:
        probability.append(result[x])
    final_df['Probability'] = probability

    top_diagnosis = final_df.sort_values('Probability',
                                         ascending=False).head(3)

    user_json = top_diagnosis.to_json(orient='records')

    return user_json
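# Hedged usage sketch (not in the original; the symptom strings are hypothetical
# and must match values in symptoms_df['colName']):
if __name__ == '__main__':
    print(diagnosis(['headache', 'high_fever', 'fatigue']))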
Example #24
def main():
    model = load_model("model.h5")
    canvas = Canvas(model)
    canvas.run()
def main():
    train = True
    already_trained = True
    data_augmentation = True
    history = None
    if handwritten:
        dataset_path = "/media/hdd_linux/DataSet/mnist_handwritten/"

    else:
        # dataset_path = "/media/hdd_linux/DataSet/mnist_numeric/"
        dataset_path = "/media/hdd_linux/DataSet/Mine/"

    X, Y = load_data(dataset_path, handwritten)
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.15)  # , random_state=2

    if not already_trained:
        models_path = 'model/'
        model = CNN_model()

    else:
        from tensorflow.keras.models import load_model
        models_path = 'model/'
        model_name = "{}my_super_model.h5".format(models_path, len(os.listdir(models_path)))
        model = load_model(model_name)
        print("Model Loaded")

    if train:
        if not data_augmentation:
            history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
                                validation_data=(x_test, y_test))  # validation_data=(X_val, Y_val)

        else:
            datagen.fit(x_train)
            # Fit the model
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          epochs=epochs, validation_data=(x_test, y_test),
                                          verbose=1, steps_per_epoch=x_train.shape[0] // batch_size
                                          , callbacks=[learning_rate_reduction])

        new_model_name = "{}model_{}.h5".format(models_path, len(os.listdir(models_path)))
        model.save(new_model_name)
    classes = read_file_classes(dataset_path + "data.names")

    if train:
        # Predict the values from the validation classifier
        Y_pred = model.predict(x_test)
        # Convert predictions classes to one hot vectors
        Y_pred_classes = np.argmax(Y_pred, axis=1)
        # Convert validation observations to one hot vectors
        Y_true = np.argmax(y_test, axis=1)
        # compute the confusion matrix
        confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
    else:
        # Predict the values from the validation classifier
        Y_pred = model.predict(X)
        # Convert predictions classes to one hot vectors
        Y_pred_classes = np.argmax(Y_pred, axis=1)
        # Convert validation observations to one hot vectors
        Y_true = np.argmax(Y, axis=1)
        # compute the confusion matrix
        confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
        count_wrong = 0
        for img, y_true, y_pred in zip(X, Y_true, Y_pred_classes):
            if y_true == y_pred:
                continue
            count_wrong += 1
            cv2.imshow("{} - Pred {} - True {}".format(count_wrong, classes[y_pred], classes[y_true]), img)
    # plot the confusion matrix
    plot_confusion_matrix(confusion_mtx, classes=classes)
    if train:
        plot_history(history)
    plt.show()
Example #26
# Import the CIFAR-10 dataset and rescale the pixel values
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
# Use smaller subset -- speeds things up
x_train = x_train[:10000]
y_train = y_train[:10000]
x_test = x_test[:1000]
y_test = y_test[:1000]

# save automatically during training via a checkpoint callback
checkpoint_path = 'model_checkpoints'
checkpoint = ModelCheckpoint(checkpoint_path,
                             save_weights_only=False,
                             save_freq='epoch',
                             verbose=1)
model = get_new_model()
model.fit(x_train, y_train, epochs=3, callbacks=[checkpoint])
get_test_accuracy(model, x_test, y_test)

# manually save
model.save('my_model')
model.save('keras_model.h5')

# load from scratch
model = load_model('keras_model.h5')
get_test_accuracy(model, x_test, y_test)
model = load_model('my_model')
get_test_accuracy(model, x_test, y_test)
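# Hedged sketch (not in the original): because save_weights_only=False, the
# checkpoint callback wrote a full model to checkpoint_path as well, so it can
# be restored the same way as the manually saved models above.
model = load_model(checkpoint_path)
get_test_accuracy(model, x_test, y_test)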
Example #27
            #divide by span of 3:
            val /= 3.0
        return source_input_no_resid, ray_input_no_resid, output_no_resid


def denormalize_val(val):
    #print( "denromalizing ", val, " to ", (math.exp( math.exp( val + 1 ) ) - 10) )
    return math.exp(math.exp(val + 1)) - 10


#########
# START #
#########

if os.path.isfile(args.model):
    model = load_model(args.model)
else:
    print("Model " + args.model + " is not a file")
    exit(1)

mse_pre_denorm = 0.
mse_post_denorm = 0.
mse_post_denorm_lt_0 = 0.
mse_post_denorm_lt_n2 = 0.
mse_post_denorm_lt_n4 = 0.
mse_post_denorm_lt_n6 = 0.

allxs = []
allys = []

xs_lt_0 = []
Example #28
from flask_cors import CORS
from tensorflow.keras.models import load_model
from flask import Flask, request, render_template, make_response, jsonify

config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.InteractiveSession(config=config)

app = Flask(__name__)
CORS(app)

HOST = "0.0.0.0"
PORT_NUMBER = int(os.getenv('PORT_NUMBER', 8080))
APP_ROOT_1 = os.getenv('APP_ROOT', '/infer1')
APP_ROOT_2 = os.getenv('APP_ROOT', '/infer2')
model = load_model('animal_model_classification.h5')
image_width = 300
image_height = 300
classes = ['cat', 'dog', 'pandas']


# render default webpage
@app.route('/')
def home():
    return render_template('home.html')


@app.route(APP_ROOT_1, methods=['POST', 'GET'])
def classify_image():
    if request.method == 'POST':
        # getting data from the HTML form
Example #29
#!/usr/bin/env python
# coding: utf-8
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import os

model = load_model("\mask-detection.model")
prototxtPath = "\deploy.prototxt"
weightsPath = "\res10_300x300_ssd_iter_140000.caffemodel"
net = cv2.dnn.readNet(prototxtPath, weightsPath)


def readImage(imagePath):
    #image = cv2.imread(imagePath)
    image = cv2.resize(imagePath, (400, 300))
    h, w, _ = image.shape
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
    net.setInput(blob)
    detections = net.forward()
    count_persons = 0
    count_mask = 0
    count_withoutmask = 0
    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]

        if confidence > 0.5:
            count_persons += 1
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
Example #30
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np

#from keras.models import load_model
from tensorflow.keras.models import load_model
model = load_model('chatbot_model.h5')
import json
import random
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))


def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words

# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence

def bow(sentence, words, show_details=True):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words - matrix of N words, vocabulary matrix
    bag = [0]*len(words)
    for s in sentence_words:
        for i,w in enumerate(words):
    def test_top_eight_abnormalities(self, model_path):
        # load model
        model = load_model(model_path)

        # load data
        X, Y = load_X_and_Y(test_only=True)
        _, _, x_test = X
        _, _, y_test = Y
        print("x_test shape", x_test.shape)
        print("y_test shape", y_test.shape)

        X = load_X_descr(test_only=True)
        _, _, x_test_all_descr = X

        # segment test set based on description and obtain accuracies
        # for each description
        roc_auc_scores = {}
           
        for descr in most_common_list(self.dataset, num_most_common=9):
            if descr == "normal":
                continue
            
            x_test_descr, y_test_descr = [], []
            for idx in range(len(x_test)):
                if x_test_all_descr[idx] == descr:
                    x_test_descr.append(x_test[idx])
                    y_test_descr.append(y_test[idx])
            x_test_descr = np.array(x_test_descr).reshape((-1, 224, 224, 3))
            y_test_descr = np.array(y_test_descr).reshape((-1, 1))

            preds = model.predict(x_test_descr).reshape((-1,1))
            """preds_list = []
            for pred in preds:
                preds_list.append(pred)
            y_list = []
            for label in y_test_descr:
                y_list.append(label)
            print(descr + " predictions: ", preds_list)
            print(descr + "labels: ", y_list)
            print("Performing Heuristic!")
            new_preds_list = []
            new_y_list = []
            for i in range(len(preds_list)):
                if preds_list[i] > 0.4 and preds_list[i] < 0.6:
                    continue
                new_preds_list.append(preds_list[i])
                new_y_list.append(y_list[i])
            #x_test_descr = np.array(x_test_descr).reshape((-1, 224, 224, 3))
            y_test_descr = np.array(new_y_list).reshape((-1, 1))
            preds = np.array(new_preds_list).reshape((-1, 1))
            print(descr + " formatted predictions: ", preds)
            print(descr + " formatted labels: ", y_test_descr)"""
            preds = preds.reshape((-1,)).tolist()
            y_test_descr = y_test_descr.reshape((-1,)).tolist()
            print("preds: ", preds)
            print("y_test_labels: ", y_test_descr)
            
            n_correct = 0
            for i, pred in enumerate(preds):
                if pred < 0.5 and y_test_descr[i] == 0.0:
                    n_correct += 1
                elif pred >= 0.5 and y_test_descr[i] == 1.0:
                    n_correct += 1
            acc = (n_correct * 1.0) / len(preds)
                    
            #roc_auc_scores[descr] = roc_auc_score(y_test_descr, preds)

            #print(descr, roc_auc_scores[descr])

            print(descr, acc)
Example #32
def mainpage():
    orgtit = 0
    if request.method == 'POST':
        try:
            abstract = request.form.get("Summary")
            #print(abstract)
            title = None
            if (abstract != None):
                abstract = (str(abstract).lstrip()).rstrip()
                match_key = re.search("(Keywords|KEYWORDS)((.|\n)*)", abstract)

                if (match_key == None):
                    keywords = ""
                else:
                    keywords = (str(match_key.group(0)).lstrip()).rstrip()

            else:
                file = request.files['Files']
                print(file)

                filename = secure_filename(file.filename)
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))

                ########## Code for Extracting Abstract and Keywords from PDF   ###########

                fp = open(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                          'rb')

                f1 = open('Test_doc.txt', 'w+')
                parser = PDFParser(fp)
                doc = PDFDocument()
                parser.set_document(doc)
                doc.set_parser(parser)
                doc.initialize('')
                rsrcmgr = PDFResourceManager()
                laparams = LAParams()
                device = PDFPageAggregator(rsrcmgr, laparams=laparams)
                interpreter = PDFPageInterpreter(rsrcmgr, device)

                extracted_text = ""
                for page in doc.get_pages():
                    interpreter.process_page(page)
                    layout = device.get_result()
                    for lt_obj in layout:
                        if isinstance(lt_obj, LTTextBox) or isinstance(
                                lt_obj, LTTextLine):
                            extracted_text += lt_obj.get_text()
                            f1.write(lt_obj.get_text())
                            #print(lt_obj.get_text())

                f1.close()
                fp.close()

                f = open('Test_doc.txt', "r")
                f1 = open("Abstract.txt", "w+")
                f2 = open("Keywords.txt", "w+")

                string = f.read()
                match = re.search(
                    "(Abstract|ABSTRACT)((.|\n)*)(I|1)\s*\.*?\s*(Introduction|INTRODUCTION|IN TRODUC T ION)",
                    string)
                abstract = (str(match.group(2)).lstrip()).rstrip()
                match_key = re.search("(Keywords|KEYWORDS)((.|\n)*)", abstract)

                if (match_key == None):
                    keywords = ""
                else:
                    keywords = (str(match_key.group(0)).lstrip()).rstrip()

                f2.write(keywords)
                f1.write(abstract)
                f2.close()
                f1.close()
                f.close()

                ################    Code for Extracting title from the PDF file     ##################

                for top, dirs, files in os.walk(
                        '/Users/anubhavjain/Desktop/NLP_Pdf_extract/paper1/pdf/'
                ):
                    for filename in files:
                        if filename.endswith('.pdf'):
                            abspath = os.path.join(top, filename)
                            subprocess.call(
                                './soffice --convert-to html "{}" --outdir /Users/anubhavjain/Desktop/NLP_Pdf_extract/paper1/html'
                                .format(abspath),
                                shell=True)

                for top, dirs, files in os.walk(
                        '/Users/anubhavjain/Desktop/NLP_Pdf_extract/paper1/html/'
                ):
                    for filename in files:
                        if filename.endswith('.html'):
                            abspath = os.path.join(top, filename)
                            subprocess.call(
                                './soffice --convert-to docx:"MS Word 2007 XML" "{}" --outdir /Users/anubhavjain/Desktop/NLP_Pdf_extract/paper1/docx/'
                                .format(abspath),
                                shell=True)

                for top, dirs, files in os.walk(
                        '/Users/anubhavjain/Desktop/NLP_Pdf_extract/paper1/docx/'
                ):
                    for filename in files:
                        if filename.endswith('.docx'):
                            abspath = os.path.join(top, filename)
                            document = docx.Document(abspath)
                            bolds = []
                            italics = []
                            count = 0
                            count_real = 0
                            temp = ""
                            flag = True
                            for para in document.paragraphs:
                                if (not flag):
                                    break
                                for run in para.runs:
                                    if (run.text != ""):
                                        count_real += 1
                                        temp += run.text + " "
                                    if (count_real == 2):
                                        temp1 = temp
                                    if (not flag):
                                        break
                                    if (count == 1):
                                        flag = False
                                    if run.bold:
                                        if (run.text != ""):
                                            bolds.append(run.text)
                                            count += 1

                            boltalic_Dict = {'bold_phrases': bolds}
                            title = ""
                            for i in bolds:
                                if (i != ""):
                                    title += i + " "
                            if (len(title) < 2):
                                title = temp1

                print(title)
                f3 = open("Ground_Title.txt", "w+")
                f3.write(title)
                f3.close()
                orgtit = 1

            # ##############      -----------------       #############

            encoderModel = load_model(
                "titlegen.h5",
                custom_objects={'AttentionLayer': AttentionLayer})
            decoderModel = load_model(
                "titlegenPredict.h5",
                custom_objects={'AttentionLayer': AttentionLayer})

            with open('xtokenizer.pickle', 'rb') as handle:
                x_tokenizer = pickle.load(handle)
            with open('ytokenizer.pickle', 'rb') as handle:
                y_tokenizer = pickle.load(handle)

            reverse_target_word_index = y_tokenizer.index_word
            reverse_source_word_index = x_tokenizer.index_word
            target_word_index = y_tokenizer.word_index

            max_text_len = 200
            max_summary_len = 15
            no_of_extracted_sentences = 10

            def text_cleaner(text):
                newString = text.lower()
                newString = BeautifulSoup(newString, "lxml").text
                newString = re.sub(r'\([^)]*\)', '', newString)
                newString = re.sub('"', '', newString)
                newString = ' '.join([
                    contraction_mapping[t] if t in contraction_mapping else t
                    for t in newString.split(" ")
                ])
                newString = re.sub(r"'s\b", "", newString)
                newString = re.sub("[^a-zA-Z]", " ", newString)
                tokens = [w for w in newString.split() if not w in stop_words]
                long_words = []
                for i in tokens:
                    if len(i) >= 3:  #removing short word
                        long_words.append(i)
                return (" ".join(long_words)).strip()

            def extractText(text):
                sentences = sent_tokenize(text)
                clean_sentences = list()

                for sentence in sentences:
                    clean_sentences.append(text_cleaner(sentence))

                sentence_vectors = []
                for i in clean_sentences:
                    if len(i) != 0:
                        v = sum([
                            word_embeddings.get(w, np.zeros((100, )))
                            for w in i.split()
                        ]) / (len(i.split()) + 0.001)
                    else:
                        v = np.zeros((100, ))
                    sentence_vectors.append(v)

                # similarity matrix
                sim_mat = np.zeros([len(sentences), len(sentences)])

                for i in range(len(sentences)):
                    for j in range(len(sentences)):
                        if i != j:
                            sim_mat[i][j] = cosine_similarity(
                                sentence_vectors[i].reshape(1, 100),
                                sentence_vectors[j].reshape(1, 100))[0, 0]

                nx_graph = nx.from_numpy_array(sim_mat)

                try:
                    scores = nx.pagerank(nx_graph)
                except:
                    exit(1)

                ranked_sentences = sorted(
                    ((scores[i], s) for i, s in enumerate(sentences)),
                    reverse=True)

                es = list()
                for i in range(
                        min(no_of_extracted_sentences, len(ranked_sentences))):
                    es.append(ranked_sentences[i][1])

                extracted_text = " ".join(es)

                return extracted_text

            def decode_sequence(input_seq):
                # Encode the input as state vectors.
                e_out, e_h, e_c = encoderModel.predict(input_seq)

                # Generate empty target sequence of length 1.
                target_seq = np.zeros((1, 1))

                # Populate the first word of target sequence with the start word.
                target_seq[0, 0] = target_word_index['sostok']

                stop_condition = False
                decoded_sentence = ''
                while not stop_condition:

                    output_tokens, h, c = decoderModel.predict(
                        [target_seq] + [e_out, e_h, e_c])

                    # Sample a token
                    sampled_token_index = np.argmax(output_tokens[0, -1, :])
                    sampled_token = reverse_target_word_index[
                        sampled_token_index]

                    if (sampled_token != 'eostok'):
                        decoded_sentence += ' ' + sampled_token

                    # Exit condition: either hit max length or find stop word.
                    if (sampled_token == 'eostok'
                            or len(decoded_sentence.split()) >=
                        (max_summary_len - 1)):
                        stop_condition = True

                    # Update the target sequence (of length 1).
                    target_seq = np.zeros((1, 1))
                    target_seq[0, 0] = sampled_token_index

                    # Update internal states
                    e_h, e_c = h, c

                return decoded_sentence

            def seq2summary(input_seq):
                newString = ''
                for i in input_seq:
                    if ((i != 0 and i != target_word_index['sostok'])
                            and i != target_word_index['eostok']):
                        newString = newString + reverse_target_word_index[
                            i] + ' '
                return newString

            def seq2text(input_seq):
                newString = ''
                for i in input_seq:
                    if (i != 0):
                        newString = newString + reverse_source_word_index[
                            i] + ' '
                return newString

            extracted_text = extractText(abstract)
            cleaned_text = text_cleaner(extracted_text)
            x_val_seq = x_tokenizer.texts_to_sequences([cleaned_text])
            text_encodings = pad_sequences(x_val_seq,
                                           maxlen=max_text_len,
                                           padding='post')[0]
            resultTitle = decode_sequence(
                text_encodings.reshape(1, max_text_len))
            print(resultTitle)

            ############	---------------		###############

            ############	Code for Making the generated title Catchy 	###############

            def count(string):
                return sum(1 for c in string if c.isupper())

            def catchy1(t, text, key):
                s = t.split()
                r = []
                flag = 0
                r.append(s[0])
                for i in range(1, len(s)):
                    if s[i] == s[i - 1]:
                        continue
                    else:
                        r.append(s[i])
                for i in range(len(r)):
                    r[i] = r[i][0].upper() + r[i][1:]
                stop_words = set(stopwords.words('english'))
                text = re.sub(r'[^A-Za-z\s]', "", text)
                text = re.sub(r'https\S+', "", text)
                text = text.lstrip()
                text = text.strip()
                list1 = word_tokenize(text)
                fil_sent = [w for w in list1 if not w in stop_words]

                final = []
                c = {}
                for i in fil_sent:
                    if i[1:] != i[1:].lower() and count(i) > 2:
                        final.append(i)
                        if i not in c:
                            c[i] = 1
                        else:
                            c[i] += 1
                ans = ""
                if len(c) != 0:
                    max_value = max(c.values())
                    last = [k for k, v in c.items() if v == max_value]
                    length = []
                    if len(last) >= 1:
                        for i in range(len(last)):
                            if last[i] == last[i].upper():
                                flag = 1
                                ans = last[i]
                                length.append(len(last[i]))
                            else:
                                length.append(len(last[i]))
                        if ans == "":
                            flag = 1
                            ans = last[length.index(max(length))]
                final = []
                if ans == "":
                    if len(key) > 0:
                        flag = 2
                        pre = re.split('-|;|,|—|:', key)
                        for i in range(len(pre)):
                            if pre[i] != "" and pre[i] != "keywords" and pre[
                                    i] != "Keywords" and pre[i] != "KEYWORDS":
                                final.append(pre[i])
                        for i in range(len(final)):
                            final[i] = final[i].lstrip()
                            final[i] = final[i].rstrip()
                        print(final)
                        z = random.randrange(0, len(final))
                        ans = final[z]
                        ans = ans.replace(" ", "")

                ans1 = ""
                for i in range(len(r)):
                    if i == 0:
                        ans1 = ans1 + r[i]
                    else:
                        ans1 = ans1 + " " + r[i]
                if flag == 2:
                    ans1 = "#" + ans + ": " + ans1
                elif flag == 0:
                    ans1 = ans1
                else:
                    ans1 = ans + ": " + ans1
                return (ans1)

            catchy_title = catchy1(resultTitle, abstract, keywords)
            print("PREDICTED TITLE: ", catchy_title)

            # f3 = open("Predicted_Title.txt","w+")
            # f3.write("PREDICTED TITLE:- ",catchy_title)
            # f3.close()

            # 			#############	Code for Generating the metric for Catchiness	############

            # with open('xtokenizer.pickle', 'rb') as handle:
            #     x_tokenizer = pickle.load(handle)
            # with open('ytokenizer.pickle', 'rb') as handle:
            #     y_tokenizer = pickle.load(handle)

            # dict1 = y_tokenizer.word_counts
            # dict2 = x_tokenizer.word_counts

            # def merge_two_dicts(x, y):
            # 	z = x.copy()
            # 	z.update(y)
            # 	return z

            # dict_new = merge_two_dicts(dict1,dict2)

            # def catchyscore(ground,catchy):
            # 	stopwords = nltk.corpus.stopwords.words('english')

            # 	ground_new = []
            # 	catchy_new = []

            # 	ground = ground.split(" ")
            # 	catchy = catchy.split(" ")

            # 	for i in ground:
            # 		if(i not in stopwords):
            # 			ground_new.append(i)

            # 	for i in catchy:
            # 		if i not in stopwords:
            # 			catchy_new.append(i)

            # 	ground_score = 0	# default values
            # 	catchy_score = random.choice([-1,0,1,2,3])	# default values

            # 	for i in range(len(ground_new)):
            # 		if ground_new[i] in dict_new:
            # 			ground_score+=dict_new[i]

            # 	for i in range(len(catchy_new)):
            # 		if catchy_new[i] in dict_new:
            # 			catchy_score+=dict_new[i]

            # 	final = ground_score - catchy_score
            # 	return final

            # if(abstract!=None):
            # 	catch_score = catchyscore(title,catchy_title)
            # 	print(catch_score)

            # #############	--------------	###############

            if (title):
                return render_template("mainpage.html",
                                       show_results=1,
                                       orgtit=orgtit,
                                       catchy_title=catchy_title,
                                       groundTruth=title)
            else:
                return render_template("mainpage.html",
                                       show_results=1,
                                       orgtit=orgtit,
                                       catchy_title=catchy_title)
        except:
            return render_template("mainpage.html", show_results=0)

    else:
        return render_template("mainpage.html", show_results=0)