def ImageFeature_Extractor(ImageName):
    # Re-structure the VGG16 model: retain the penultimate FC-4096 layer (fc2)
    model_ = VGG16(weights="imagenet")  # load the pre-trained model
    # layers.pop() no longer detaches the head in modern Keras, so rebuild
    # the model directly on the second-to-last layer's output instead
    model_ = Model(inputs=model_.inputs, outputs=model_.layers[-2].output)

    # Extract features from the image (jpg) using the restructured model
    image = load_img(ImageName, target_size=(224, 224))  # load at VGG16's input size
    image = img_to_array(image)  # convert PIL image to a numpy array
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)  # VGG-specific preprocessing
    image_feature = model_.predict(image, verbose=0)  # (1, 4096) feature vector

    return image_feature
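
# A hedged usage sketch (not part of the original snippet): it assumes the
# TensorFlow-Keras imports the function relies on; 'sample.jpg' is a
# placeholder path.
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array

features = ImageFeature_Extractor('sample.jpg')
print(features.shape)  # (1, 4096): the fc2 activations of VGG16
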
def xception_transfer_values(image):
    # `image` must already be a 299x299x3 array, resized and preprocessed
    # by the caller, e.g. via the (commented) steps below:
    #img = image.load_img(img_path, target_size=(299, 299))
    #x = image.img_to_array(img)
    #x = np.expand_dims(x, axis=0)
    #x = preprocess_input(x)
    base_model = Xception()
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('avg_pool').output)
    image = image.reshape((1, 299, 299, 3))
    transferva = model.predict(image)
    # flatten the (1, 2048) 'avg_pool' output to a 1-D vector
    nsamples, npoints = transferva.shape
    d2_train_dataset = transferva.reshape((npoints * nsamples))
    K.clear_session()
    return d2_train_dataset
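
# Sketch of the preprocessing this function expects its caller to perform
# (inferred from the commented-out lines above); 'sample.jpg' is a placeholder.
from tensorflow.keras.applications.xception import preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array

img = img_to_array(load_img('sample.jpg', target_size=(299, 299)))
img = preprocess_input(img)          # scales pixels to [-1, 1]
vec = xception_transfer_values(img)  # flat (2048,) transfer-value vector
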
Example #3
def model_predict(img_path, model):
    image = cv2.imread(img_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    image_con = image.reshape((1, 224, 224, 3))
    image_con = np.array(image_con) / 255.0
    predIdxs = model.predict(image_con)
    # take the index of the label with the largest predicted probability
    predIdxs = np.argmax(predIdxs, axis=1)
    labels = {0: "Covid Positive", 1: "Covid Negative"}
    prednames = labels[predIdxs[0]]
    print(prednames)
    return prednames
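
# Hypothetical usage; both file paths are placeholders for a trained
# binary classifier and a chest X-ray image.
from tensorflow.keras.models import load_model

label = model_predict('xray.jpg', load_model('covid_model.h5'))
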
def resnet50_transfer_values(image):
    base_model = ResNet50(weights='imagenet')
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('avg_pool').output)
    # `image` must already be a 224x224x3 array, resized and preprocessed
    # by the caller, e.g. via the (commented) steps below:
    #img_path = '01_0016.jpg'
    #img = image.load_img(img_path, target_size=(224, 224))
    #x = image.img_to_array(img)
    #x = np.expand_dims(x, axis=0)
    #x = preprocess_input(x)
    image = image.reshape((1, 224, 224, 3))
    transferva = model.predict(image)
    # older Keras returns a (1, 1, 1, 2048) tensor from 'avg_pool'; flatten it
    nsamples, nx, ny, npoints = transferva.shape
    d2_train_dataset = transferva.reshape((npoints * nsamples * nx * ny))
    K.clear_session()
    return d2_train_dataset
Example #5
def load_in_img(img_location):

    # img_location holds the path of the image file to load
    image = cv2.imread(img_location)

    if image is None:
        print(img_location)

    image = cv2.resize(image, (400, 400), interpolation=cv2.INTER_CUBIC)
    image = color_frame_process(image)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image = image.reshape(400, 400, 1)
    return image
Example #6
def frame_by_frame_prediction(batch_holder, model):
    # log the time
    dt1 = datetime.now()
    # read each image frame by frame
    for frame in range(0, batch_holder.shape[0]):
        image = batch_holder[frame]
        image = image.reshape(
            (1, image.shape[0], image.shape[1], image.shape[2]))
        #image = preprocess_input(image)
        pred = model.predict(image)
        #decode_predictions(pred,frame)

    # log the final time
    dt2 = datetime.now()
    time_diff_frame = dt2 - dt1
    return time_diff_frame.total_seconds() * 1000
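
# Timing sketch with synthetic data (shapes are assumptions): ten random
# 224x224 RGB frames through an ImageNet VGG16.
import numpy as np
from tensorflow.keras.applications.vgg16 import VGG16

batch = np.random.rand(10, 224, 224, 3).astype('float32')
elapsed_ms = frame_by_frame_prediction(batch, VGG16(weights='imagenet'))
print('%.1f ms for %d frames' % (elapsed_ms, batch.shape[0]))
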
Example #7
 def predict(self, image_path):
     image = self._encode(image_path, self.model_inception)
     image = image.reshape((1,2048))
     in_text = '#START'
     for i in range(self.max_length):
         sequence = [self.word_to_position[w] for w in in_text.split() if w in self.word_to_position]
         sequence = pad_sequences([sequence], maxlen=self.max_length)
         yhat = self.model.predict([image, sequence], verbose=0)
         yhat = np.argmax(yhat)
         word = self.position_to_word[yhat]
         in_text += ' ' + word
         if word == '#END':
             break
     final = in_text.split()
     final = final[1:-1]
     final = ' '.join(final)
     return final
def extract_features(filename):
    # load the model
    model = Xception()
    # re-structure the model: drop the classification head, keep 'avg_pool'
    # (layers.pop() is ineffective in modern Keras, so rebuild instead)
    model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
    # load the photo at Xception's expected input size (299, not 224)
    image = load_img(filename, target_size=(299, 299))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the Xception model
    image = preprocess_input(image)
    # get features
    feature = model.predict(image, verbose=0)
    return feature
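
# Usage sketch; 'photo.jpg' is a placeholder path.
feature = extract_features('photo.jpg')
print(feature.shape)  # (1, 2048): Xception's global-average-pool output
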
def handle_requests(socket):
    # Set tensorflow configs
    tf_config = K.tf.ConfigProto()
    tf_config.gpu_options.per_process_gpu_memory_fraction = CarConfig.classifier[
        "gpu_memory_frac"]
    K.set_session(K.tf.Session(config=tf_config))
    init = K.tf.global_variables_initializer()
    sess = K.get_session()
    sess.run(init)

    # Load model once
    model, loaded_model_json = load_model_and_json()
    print("Car classifier is ready to roll.")
    while True:
        result_dict = {}
        predict_list = []
        message = "OK"

        # Receive image and predict classes
        try:
            request = socket.recv()
            image = zmq_comm.decode_request(request)
            # Preprocess the image
            image = image * 1. / 255
            image = cv2.resize(image, (299, 299))
            image = image.reshape((1, ) + image.shape)
            # Feed image to classifier
            preds = model.predict(image)[0]
            predict_list = classifyIndices(loaded_model_json, preds,
                                           CarConfig.classifier["n"])
        except tf.errors.OpError as e:
            message = e.message
        except Exception as e:
            message = str(e)

        predictions = []
        tags = ["model", "score"]
        for pred in predict_list:
            predictions.append(dict(zip(tags, [pred.name, str(pred.score)])))

        result_dict["result"] = predictions
        result_dict["message"] = message
        socket.send_json(result_dict)
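
# Minimal client sketch for the REQ/REP loop above. The payload encoding is
# an assumption -- it must match whatever zmq_comm.decode_request expects --
# and the endpoint address is a placeholder.
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect('tcp://localhost:5555')
with open('car.jpg', 'rb') as f:   # placeholder image file
    sock.send(f.read())
print(sock.recv_json())            # e.g. {'result': [...], 'message': 'OK'}
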
Example #10
def augment_cnn(input_path='./dataset/Train',
                output_path='./dataset/Augmented',
                count=10):
    ''' Augments images and saves them into a new directory '''
    # the generator configuration is identical for every file,
    # so build it once rather than once per image
    gen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        zca_epsilon=1e-06,
        rotation_range=30,
        width_shift_range=30,
        height_shift_range=30,
        brightness_range=[0.6, 0.8],
        shear_range=4.0,
        zoom_range=[0.8, 1.0],
        channel_shift_range=10,
        fill_mode='reflect',
        cval=0.0,
        horizontal_flip=True,
        vertical_flip=True,
        rescale=1. / 255,
        preprocessing_function=None,
        data_format='channels_last',
        validation_split=0.2,
        #interpolation_order=1,
        dtype='float32')
    for Images in os.listdir(input_path):
        img = load_img('{}/{}'.format(input_path, Images))
        name = os.path.splitext(Images)[0]  # robust to extensions of any length
        size = img.size
        image = img_to_array(img)
        image = image.reshape(1, size[1], size[0], 3)
        image = image.astype('float32')
        gen.fit(image)
        images_flow = gen.flow(image, batch_size=1)
        for i, new_images in enumerate(images_flow):
            new_image = array_to_img(new_images[0], scale=True)
            output = '{}/Aug_{}-{}.jpg'.format(output_path, name, i + 1)
            print(output)
            new_image.save(output)
            if i >= count - 1:
                break
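
# Usage sketch: the paths are the function's own defaults, and the output
# directory must already exist (PIL's save() will not create it).
augment_cnn(input_path='./dataset/Train',
            output_path='./dataset/Augmented',
            count=5)  # writes Aug_<name>-1.jpg ... Aug_<name>-5.jpg per image
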
Example #11
def home():
    # note: loading and compiling the model on every request is slow;
    # in production, load it once at startup instead
    model = load_model('models/MobileNetV2_3.h5')
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    if request.method == 'GET':
        # show the upload form
        return render_template('index.html')

    if request.method == 'POST':
        image_file = request.files['image']
        filename = image_file.filename
        filepath = os.path.join('tmp/uploaded/', filename)
        image_file.save(filepath)
        image = imread(filepath)
        image = face_extractor(image)
        image = image / 255
        image = cv2.resize(image, (160, 160))
        prediction = model.predict(image.reshape(-1, 160, 160, 3))
        # scan the 105 celebrity classes for a confident match
        flag = 0
        non_celeb = 0
        for k in range(0, 105):
            if prediction[0][k] > 0.90:
                j = k
                flag = 1
                break
            elif prediction[0][k] > 0.5:
                non_celeb = 1
        if flag == 0:
            if non_celeb == 1:
                return render_template(
                    'index.html', prediction='Non Celebrity Face Detected')
            return render_template('index.html', prediction='No Face Found')
        predicted_class = get_name(j)[30:-1]
        return render_template('index.html', prediction=predicted_class)
def predict(image1):
    # MobileNetV2 expects its own preprocessing (pixels scaled to [-1, 1]),
    # so use the matching preprocess_input rather than a plain /255
    from tensorflow.keras.applications.mobilenet_v2 import preprocess_input

    model = MobileNetV2(include_top=True, weights='imagenet')
    #model = load_model('tl_fine_tuning_InceptionResNetV2_120_breeds.h5')
    image = load_img(image1, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model
    image = image.reshape((-1, image.shape[0], image.shape[1], 3))
    image = preprocess_input(image)
    predictions = model.predict(image)
    labels = decode_predictions(predictions)
    label = labels[0][0]
    label, probability = label[1], round(label[2] * 100, 2)
    return (label, probability)
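
# Example call; 'dog.jpg' is a placeholder image path.
label, probability = predict('dog.jpg')
print(label, probability)
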
Example #13
def predict_captions(image):
    image = np.array(image)
    start_word = ["<start>"]
    while True:
        par_caps = [word_2_indices[i] for i in start_word]
        par_caps = sequence.pad_sequences([par_caps],
                                          maxlen=max_len,
                                          padding='post')
        preds = model.predict(
            [image.reshape(1, 4096),
             par_caps.reshape(1, max_len)])
        # greedily take the single most probable next word
        word_pred = indices_2_word[np.argmax(preds[0])]

        start_word.append(word_pred)

        if word_pred == "<end>" or len(start_word) > max_len:
            break

    return ' '.join(start_word[1:-1])
Example #14
def beam_search_pred(image, beam_index=3):
    image = np.array(image)
    start = [word_2_indices["<start>"]]
    start_word = [[start, 0.0]]

    while len(start_word[0][0]) < max_len:
        temp = []
        for s in start_word:
            par_caps = sequence.pad_sequences([s[0]],
                                              maxlen=max_len,
                                              padding='post')
            preds = model.predict(
                [image.reshape(1, 4096),
                 par_caps.reshape(1, max_len)])

            # take the beam_index most probable next words
            word_preds = np.argsort(preds[0])[-beam_index:]

            for w in word_preds:
                next_cap, prob = s[0][:], s[1]
                next_cap.append(w)
                prob += preds[0][w]
                temp.append([next_cap, prob])
        # keep only the beam_index highest-scoring candidate captions
        start_word = sorted(temp, key=lambda l: l[1])[-beam_index:]
    start_word = start_word[-1][0]
    inter_cap = [indices_2_word[i] for i in start_word]

    final_cap = []
    for i in inter_cap:
        if i != '<end>':
            final_cap.append(i)
        else:
            break
    final_cap = ' '.join(final_cap[1:])
    return final_cap
def upload_file():
  if request.method == 'GET':
    return render_template('index.html')
  if request.method == 'POST':
    # (optional) save the uploaded file
    #f = request.files['file']
    #filepath = "./static/" + datetime.now().strftime("%Y%m%d%H%M%S") + ".jpg"
    #f.save(filepath)

    # classify the upload with the model
    data = request.files['file'].read()
    pilimg = Image.open(io.BytesIO(data))

    ### load model and weights
    model = model_from_json(open('apple_orange_model.json').read())
    model.load_weights('apple_orange_weights.h5')

    # resize to 25x25 RGB, move channels first, flatten to one float32 vector
    image = np.array(pilimg.resize((25, 25)))
    image = image.transpose(2, 0, 1)
    image = image.reshape(1, image.shape[0] * image.shape[1] *
                          image.shape[2]).astype("float32")[0]
    x = np.array([image / 255.])
    result = model.predict_classes(x)
    proba = model.predict_proba(x)

    predict = result[0].tolist()
    result = classes[predict]
    predict_proba = proba[0].tolist()
    result_proba = predict_proba[predict]

    response = jsonify({'result': result, 'probability': result_proba})
    response.status_code = 200

    return response
Example #16
def predict():
    # catch the posted form data
    data = request.form
    # slice out the BASE64 payload (the first 22 characters are the
    # "data:image/png;base64," prefix) and decode it to bytes
    data = base64.decodebytes(bytes(data['value'][22:], 'utf-8'))
    image = Image.open(io.BytesIO(data))
    # convert the image to greyscale
    image = image.convert('L')
    # resize with smoothing (LANCZOS; ANTIALIAS was removed in Pillow 10)
    image = image.resize((28, 28), Image.LANCZOS)
    # convert the image to an array
    image = np.asarray(image)
    # scale pixel intensities to [0, 1]
    image = image.astype('float32') / 255
    # reshape to the (1, 28, 28, 1) shape the model was trained on
    image = image.reshape(1, 28, 28, 1)
    # index of the class with the greatest predicted probability
    number = np.argmax(loaded_model.predict(image))
    # return the predicted number as the response
    if number is not None:
        return ("PREDICTED NUMBER : " + str(number))
    else:
        return ("UNABLE TO PREDICT")
Example #17
        ValueError("You must pass an image path when using prediction mode.")
    from PIL import Image
    # Create directories if needed
    if not os.path.isdir("%s" % ("Predictions")):
        os.makedirs("%s" % ("Predictions"))

    # Read in your image
    image = cv2.imread(args.image)
    image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    image = np.array(image)
    #image = Image.open(args.image)
    #print(image.shape)
    #image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
    save_image = image
    image = np.float32(cv2.resize(image, (HEIGHT, WIDTH)))
    image = preprocessing_function(image.reshape(1, HEIGHT, WIDTH, 3))

    class_list_file = "./checkpoints/" + args.model + "_class_list.txt"

    class_list = utils.load_class_list(class_list_file)

    finetune_model = utils.build_finetune_model(base_model,
                                                num_classes=len(class_list),
                                                dropout=args.dropout,
                                                fc_layers=FC_LAYERS)

    finetune_model.load_weights("./checkpoints/" + args.model +
                                "_model_weights.h5")

    # Run the classifier and print results
    st = time.time()
Example #18
# extract features from each photo
for val in data:
    print(val['image_url'])
    # load an image from file
    image = loadImage(val['image_url'])
    if image is not None and image.shape == (224, 224, 3):

        img_names.append(val['image_url'])
        poems.append(val['poem'])
        seq_poems.append('***startseq ' + val['poem'] + ' endseq***')
        vocab.update(val['poem'].split())

        # reshape data for the model
        image = image.reshape(
            (1, image.shape[0], image.shape[1], image.shape[2]))
        # prepare the image for the VGG model
        image = preprocess_input(image)

        # get features
        feature = model.predict(image, verbose=0)
        featurelist.append(feature)
        # get image id
        image_id = val['id']

        # store feature
        #        features[image_id] = feature

        print('>%s' % image_id)
    else:
        # skip images that failed to load or have the wrong shape
        print('skipping %s' % val['image_url'])
Example #19
def getImagetoPixels_imdb(image_path, db='imdb'):  #db = wiki or imdb
    image = cv2.imread('./data/{}_crop/{}'.format(db, image_path[0]),
                       cv2.IMREAD_COLOR)
    image = cv2.resize(image, target_size)
    return image.reshape(1, -1)[0]
scores = model.evaluate(X_train, Y_train)
print("Loss = " + str(scores[0]))
print("Train Accuracy = " + str(scores[1]))

import imageio  # scipy.ndimage.imread was removed in SciPy 1.2; imageio replaces it

files = os.listdir('testresize/')
test = pd.DataFrame()
for i in range(len(files)):
    fname = 'testresize/' + files[i]
    name, _ = os.path.splitext(fname)
    name = name + '.jpg'
    if os.path.exists(name):
        image = np.array(imageio.imread(fname))
        my_image = image.reshape((1, 64 * 64 * 3))
        single_data = pd.DataFrame(my_image)
        # DataFrame.append was removed in pandas 2.0; concat is the replacement
        test = pd.concat([test, single_data], ignore_index=True)
    percent = float(i) * 100 / float(len(files))
    sys.stdout.write("%.4f\r" % percent)
    sys.stdout.flush()
test_X = np.zeros((len(test), 64, 64, 3))
for i in range(len(test)):
    test_X[i] = np.array(test.loc[i]).reshape(64, 64, 3)
    percent = float(i) * 100 / float(len(test))
    sys.stdout.write("%.4f\r" % percent)
    sys.stdout.flush()
X_test_orig = test_X
X_test_orig = X_test_orig.astype(float)
Example #21
def process(image):
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    features = model.predict(image)
    return features
Example #22
 def loss_gradient(self, image):
     return np.array(self.loss_function_gradient([image.reshape(self.e_image_shape).astype(K.floatx())])).\
         astype('float64').flatten()
model.save('mathSymbolsPredictor.model')
np.save('classes.npy', label_encoder.classes_)

# ## New predictions

import keras.models
#model = keras.models.load_model('mathSymbolsPredictor.model')

label_encoder2 = LabelEncoder()
label_encoder2.classes_ = np.load('classes.npy')

PATH = 'hasy-data/v2-00100.png'

image = keras.preprocessing.image.img_to_array(pil_image.open(PATH))
image /= 255.
predicted = (model.predict(image.reshape(1, 32, 32, 3)))
inverted = label_encoder2.inverse_transform([np.argmax(predicted)])
print(inverted[0], np.max(predicted))
(Image(url=PATH))

# # Convert your model into .tflite
# ## To deploy it on an application

RPS_SAVED_MODEL = "rps_saved_model"
tf.saved_model.save(model, RPS_SAVED_MODEL)

get_ipython().run_cell_magic(
    'bash', '-s $RPS_SAVED_MODEL',
    'saved_model_cli show --dir $1 --tag_set serve --signature_def serving_default'
)
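
# The cell above only inspects the SavedModel's serving signature; the actual
# .tflite conversion (not shown in the snippet) would look roughly like this:
converter = tf.lite.TFLiteConverter.from_saved_model(RPS_SAVED_MODEL)
tflite_model = converter.convert()
with open('rps_model.tflite', 'wb') as f:  # output file name is a placeholder
    f.write(tflite_model)
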
def predict_GUI_number(UI_model, image):  # image is a numpy array
    image = image.reshape(-1, 28, 28, 1)
    pred = UI_model.predict([image])[0]
    return np.argmax(pred), max(pred)
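
# Sketch: classifying a blank 28x28 canvas; the model path is hypothetical.
import numpy as np
from tensorflow.keras.models import load_model

digit, confidence = predict_GUI_number(load_model('mnist_cnn.h5'),
                                       np.zeros((28, 28), dtype='float32'))
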
import os
import random

import cv2
import keras
import matplotlib.pyplot as plt

#################################### import model
model = keras.models.load_model('mymodel.h5')
test_dir = 'D:\\tomato\\images\\test'  # renamed from `dir` to avoid shadowing the builtin
x = []
images = os.listdir(test_dir)

for imageString in images:
    path = os.path.join(test_dir, imageString)
    try:
        image = cv2.imread(path)
        image = cv2.resize(image, (100, 100))
        image = image.reshape(-1, 100, 100, 3)
        x.append(image)
    except Exception:
        print('error reading %s' % path)

random.shuffle(x)
z0 = []
y0 = []
y1 = []
z1 = []
y2 = []
z2 = []
y3 = []
z3 = []
count = 0
for xx in x:
Example #26
def load_test_data():
    images = []
    names = []
    size = 64, 64
    for image in os.listdir(test_dir):
        temp = cv2.imread(test_dir + '/' + image)
        temp = cv2.resize(temp, size)
        images.append(temp)
        names.append(image)
    images = np.array(images)
    images = images.astype('float32') / 255.0
    return images, names


test_images, test_img_names = load_test_data()

# make predictions on each image and collect them in `predictions`
# (Sequential.predict_classes was removed in TF 2.6; argmax over predict
# is the equivalent)
predictions = [
    np.argmax(model.predict(image.reshape(1, 64, 64, 3)), axis=1)[0]
    for image in test_images
]


def get_labels_for_plot(predictions):
    predictions_labels = []
    for i in range(len(predictions)):
        for ins in labels_dict:
            if predictions[i] == labels_dict[ins]:
                predictions_labels.append(ins)
                break
    return predictions_labels
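
# Illustration with a hypothetical labels_dict mapping class names to the
# integer indices the model predicts:
labels_dict = {'cat': 0, 'dog': 1, 'bird': 2}
print(get_labels_for_plot([1, 0, 2]))  # ['dog', 'cat', 'bird']
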
Example #27
 def save_image(self, image):
     imsave(
         "static/css/images/" + self.picture_image_filepath.split(".")[0] +
         '_' + str(self.step) + '.jpg',
         self.post_process_image(image.reshape(self.e_image_shape).copy()))
def initiate():

    names = ['List of names']
    video = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier(
        "C:\\Users\\DRV\\Desktop\\haarcascade_frontalface_default.xml")
    id = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    a = 0
    while True:
        a = a + 1
        check, frame = video.read()
        faces = face_cascade.detectMultiScale(frame,
                                              scaleFactor=1.30,
                                              minNeighbors=10,
                                              minSize=(50, 50))
        print(faces)
        print(check)
        print(frame)
        for x, y, w, h in faces:
            img = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            img1 = frame[y:y + h, x:x + w]
            resized_img = cv2.resize(img1, (224, 224))
            image = img_to_array(resized_img)
            image = image.reshape(
                (1, image.shape[0], image.shape[1], image.shape[2]))
            image = preprocess_input(image)
            prediction = model.predict(image)

            # scan the 27 identity classes for a confident match; the
            # original's `if int(identity) in range(0, n)` guard and trailing
            # counters were unreachable dead code and are dropped here
            n = 27
            for i in range(0, n):
                if prediction[0][i] >= 0.9900:
                    identity = str(i)
                    if int(identity) == 6:
                        id = 7194
                    elif int(identity) == 1:
                        id = 6983
                    elif int(identity) == 7:
                        id = 5847
                    elif int(identity) == 11:
                        id = 5987
                    else:
                        print("None")
                    break
                else:
                    identity = str(9999)

            #cv2.putText(img,identity,(x,y+h),cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,0),1)#last part=font,red,thickness of text
        #cv2.imshow("Capturing image/video",frame)
        #if (cv2.waitKey(1)==ord('q')):
        break  # process only the first captured frame
    webbrowser.open_new_tab("url")

    video.release()
    cv2.destroyAllWindows()

    return render_template("index.html")
def initiate():

    names = ['Aakash', 'Ahad', 'Anirudha', 'Ankita', 'Archana', 'Ashwin',
             'Dhruv', 'Ishani', 'Manish', 'Meera', 'Neha', 'Noufan', 'Pooja',
             'Praveen', 'Raj', 'Rohit', 'Shital', 'Shraddha', 'Siddharth',
             'Smitha', 'Sneha', 'Suhas', 'Suresh', 'Taranjit', 'Tushar',
             'Umesh', 'Vaibhav']
    video = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier(
        "C:\\Users\\Noufanpmc\\Documents\\Aegis\\Capstone\\Project\\code\\haarcascade_frontalface_default.xml")
    id = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    a = 0
    while True:
        a = a + 1
        check, frame = video.read()
        faces = face_cascade.detectMultiScale(frame,
                                              scaleFactor=1.30,
                                              minNeighbors=10,
                                              minSize=(50, 50))
        print(faces)
        print(check)
        print(frame)
        for x, y, w, h in faces:
            img = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            img1 = frame[y:y + h, x:x + w]
            resized_img = cv2.resize(img1, (224, 224))
            image = img_to_array(resized_img)
            image = image.reshape(
                (1, image.shape[0], image.shape[1], image.shape[2]))
            image = preprocess_input(image)
            prediction = model.predict(image)

            # scan the 27 identity classes for a confident match
            # (dead branches from the original loop are dropped, as above)
            n = 27
            for i in range(0, n):
                if prediction[0][i] >= 0.9900:
                    identity = str(i)
                    if int(identity) == 6:
                        id = 7194
                    elif int(identity) == 1:
                        id = 6983
                    elif int(identity) == 7:
                        id = 5847
                    elif int(identity) == 11:
                        id = 5987
                    else:
                        print("None")
                    break
                else:
                    identity = str(9999)

            #cv2.putText(img,identity,(x,y+h),cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,0),1)#last part=font,red,thickness of text
        #cv2.imshow("Capturing image/video",frame)
        #if (cv2.waitKey(1)==ord('q')):
        break  # process only the first captured frame
    webbrowser.open_new_tab(
        "https://www.muniversity.mobi/login/face_login.php?token=" + "azbd" +
        str(id) + "&flag=1")

    video.release()
    cv2.destroyAllWindows()

    return render_template("index.html")
Example #30
def preprocess_image(image_path, target_size):

    image = prepare_image(image_path, target_size)
    image = image.reshape(-1, 224, 224, 3)

    return image
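
# Usage sketch; prepare_image is assumed to return a 224x224 RGB array,
# and 'sample.jpg' is a placeholder path.
batch = preprocess_image('sample.jpg', target_size=(224, 224))
print(batch.shape)  # (1, 224, 224, 3)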