Example no. 1
def load_img_scaled(input_path, target_shape, grayscale=False):
    # load the image, scale pixel values to [0, 1], and add a batch dimension
    img = image.load_img(input_path, target_size=target_shape, grayscale=grayscale)
    return np.expand_dims(image.img_to_array(img) / 255.0, axis=0)
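
A minimal usage sketch (hedged: the file path and model are placeholders, assuming the same image/np imports the snippet relies on):

batch = load_img_scaled("example.jpg", (224, 224))  # hypothetical file; result shape (1, 224, 224, 3)
# preds = model.predict(batch)  # 'model' is an assumed, already-trained Keras model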
Example no. 2
def load_img(input_path, target_shape, grayscale=False, mean=None, std=None):
    img = image.load_img(
        input_path, target_size=target_shape, grayscale=grayscale)
    img_arr = np.expand_dims(image.img_to_array(img), axis=0)
    if not grayscale:
        img_arr = preprocess_input(img_arr, mean=mean, std=std)  # assumes a custom preprocess_input; the stock Keras helpers take no mean/std arguments
    return img_arr
Example no. 3
if args["image"]:
    img = cv2.imread(args["image"])
    height, width = img.shape[:2]
    if width > 750 or width < 150:
        img = imutils.resize(img, width=600)
    height, width = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_clone = img.copy()
    rects = face_cascade.detectMultiScale(gray,
                                          scaleFactor=1.1,
                                          minNeighbors=5)
    if len(rects) == 0:
        roi = cv2.resize(gray, (28, 28))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        notSmiling, smiling = model.predict(roi)[0]
        label = "Smiling" if smiling > notSmiling else "Not Smiling"
        cv2.putText(img_clone, f"{label}: {max(notSmiling, smiling) * 100:.2f}%",
                    (width // 4, height // 2), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (255, 0, 25), 2)
    for fX, fY, fW, fH in rects:
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (28, 28))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        notSmiling, smiling = model.predict(roi)[0]
        label = "Smiling" if smiling > notSmiling else "Not Smiling"
        textpos = (fX, fY - 10)
        # (assumed continuation, mirroring the no-face branch above) draw the
        # label and the face bounding box
        cv2.putText(img_clone, f"{label}: {max(notSmiling, smiling) * 100:.2f}%",
                    textpos, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 25), 2)
        cv2.rectangle(img_clone, (fX, fY), (fX + fW, fY + fH), (255, 0, 25), 2)
Example no. 4
     print('new_learning:', init_lr)
 if num_iter == int(2*train_iter/3):
     init_lr = init_lr/10
     model.compile(optimizer=Adam(learning_rate=init_lr), loss='mse')
     print('new_learning:', init_lr)
 # randomly select a batch of samples
 # note: randint(1, n) draws from [1, n), so index 0 is never selected here
 select_case = [np.random.randint(1, len(all_label)) for _ in range(batch_size)]
 batch_input = []
 batch_output = []
 for i in range(batch_size):
     all_sensor_input = np.zeros((num_sensors, 84, 84*4, 3))
     all_sensor_output = np.zeros((num_sensors, 1))
     for idx_sensor in range(num_sensors):
         sensor_path = 'training_random/' + all_sensors[idx_sensor]
         # load the four consecutive frames and stack them side by side
         # along the width axis (frame k fills columns 84*(k-1):84*k)
         for frame_idx in range(4):
             img = image.load_img(sensor_path + '/' + str(frame_idx + 1) + '/' + filelist[select_case[i]],
                                  target_size=(84, 84))  # height-width
             all_sensor_input[idx_sensor, :, 84 * frame_idx:84 * (frame_idx + 1), :] = image.img_to_array(img) / 255
     batch_input.append(all_sensor_input.copy())
     #  get label data 
     img_index = int(filelist[select_case[i]][:-4])
     batch_output.append(all_label[select_case[i]]) 
 batch_input = np.array(batch_input) 
Example no. 5
from os import listdir, remove
from numpy import asarray
from numpy import save
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from PIL import Image
import numpy as np

for x in listdir("../q7_img"):
    for z in listdir("../q7_img/" + x):
        ind = 0
        for y in listdir("../q7_img/" + x + '/' + z):
            ind += 1
            p = "../q7_img/" + x + '/' + z + '/' + y
            print(p)
            arr = load_img(p, target_size=(224, 224))
            arr = img_to_array(arr).astype(np.uint8)
            print(arr)
            im = Image.fromarray(arr)
            remove(p)
            im.save("../q7_img/" + x + '/' + z + '/' + str(ind) + ".jpg")
Example no. 6
print("\nModel:", model_name)
print("Model version:", model_version)
print("Image:", image_path)
print("Port:", port)

host = "127.0.0.1"
server = host + ':' + port  # 'port' must already be a string here
model_version = int(model_version)
request_timeout = 10.0
image_filepaths = [image_path]

# Loading image
test_image = image.load_img(image_filepaths[0], target_size=(224, 224))
test_image = image.img_to_array(test_image)
test_image = test_image.astype('float32')
test_image = test_image / 255.0

# Create gRPC client and request
channel = grpc.insecure_channel(server)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = model_name
request.model_spec.version.value = model_version
request.model_spec.signature_name = "serving_default"
request.inputs['vgg16_input'].CopyFrom(
    tensor_util.make_tensor_proto(test_image,
                                  shape=[1] + list(test_image.shape)))
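
The request above is built but never sent in this excerpt; a hedged continuation using the request_timeout defined earlier:

# hedged continuation: send the request and print the response tensors
result = stub.Predict(request, request_timeout)
print(result.outputs)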
Example no. 7
    def splitSearch(self, markerImg):
        rightNow = rospy.Time.now()
        IMG_HEIGHT, IMG_WIDTH, colors = markerImg.shape
        tiles = []
        results = []
        has_marker_chance = []  # for debugging
        no_marker_percent = []  # for debugging

        # split into 9 tiles
        step_width = IMG_WIDTH // 3
        step_height = IMG_HEIGHT // 3

        if (VERBOSE):
            now = str(rospy.Time.now())  # note the (): without them this stringifies the method object
            cv2.imshow("on Laptop ", markerImg)
            cv2.waitKey(1)

        if (SPAM):
            rospy.loginfo("IMG: {}x{}".format(round(IMG_WIDTH, 2),
                                              round(IMG_HEIGHT, 2)))

        for i in range(3):
            for j in range(3):
                x_start, x_end = i * step_width, (i + 1) * step_width
                y_start, y_end = j * step_height, (j + 1) * step_height

                # note: numpy images index as [row, col]; x (width-based)
                # selects rows here and y selects columns, so the resize
                # masks any width/height mismatch
                tiles.append(
                    cv2.resize(markerImg[x_start:x_end, y_start:y_end],
                               (step_width, step_height)))
        '''		
		if(VERBOSE):
			rospy.loginfo(len(tiles))
			for x in range(9):
				cv2.imshow(str(x),tiles[x] )
				cv2.waitKey(0)
				#cv2.destroyAllWindows()
		'''

        if (SPAM):
            rospy.loginfo("{}:{}x{}:{}".format(round(x_start, 2),
                                               round(x_end, 2),
                                               round(y_start, 2),
                                               round(y_end, 2)))

        certaintyLevel = 5
        # thread safety in Python is a nightmare
        with graph.as_default():
            with thread_session.as_default():

                for tile in tiles:
                    tile = numpy.expand_dims(img_to_array(
                        tile.astype("float") / 255.0),
                                             axis=0)
                    #cv2.imshow('',tile)
                    #cv2.waitKey(0)
                    (noMarker, marker) = model.predict(tile)[0]
                    probability = round((max(noMarker, marker) * 100), 2)
                    certainty = ((marker - noMarker) * 100)

                    if (SPAM):  # output the difference in certainty
                        rospy.loginfo("degree of certainty")
                        rospy.loginfo(certainty)
                    has_marker_chance.append(marker * 100)  #fill debug array
                    no_marker_percent.append(noMarker * 100)  #fill debug array

                    # only trust a marker > noMarker result when the
                    # margin also exceeds certaintyLevel
                    if ((marker > noMarker)
                            and (certainty > certaintyLevel)):
                        has_marker = True
                    else:
                        has_marker = False
                    results.append([has_marker, probability])
            # output formatted array contents of the AI results
            if (VERBOSE):
                rospy.loginfo("certainty of Marker found")
                rospy.loginfo("{:6.2f} {:6.2f} {:6.2f}".format(
                    has_marker_chance[0], has_marker_chance[3],
                    has_marker_chance[6]))
                rospy.loginfo("{:6.2f} {:6.2f} {:6.2f}".format(
                    has_marker_chance[1], has_marker_chance[4],
                    has_marker_chance[7]))
                rospy.loginfo("{:6.2f} {:6.2f} {:6.2f}".format(
                    has_marker_chance[2], has_marker_chance[5],
                    has_marker_chance[8]))

                rospy.loginfo(" percent certainty that there is No Marker")
                rospy.loginfo("{:6.2f} {:6.2f} {:6.2f}".format(
                    no_marker_percent[0], no_marker_percent[3],
                    no_marker_percent[6]))
                rospy.loginfo("{:6.2f} {:6.2f} {:6.2f}".format(
                    no_marker_percent[1], no_marker_percent[4],
                    no_marker_percent[7]))
                rospy.loginfo("{:6.2f} {:6.2f} {:6.2f}".format(
                    no_marker_percent[2], no_marker_percent[5],
                    no_marker_percent[8]))
                rightDuration = rospy.Time.now() - rightNow
                rospy.logwarn(
                    "AI took {} Sec ({} nSec) to process the image".format(
                        rightDuration.to_sec(), rightDuration.to_nsec()))
        return results, has_marker_chance, no_marker_percent
Example no. 8
interpreter1.allocate_tensors()
input_details1 = interpreter1.get_input_details()
output_details1 = interpreter1.get_output_details()

# This is the interpreter for GRU_8_8
interpreter2 = tf.lite.Interpreter(model_path='gru.tflite')
interpreter2.allocate_tensors()
input_details2 = interpreter2.get_input_details()
output_details2 = interpreter2.get_output_details()

feature_seq = list()  # this will contain [1, 20, 7*7*1280]

for i in range(1, 21):
    seq = list()  # list holding a single frame
    img = image.load_img('pic/{}.jpg'.format(i), target_size=(224, 224))
    img = image.img_to_array(img, dtype=np.float32)
    seq.append(img)
    seq = np.array(seq, dtype=np.float32)

    interpreter1.set_tensor(input_details1[0]["index"],
                            seq)  # seq is the input
    interpreter1.invoke()
    feature = interpreter1.get_tensor(output_details1[0]["index"])

    print("feature {} extracted".format(i))
    feature = feature.reshape(-1)
    feature_seq.append(feature)

feature_seq = np.array(feature_seq, dtype=np.float32)
feature_seq = feature_seq.reshape(-1, 20, 7 * 7 * 1280)
print(feature_seq.shape)
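
interpreter2 (the GRU model) is allocated above but never invoked in this excerpt; a plausible, hedged continuation:

# hedged continuation: feed the assembled feature sequence to the GRU model
interpreter2.set_tensor(input_details2[0]["index"], feature_seq)
interpreter2.invoke()
prediction = interpreter2.get_tensor(output_details2[0]["index"])
print(prediction)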
Example no. 9
def detect_and_predict_mask(frame, faceNet, maskNet):
    # grab the dimensions of the frame and construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))

    # pass the blob through the face detection network
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # initialize the list of faces, their corresponding locations,
    # and the list of face predictions
    faces = []
    locs = []
    preds = []
    name = ""

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI (it is converted to RGB, resized to
            # 224x224, and preprocessed further below)
            face = frame[startY:endY, startX:endX]
            # ensure the face width and height are sufficiently large
            # if fW < 20 or fH < 20:
            # 	continue

            # construct a blob for the face ROI, then pass the blob
            # through our face embedding model to obtain the 128-d
            # quantification of the face
            faceBlob = cv2.dnn.blobFromImage(face,
                                             1.0 / 255, (96, 96), (0, 0, 0),
                                             swapRB=True,
                                             crop=False)
            embedder.setInput(faceBlob)
            vec = embedder.forward()

            # perform classification to recognize the face
            preds = recognizer.predict_proba(vec)[0]
            j = np.argmax(preds)
            proba = preds[j]
            name = le.classes_[j]

            # convert the face from BGR to RGB, resize it to
            # 224x224, and preprocess it
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)

            # add the face and bounding box coordinates to their respective lists
            faces.append(face)
            locs.append((startX, startY, endX, endY))

    # only make predictions if at least one face was detected
    if len(faces) > 0:
        # make batch predictions on *all* faces at once rather than
        # one-by-one predictions in the `for` loop above
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    # return the face locations, their corresponding predictions,
    # and the recognized name
    return (locs, preds, name)
Example no. 10
def evaluate_model(list_number=1):
    print("👾 evaluating model on list number {}".format(list_number))
    print("👾 loading weights")
    model = tf.compat.v1.keras.experimental.load_from_saved_model(
        path.join(MODEL_ROOT, "model"))
    model.build(IMAGE_SIZE + (3, ))
    print(model.summary())
    print("👾 reading test file")
    with open(
            path.join(ANNOTATIONS_ROOT, "testlist0{}.txt".format(list_number)),
            "r") as file:
        temp = file.read()
    videos = temp.split('\n')
    # pick up only the classes I need
    videos = list(
        set([a for a in videos for b in CLASSES if b in a.split("/")[0]]))

    # creating the dataframe
    print("👾 creating dataframe from test file")
    test = pd.DataFrame()
    test['video'] = videos
    test = test[:-1]
    test_videos = test['video']

    # creating the tags
    print("👾 generating tags for labels")
    train = pd.read_csv(path.join(ANNOTATIONS_ROOT, 'trainlist01_frames.csv'))
    y = train['label']
    y = pd.get_dummies(y)

    # creating two lists to store predicted and actual tags
    predict = []
    actual = []

    # checking if temp root exist otherwise create it
    if not path.exists("temp"):
        print("👾 creating folder temp")
        os.makedirs("temp")

    # for loop to extract frames from each test video
    print("👾 extracting frames from test videos")
    for i in tqdm(range(test_videos.shape[0])):
        count = 0
        videoFile = test_videos[i]
        cap = cv2.VideoCapture(path.join(
            VIDEOS_ROOT, videoFile))  # capturing the video from the given path
        frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate
        x = 1
        # removing all other files from the temp folder
        files = glob('temp/*')
        for f in files:
            os.remove(f)
        while (cap.isOpened()):
            frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
            ret, frame = cap.read()
            if (ret != True):
                break
            if (frameId % math.floor(frameRate) == 0):
                # storing the frames of this particular video in temp folder
                filename = 'temp/' + "_frame%d.jpg" % count
                count += 1
                cv2.imwrite(filename, frame)
        cap.release()

        # reading all the frames from temp folder
        images = glob("temp/*.jpg")

        prediction_images = []
        for frame_path in images:  # avoid shadowing the outer loop's `i`
            img = load_img(frame_path, target_size=IMAGE_SIZE + (3, ))
            img = img_to_array(img)
            prediction_images.append(img)

        # converting all the frames for a test video into numpy array
        prediction_images = np.array(prediction_images)
        # predicting tags for each array
        prediction = np.argmax(model.predict(prediction_images), axis=-1)
        print(prediction)
        # appending the mode of predictions in predict list to assign the tag to the video
        predict.append(y.columns.values[s.mode(prediction)[0][0]])
        # appending the actual tag of the video
        actual.append(videoFile.split('/')[0])

    print("evaluation done")
    print(accuracy_score(predict, actual) * 100)
Example no. 11
        endX = (x + 1) * stepX
        endY = (y + 1) * stepY

        # add the (x, y)-coordinates to our cell locations list
        row.append((startX, startY, endX, endY))

        # crop the cell from the warped transform image and then
        # extract the digit from the cell
        cell = warped[startY:endY, startX:endX]

        digit = get_digit(cell)

        if digit is not None:
            digit_preprocessed = cv2.resize(digit, (28, 28))
            digit_preprocessed = digit_preprocessed.astype("float") / 255.0
            digit_preprocessed = img_to_array(digit_preprocessed)
            digit_preprocessed = np.expand_dims(digit_preprocessed, axis=0)

            pred = model.predict(digit_preprocessed).argmax()
            board[y, x] = pred

    # add the row to our cell locations
    cellLocs.append(row)

# print any cells that are still empty (zero) before solving
for i in board:
    for y in i:
        if y == 0:
            print(y)

solve(board)
Example no. 12
def preprocessing_func(image):
    '''Function that preprocesses the input'''
    preproc_img = img_to_array(image)
    preproc_img = np.expand_dims(preproc_img, axis=0)  # add a batch dimension
    preproc_img = preprocess_input(preproc_img)
    return preproc_img
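
A minimal usage sketch (hedged: the file name and model are placeholders, and load_img plus a preprocess_input matching the target network are assumed to be imported):

img = load_img('sample.jpg', target_size=(224, 224))  # hypothetical file
batch = preprocessing_func(img)  # shape (1, 224, 224, 3)
# preds = model.predict(batch)  # 'model' is an assumed Keras model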
Example no. 13
def load_data(directory,
              classes,
              rescale=True,
              preprocess=None,
              verbose=False):
    """ Helper function to load data in a Keras friendly format """

    if not os.path.exists(directory):
        raise FileNotFoundError(directory + ' not found')

    # Check directories
    directories = os.listdir(directory)
    dataset = list()

    for d in directories:
        if d not in classes or not os.path.isdir(os.path.join(directory, d)):
            print('Skipping', d)
            continue
        if verbose:
            print('Loading directory', d)
        for f in os.listdir(os.path.join(directory, d)):

            try:
                # Load image
                img = load_img(os.path.join(os.path.join(directory, d), f),
                               color_mode='rgb',
                               target_size=[256, 256])
            except PIL.UnidentifiedImageError:
                continue

            # Convert to numpy array
            img = img_to_array(img)

            # Apply any preprocess function and rescaling
            if preprocess is not None:
                img = preprocess(img)
            else:
                if rescale:
                    img /= 255

            # Get index number
            class_num = classes.index(d)

            # Append
            dataset.append([img, class_num])

    # Shuffle dataset
    random.shuffle(dataset)

    x = list()
    y = list()

    # Create our x and y arrays
    for img, label in dataset:
        x.append(img)
        y.append(label)

    # Reshape x,y into the required formats
    x = np.array(x).reshape(-1, 256, 256, 3)
    y = to_categorical(y)

    return x, y, 0
Example no. 14
def image_to_tensor(image_path, image_size):
    img = image.load_img(image_path, target_size=image_size[:2])
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
Example no. 15
classes = ['Brabo', 'Feliz', 'Neutro']  # 'Angry', 'Happy', 'Neutral' in Portuguese; kept as the trained labels

while True:

	ret, img = cap.read()
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	faces = face_cascade.detectMultiScale(gray, 1.3, 5)
	#print(faces)
	for (x,y,w,h) in faces:
		cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

		rosto = img[y:y+h,x:x+w]
		rosto = cv2.resize(rosto, (100, 100))
		rosto = image.img_to_array(rosto)
		rosto = rosto.reshape((1,) + rosto.shape)
    
		# predict_classes was removed in newer Keras; np.argmax(model.predict(rosto), axis=-1) is the modern equivalent
		predict = model.predict_classes(rosto)
		print(classes[predict[0]])
		cv2.putText(img, classes[predict[0]], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0,255,0), 2)
		print('Círculo')  # debug print ("Circle" in Portuguese), kept as in the original

	cv2.imshow('Image',img)

	if cv2.waitKey(1) == 27:
		break

cap.release()
cv2.destroyWindow('Image')
cv2.destroyAllWindows()
Example no. 16
def load_image(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = imagenet_utils.preprocess_input(x)
    return x
Example no. 17
                    use_batch_norm=False,
                    num_classes=1,
                    filters=64,
                    dropout=0.2,
                    output_activation='sigmoid')

model.compile(optimizer='Adam', loss='MeanSquaredError')

print(np.shape(x_train), " ", np.shape(y_train))

model.fit(x=x_train, y=y_train, batch_size=32, epochs=5, validation_split=.2)

#input_img = image.load_img('./love-me-do.png', target_size=(640,480))
input_img = pixels_from_path('./love-me-do.jpg')
print("input shape", np.array(input_img).shape)
img_array = image.img_to_array(input_img)
print("input array", np.array(img_array).shape)
img_batch = np.expand_dims(img_array, axis=0)
print("input batch", np.array(img_batch).shape)
img_preprocessed = preprocess_input(img_batch)
print("input pre", np.array(img_preprocessed).shape)

predictions = model.predict(img_preprocessed)

print("predictions shape is ", predictions.shape)
print("prediction is ", predictions)

test = np.reshape(predictions, (1120, 1280))

print("Test ", test.shape)
print(test)
Example no. 18
if player_num == 1:
    while not globalvar.leave_game:

        # read one frame from the webcam
        suc, frame_A = camera_A.read()
        # display the frame
        cv2.imshow('frame', frame_A)


        cv2.waitKey(100)

        frame_A = cv2.cvtColor(frame_A, cv2.COLOR_BGR2RGB)
        frame_A = cv2.resize(frame_A, (img_x, img_y), interpolation=cv2.INTER_NEAREST)
        frame_A = np.array(frame_A) / 255
        frame_A = image.img_to_array(frame_A)
        frame_A = np.expand_dims(frame_A, axis = 0)

        predict_output_A = model.predict(frame_A)
        max_probability_index_A = predict_output_A.argmax()

        max_probability_A = np.max(predict_output_A)
        print("Player A:", label_list[max_probability_index_A], ":", "{:.2%}".format(max_probability_A), end="\r")


if player_num == 2:
    while not globalvar.leave_game:

        # read one frame from each webcam
        suc, frame_A = camera_A.read()
        suc, frame_B = camera_B.read()
Example no. 19
        else:
            ret, img = cam.read()
            if not ret:
                print('error')
                break

        key = cv2.waitKey(1)
        if key == 27:  # when ESC key is pressed break
            break

        count += 1
        if count > max_count:
            X = []
            img_org = cv2.resize(img, (640, 480))
            img = cv2.resize(img, (256, 256))
            img = img_to_array(img)
            X.append(img)
            X = np.asarray(X)
            X = X / 255.0
            start = time.time()
            preds = model_pred.predict(X)
            elapsed_time = time.time() - start

            pred_label = ''

            label_num = 0
            tmp_max_pred = 0
            print(preds)
            for i in preds[0]:
                # (assumed continuation) track the most probable label
                if i > tmp_max_pred:
                    pred_label = labels[label_num]
                    tmp_max_pred = i
                label_num += 1
Example no. 20
def process_image(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    pImg = mobilenet_v2.preprocess_input(img_array)
    return pImg
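
A hedged usage sketch pairing process_image with a stock ImageNet MobileNetV2 (the file name is a placeholder):

model = mobilenet_v2.MobileNetV2(weights='imagenet')
pImg = process_image('elephant.jpg')  # hypothetical file
preds = model.predict(pImg)
print(mobilenet_v2.decode_predictions(preds, top=3))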
Example no. 21
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # ensure the detected bounding box does not fall outside
            # the dimensions of the frame
            startX = max(0, startX)
            startY = max(0, startY)
            endX = min(w, endX)
            endY = min(h, endY)

            # extract the face ROI and then preprocess it in the exact
            # same manner as our training data
            face = frame[startY:endY, startX:endX]
            face = cv2.resize(face, (32, 32))
            face = face.astype("float") / 255.0
            face = img_to_array(face)
            face = np.expand_dims(face, axis=0)

            # pass the face ROI through the trained liveness detector
            # model to determine if the face is "real" or "fake"
            preds = model.predict(face)[0]
            j = np.argmax(preds)
            label = le.classes_[j]
            if label == 'fake':
                label = "{}: {:.4f}".format(label, preds[j])

                cv2.putText(frame, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 5, (0, 0, 255), 8)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (0, 0, 255), 2)
            else:
Example no. 22
    labels = []
    # with open(backup_dir + '/labels.txt','r') as f:
    with open(args.labels, 'r') as f:
        for line in f:
            labels.append(line.rstrip())
    print(labels)

    model_pred = model_from_json(open(args.model).read())
    model_pred.load_weights(args.weights)

    # model_pred.summary()

    X = []
    img_path = args.testfile
    img = img_to_array(load_img(img_path, target_size=(64, 64)))
    X.append(img)
    X = np.asarray(X)

    preds = model_pred.predict(X)

    pred_label = ""

    label_num = 0
    for i in preds[0]:
        if i == 1.0:
            pred_label = labels[label_num]
            break
        label_num += 1

    print("label=" + pred_label)
Example no. 23
start_time = time.time()

#load image
img_path = "cat.jpg" #animal class
#img_path = "other.jpg" #other class
#img_path = "person.jpg" #person class

#resize image
img = load_img(img_path, target_size=(299, 299))

#show image
plt.imshow(img)
plt.show()

#image to array
new_img = image.img_to_array(img)
new_img /= 255
new_img = np.expand_dims(new_img, axis=0)

# input_details[0]['index'] = the index which accepts the input
interpreter.set_tensor(input_details[0]['index'], new_img)
   
# run the inference
interpreter.invoke()
    
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
output_data = interpreter.get_tensor(output_details[0]['index'])
#print(output_data)    

#stop time
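
The "#stop time" marker is where the excerpt cuts off; a hedged completion matching the start_time at the top:

stop_time = time.time()
print("inference took {:.3f} s".format(stop_time - start_time))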
Example no. 24
def prepare_image(file):
    img_path = 'E:/Stay Away You!/DL_training/Mobile_Net/data/Mobilenet_sample/'
    img = image.load_img(img_path + file, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array_expand_dimension = np.expand_dims(img_array, axis=0)
    return tf.keras.applications.mobilenet.preprocess_input(img_array_expand_dimension)
Example no. 25
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img

# Let's define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model after
# the first.
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)

# Let's prepare a random input image of a cat or dog from the training set.
cat_img_files = [os.path.join(train_cats_dir, f) for f in train_cat_fnames]
dog_img_files = [os.path.join(train_dogs_dir, f) for f in train_dog_fnames]
img_path = random.choice(cat_img_files + dog_img_files)

img = load_img(img_path, target_size=(150, 150))  # this is a PIL image
x = img_to_array(img)  # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape)  # Numpy array with shape (1, 150, 150, 3)

# Rescale by 1/255
x /= 255

# Let's run our image through our network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)

# These are the names of the layers, so can have them as part of our plot
layer_names = [layer.name for layer in model.layers]


# Now let's display our representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
Example no. 26
# fine-tune your network parameters at the end by using a low learning rate like 0.00001
model.save('hyperparameter1.h5')

#from tensorflow.keras.models import load_model
#model = load_model('LETSDOTHIS.h5')

from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
results = []
test = pd.read_csv('test.csv')
length_test = len(test['Image_File'])
testDF = pd.read_csv('test.csv')

for i in range(length_test):
    img1 = image.load_img('DataSet/Test Images/' + test['Image_File'][i],
                          target_size=(720, 480))
    img = image.img_to_array(img1)
    img = img / 255
    # create a batch of size 1 [N,H,W,C]
    img = np.expand_dims(img, axis=0)
    prediction = model.predict(img, batch_size=None,
                               steps=1)  # gives all class prob.
    if prediction[0, 0] > 0.5:
        testDF.loc[i, 'Class'] = "Small"
    else:
        testDF.loc[i, 'Class'] = "Large"
    print(i)
print(testDF)
testDF.to_csv('hyperparameterTWEAK.csv')
Example no. 27
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
	help="path to the input image")
ap.add_argument("-o", "--output", required=True,
	help="path to output directory to store augmentation examples")
ap.add_argument("-p", "--prefix", type=str, default="image",
	help="output filename prefix")
args = vars(ap.parse_args())

# load the input image, convert it to a NumPy array, and then
# reshape it to have an extra dimension
print("[INFO] loading example image...")
image = load_img(args["image"])
image = img_to_array(image)
image = np.expand_dims(image, axis=0)

# construct the image generator for data augmentation then
# initialize the total number of images generated thus far
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
	height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
	horizontal_flip=True, fill_mode="nearest")
total = 0

# construct the actual Python generator
print("[INFO] generating images...")
imageGen = aug.flow(image, batch_size=1, save_to_dir=args["output"],
	save_prefix=args["prefix"], save_format="jpg")

# loop over examples from our image data augmentation generator
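
The excerpt ends at the loop comment; a hedged completion of the generator loop, stopping after an assumed count of 10 augmented images:

for image in imageGen:
	# the generator writes each augmented image to args["output"] itself
	total += 1
	if total == 10:
		break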
Example no. 28
def ModelKi(location,BASE_DIR):
    print("first:" + location)
    base_model = InceptionV3(weights='imagenet', include_top=False,input_shape=(150,150,3))
    #base_model = InceptionV3(include_top=False, weights='imagenet',input_shape=(150,150,3))
    temploc = location.split('\\')
    print("------------------------|||||||||||")
    print(temploc)
    baseloc = ''
    for i in range(len(temploc)-1):

        baseloc += temploc[i] + '\\'
    print("third:" + baseloc)
    #print("fourth:"+location)
#defining the model architecture
    model = Sequential()
    model.add(Dense(1024, activation='relu',input_shape=(18432,)))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(101, activation='softmax'))

    model.load_weights(BASE_DIR + r"\DV\models\weight_150_300_Incv3.hdf5")  # raw string avoids invalid escape sequences

    model.compile(loss='categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])

# creating the tags
    train = pd.read_csv(BASE_DIR + r'\DV\train_new.csv')
    y = train['class']
    y = pd.get_dummies(y)

    pred = []
    cap = cv2.VideoCapture(location)  # capturing the video from the given path
    print(cap)
    frameRate = cap.get(5) #frame rate
    x=1
    count = 0

    while(cap.isOpened()):

        frameId = cap.get(1) #current frame number
        ret, frame = cap.read()
        if (ret != True):
            break
        if (frameId % math.floor(frameRate) == 0):
        # storing the frames of this particular video in temp folder
            filename = str(baseloc) + 'temp\\' + "_frame%d.jpg" % count
            count += 1
            cv2.imwrite(filename, frame)
    cap.release()

    images = glob(str(baseloc) + r"temp\*.jpg")

    prediction_images = []
    for i in range(len(images)):
        img = image.load_img(images[i], target_size=(150,150,3))
        img = image.img_to_array(img)
        img = img/255
        prediction_images.append(img)


# converting all the frames for a test video into numpy array
    prediction_images = np.array(prediction_images)
# extracting features using pre-trained model
    prediction_images = base_model.predict(prediction_images)
# converting features in one dimensional array
    prediction_images = prediction_images.reshape(prediction_images.shape[0], 3*3*2048)
# predicting tags for each array (predict_classes was removed in newer
# Keras; np.argmax(model.predict(...), axis=-1) is the modern equivalent)
    predictionar = model.predict_classes(prediction_images)

    predictionar = predictionar.reshape(predictionar.shape[0],1)

    mode = s.mode(predictionar)

    x = mode[0][0]

    ans = y.columns.values[x][0]

    files = glob(str(baseloc) + r'temp\*')
    for f in files:
        os.remove(f)
    return ans
Example no. 29
csv_file = os.path.join(test_path, 'test.csv')
df = pd.read_csv(csv_file)
print(df.head())

submission = './submission.csv'
csv_sub = open(submission, mode='w')
writer = csv.writer(csv_sub,
                    delimiter=',',
                    quotechar='"',
                    quoting=csv.QUOTE_MINIMAL)
writer.writerow(['Image', 'target'])

dances = [
    'bharatanatyam', 'kathak', 'kathakali', 'kuchipudi', 'manipuri',
    'mohiniyattam', 'odissi', 'sattriya'
]
for index, row in df.iterrows():
    print("**************>", row.values[0])
    test_image = os.path.join(test_image_path, row.values[0])
    img = image.load_img(test_image, target_size=(image_width, image_height))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)

    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    image_class = np.argmax(classes[0])
    result = [row.values[0], dances[image_class]]
    writer.writerow(result)

csv_sub.close()
Example no. 30
test_covid_path = glob('.\\data-split\\val\\CT_COVID\\*')
test_normal_path = glob('.\\data-split\\val\\CT_NonCOVID\\*')

# Load images (for Google Colab)
# train_covid_path = glob('/content/data-split/train/CT_COVID/*')
# train_normal_path = glob('/content/data-split/train/CT_NonCOVID/*')
# test_covid_path = glob('/content/data-split/val/CT_COVID/*')
# test_normal_path = glob('/content/data-split/val/CT_NonCOVID/*')

# Building the training array
cnt = 0  # index
for img_file in train_covid_path:
    image_orig = cv2.imread(img_file)
    image_resized = cv2.resize(image_orig, (256, 256),
                               interpolation=cv2.INTER_CUBIC)
    img_array = img_to_array(image_resized)

    x_train_temp[cnt] = img_array / 255
    y_train_temp[cnt] = 1
    cnt += 1

for img_file in train_normal_path:
    image_orig = cv2.imread(img_file)
    image_resized = cv2.resize(image_orig, (256, 256),
                               interpolation=cv2.INTER_CUBIC)
    img_array = img_to_array(image_resized)

    x_train_temp[cnt] = img_array / 255
    y_train_temp[cnt] = 0
    cnt += 1
Example no. 31
def path_to_tensor(img_path):
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
Example no. 32
pathname = r"C:\Users\LENOVO\Desktop\Face-Mask-Detection\dataset"
print("[INFO] loading images...")
imagePaths = list(paths.list_images(pathname))
data = []
labels = []

for imagePath in imagePaths:
    # extract the class label from the filename
    label = imagePath.split(os.path.sep)[-2]
    # load the input image (224x224) and preprocess it
    image = load_img(imagePath, target_size=(224, 224))
    image = img_to_array(image)
    image = preprocess_input(image)

    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)

# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)

# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)