Example #1
    def predict_image(self, image):
        """Keras predict gender and age"""
        img_data = image[np.newaxis, :]
        img_data = preprocess_input(img_data)
        results = self.model.predict(img_data)

        return results
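A minimal usage sketch for this method; it assumes the surrounding class wraps a two-headed Keras model (gender and age), and the class name and image path below are hypothetical.

import cv2
import numpy as np

predictor = GenderAgePredictor()                 # hypothetical wrapper exposing predict_image()
bgr = cv2.imread('face.jpg')                     # hypothetical image path
rgb = cv2.cvtColor(cv2.resize(bgr, (224, 224)), cv2.COLOR_BGR2RGB)
results = predictor.predict_image(rgb.astype('float32'))
gender_scores, age_scores = results              # assumes two output heads, as in Example #12
print(gender_scores, age_scores.shape)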
Example #2
def extract_feature(dir_path, net):
    features = []
    person_id = []
    infos = []
    cam_id = []
    for image_name in sorted(os.listdir(dir_path)):
        if '.txt' in image_name:
            continue
        if 'f' in image_name or 's' in image_name:
            arr = image_name.split('_')
            person = int(arr[0])
            camera = int(arr[1][1])
        elif 's' not in image_name:
            # grid
            print(image_name)
            arr = image_name.split('_')
            person = int(arr[0])
            camera = int(arr[1])
        else:
            continue
        image_path = os.path.join(dir_path, image_name)
        img = image.load_img(image_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        feature = net.predict(x)
        features.append(np.squeeze(feature))
        person_id.append(person)
        cam_id.append(camera)
        infos.append((person, camera))

    return features, infos, person_id, cam_id
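Not part of the original snippet, but a sketch of how the returned features might be used for re-identification: extract query and gallery features with the same `net`, L2-normalise them, and rank by cosine similarity. The directory names are hypothetical.

import numpy as np

query_feats, _, query_ids, _ = extract_feature('query/', net)        # hypothetical folders
gallery_feats, _, gallery_ids, _ = extract_feature('gallery/', net)

q = np.array(query_feats)
g = np.array(gallery_feats)
q = q / np.linalg.norm(q, axis=1, keepdims=True)   # L2-normalise each feature vector
g = g / np.linalg.norm(g, axis=1, keepdims=True)
scores = q.dot(g.T)                                # cosine similarity, shape (n_query, n_gallery)
top1 = np.argmax(scores, axis=1)
print('top-1 accuracy:', np.mean(np.array(gallery_ids)[top1] == np.array(query_ids)))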
Example #3
def load_data(data_name, path):
    data_loader = DataManager(data_name,
                              image_size=input_shape[:2],
                              dataset_path=path)
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    num_samples, num_classes = emotions.shape
    image_size = faces.shape[1]
    train_data, val_data = split_data(faces, emotions, validation_split)
    return train_data, val_data, image_size, num_classes
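A sketch of calling load_data; `input_shape` and `validation_split` are module-level globals in the original project, so they are defined here only to make the call self-contained, and the dataset path is hypothetical.

input_shape = (64, 64, 1)          # assumed model input shape
validation_split = 0.2

train_data, val_data, image_size, num_classes = load_data('fer2013', '../datasets/fer2013/')
train_faces, train_emotions = train_data
print(train_faces.shape, image_size, num_classes)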
Example #4
def predict_gender(name):

    #loading data and images
    image_path = source + name
    m = re.search('.*(?=-)', name)
    if m:
        found = m.group(0)
    else:
        found = name
    result = {'male': 0, 'female': 0, 'domain': found}

    try:
        rgb_image = load_image(image_path, grayscale=False)
    except:
        print("3. Doesn't open")
        if (os.path.isfile(image_path)):
            os.remove(image_path)
        to_remove.append(name)
        return result
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    #face and gender detection
    faces = detect_faces(face_detection, gray_image)
    if (len(faces) == 0):
        print('no faces')
        to_remove.append(name)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        if gender_text == gender_labels[0]:
            result['female'] += 1
        else:
            result['male'] += 1
    return result
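A hedged sketch of running predict_gender over a download folder; `source` and `to_remove` are globals in the original code and are assumed to exist here.

import os

totals = {'male': 0, 'female': 0}
for file_name in os.listdir(source):      # `source` is the global image directory
    counts = predict_gender(file_name)
    totals['male'] += counts['male']
    totals['female'] += counts['female']
print(totals)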
Example #5
        #bgr_image = np.empty((480 * 640 * 3,), dtype=np.uint8)
        #camera.capture(bgr_image, format='bgr', use_video_port=True)
        bgr_image = bgr_image.reshape((480, 640, 3))
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:
            count = count + 1
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue
            gray_face = preprocess_input(gray_face, True)  # roughly a standardization step; shape (64, 64)
            gray_face = np.expand_dims(gray_face, 0)  # (1,64,64)
            gray_face = np.expand_dims(gray_face, -1)  # (1,64,64,1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue
Example #6
    def preprocess_images(self, image_array):
        return preprocess_input(image_array)
Example #7
    # callbacks
    # save model checkpoints in HDF5 format
    log_file_path = base_path + dataset_name + '_emotion_training.log'
    csv_logger = CSVLogger(log_file_path, append=False)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss',
                                  factor=0.1,
                                  patience=int(patience / 4),
                                  verbose=1)
    trained_models_path = base_path + dataset_name + '_mini_Xception'
    model_names = trained_models_path + '.{epoch:02d}-{val_loss:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names,
                                       'val_loss',
                                       verbose=1,
                                       save_best_only=True)
    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    #loading dataset
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    train_data, val_data = split_data(faces, emotions, validation_split)
    train_faces, train_emotions = train_data
    model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                            batch_size),
                        steps_per_epoch=len(train_faces) // batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        callbacks=callbacks,
                        validation_data=val_data)
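fit_generator is deprecated in newer Keras/TensorFlow releases; under the same variable names, a roughly equivalent call uses model.fit, which also accepts generators.

model.fit(data_generator.flow(train_faces, train_emotions, batch_size),
          steps_per_epoch=len(train_faces) // batch_size,
          epochs=num_epochs,
          verbose=1,
          callbacks=callbacks,
          validation_data=val_data)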
Example #8
def preprocess(image_arr):
    data = preprocess_input(image_arr)
    return data
Example #9
def crop_face(file_name, face_detection, name_count):

    face_detection_size = (40, 40)
    counter = 0
    frame_process_counter = 0

    # starting video streaming
    cv2.namedWindow('Attendence_Tracker', cv2.WINDOW_NORMAL)
    # cv2.namedWindow('Attendence_Tracker')
    # file_name = '../top10/person1.mp4'
    video_capture = cv2.VideoCapture(file_name)

    time.sleep(1.0)

    while video_capture.isOpened():
        ret, bgr_image = video_capture.read()
        if not ret:
            break
        counter += 1
        if counter % 1 == 0:
            frame_process_counter += 1
            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            faces = detect_faces(face_detection, bgr_image, confidence)
            count = 0
            for face_coordinates in faces:
                x1, x2, y1, y2 = apply_offsets(face_coordinates, face_offsets)
                rgb_face = rgb_image[y1:y2, x1:x2]
        
                print("len", len(rgb_face))
                # print(rgb_face)
                if len(rgb_face) != 0 and counter % 1 == 0:
                    dict_mtcnn = detector.detect_faces(rgb_face)
                    print(dict_mtcnn)
                    if len(dict_mtcnn) != 0:
                        # MTCNN returns the box as [x, y, width, height] in rgb_face coordinates
                        bounding_box = dict_mtcnn[0]['box']
                        new_image = rgb_face[bounding_box[1]:bounding_box[1] + bounding_box[3],
                                             bounding_box[0]:bounding_box[0] + bounding_box[2]]
                        cv2.rectangle(rgb_face,
                          (bounding_box[0], bounding_box[1]),
                          (bounding_box[0] + bounding_box[2], bounding_box[1] + bounding_box[3]),
                          (0, 155, 255), 2)

                        # cv2.imwrite("align/align_{}/align_{}_{}".format(name_count, name_count,counter) + ".jpg", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))    
                    cv2.imwrite("align/emp_{}/emp_{}_{}".format(name_count, name_count,counter) + ".jpg", cv2.cvtColor(rgb_face, cv2.COLOR_RGB2BGR))
                    print("image saved-------------------", counter)              
                count += 1
                try:
                    rgb_face = cv2.resize(rgb_face, (face_detection_size))
                except:
                    continue
                rgb_face = np.expand_dims(rgb_face, 0)
                rgb_face = preprocess_input(rgb_face, False)

                # Bounding box color            
                color = (255, 0, 0)
                identity = "this is me"
                draw_bounding_box(face_coordinates, rgb_image, color, identity)
            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            cv2.imshow('Attendence_Tracker', bgr_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            print('Total frames processed:', counter, frame_process_counter)
            break
    video_capture.release()
    # out.release()
    cv2.destroyAllWindows()

    return "successful"
Example #10
    faces = face_cascade.detectMultiScale(gray_image,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except:
            continue
Example #11
start = time.perf_counter()  # time.clock() was removed in Python 3.8
face_cascade = cv2.CascadeClassifier(
    '/home/user/anaconda3/pkgs/libopencv-3.4.2-hb342d67_1/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml'
)
faces = face_cascade.detectMultiScale(gray_image, 1.3, 5)
print(np.shape(faces))
# faces = detect_faces(face_detection, gray_image)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    rgb_face = rgb_image[y1:y2, x1:x2]
    gray_face = gray_image[y1:y2, x1:x2]
    try:
        if not is_light_net:
            face = cv2.resize(rgb_face, (target_size),
                              interpolation=cv2.INTER_CUBIC)
            face = preprocess_input(face, False)
            face = np.expand_dims(face, 0)
        else:
            face = cv2.resize(gray_face, (target_size),
                              interpolation=cv2.INTER_CUBIC)
            face = preprocess_input(face, False)
            face = np.expand_dims(face, 0)
            face = np.expand_dims(face, -1)
    except:
        continue

    start_time = time.perf_counter()
    if is_single_task:
        emotion_label = emotion_model.predict(face)
        gender_label = gender_model.predict(face)
        pose_label = pose_model.predict(face)
Example #12
        inner_padding_factor = 0.1
        outer_padding = 0
        output_size = 160

        reference_5pts = get_reference_facial_points(
            (output_size, output_size), inner_padding_factor,
            (outer_padding, outer_padding), default_square)

        dst_img = warp_and_crop_face(raw,
                                     facial5points,
                                     reference_pts=reference_5pts,
                                     crop_size=(crop_size, crop_size))

        # (3) Convert image data to keras format
        img_data = dst_img[np.newaxis, :]
        img_data = preprocess_input(img_data)
        data_results = model.predict(img_data)
        t1 = time.time() - t0
        print("Time used: {} ms".format(t1 * 1000))

        # (4) Predict gender and other attributes
        predicted_gender = gCategory[np.argmax(data_results[0])]
        ages = np.arange(0, 71).reshape(71, 1)
        predicted_age = int(np.round(data_results[1].dot(ages).flatten()[0],
                                     1))

        # (5) Draw images
        if predicted_gender == 'W':
            cv.rectangle(raw, (box[0], box[1]), (box[2], box[3]), (0, 0, 255),
                         2)
        else:
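The age head here appears to output a probability distribution over 71 one-year bins, and the predicted age is its expected value; the toy distribution below just isolates that computation.

import numpy as np

age_probs = np.zeros((1, 71))
age_probs[0, 24] = 0.5                    # toy distribution split between ages 24 and 26
age_probs[0, 26] = 0.5
ages = np.arange(0, 71).reshape(71, 1)    # column vector of bin values 0..70
expected_age = age_probs.dot(ages).flatten()[0]
print(int(np.round(expected_age)))        # expected value 25.0 -> prints 25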
Example #13
def emotion():
    # parameters for loading data and images
    emotion_text = ""
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        if emotion_text:
            return emotion_text

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #14
def load_image(image_array):
    image_array = np.expand_dims(image_array, axis=0)
    image_array = preprocess_input(image_array)
    return image_array
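A small usage sketch for this helper, assuming the same Keras image utilities used in the other examples and a trained `model` already in scope; the file path and target size are hypothetical.

from keras.preprocessing import image
import numpy as np

img = image.load_img('face.jpg', target_size=(64, 64))   # hypothetical path and size
arr = image.img_to_array(img)
batch = load_image(arr)                   # adds the batch axis and normalises
print(np.argmax(model.predict(batch)))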
Example #15
def crop_face(file_name, face_detection, dirName):
    dire = "cropped_faces/" + dirName
    try:
        os.makedirs(dire)
        print("Directory ", dire, " Created ")
    except FileExistsError:
        print("Directory ", dire, " already exists")

    face_detection_size = (40, 40)
    counter = 0
    frame_process_counter = 0

    # starting video streaming
    cv2.namedWindow('Attendence_Tracker', cv2.WINDOW_NORMAL)
    # cv2.namedWindow('Attendence_Tracker')
    # file_name = '../top10/person1.mp4'
    video_capture = cv2.VideoCapture(file_name)

    time.sleep(1.0)

    while video_capture.isOpened():
        ret, bgr_image = video_capture.read()
        if not ret:
            break
        counter += 1
        if counter % 1 == 0:
            frame_process_counter += 1
            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            faces = detect_faces(face_detection, bgr_image, confidence)
            count = 0
            for face_coordinates in faces:
                x1, x2, y1, y2 = apply_offsets(face_coordinates, face_offsets)
                rgb_face = rgb_image[y1:y2, x1:x2]

                print("len", len(rgb_face))
                # print(rgb_face)
                if len(rgb_face) != 0 and counter % 1 == 0:
                    cv2.imwrite(
                        dire + "/" + dirName + "_{}".format(counter) + ".jpg",
                        cv2.cvtColor(rgb_face, cv2.COLOR_RGB2BGR))
                    print("image saved-------------------", counter)
                count += 1
                try:
                    rgb_face = cv2.resize(rgb_face, (face_detection_size))
                except:
                    continue
                rgb_face = np.expand_dims(rgb_face, 0)
                rgb_face = preprocess_input(rgb_face, False)

                # Bounding box color
                color = (255, 0, 0)
                identity = "this is me"
                draw_bounding_box(face_coordinates, rgb_image, color, identity)
            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            cv2.imshow('Attendence_Tracker', bgr_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            print('Total frames processed:', counter, frame_process_counter)
            break
    video_capture.release()
    # out.release()
    cv2.destroyAllWindows()

    return "successful"