def load_and_detect_images(im_path, tinyFaces_args, output_directory):

    args = get_args(tinyFaces_args)
    scaled_matrix = np.empty([])

    results = []

    print('-------Detecting images ... -------')

    for files in tqdm(os.listdir(im_path)):
        if files.endswith('.jpg') or files.endswith('.png'):

            if os.path.isfile(output_directory + 'data/dataFrame_' +
                              files.split('.')[0] + '.pkl'):
                print("-------> Loading DataFrame ...")
                data_frame = read_pickle(output_directory + 'data/dataFrame_' +
                                         files.split('.')[0] + '.pkl')

            else:
                print('-------> Image ' + files)
                img = cv2.imread(os.getcwd() + '/' + im_path + files)

                [scaled_matrix, bboxes, detected_faces_image
                 ] = tinyFaces.tinyFaces_Detection(args, img)

                index = []
                rows = []

                for i in range(len(scaled_matrix)):
                    rows.append({
                        'img': img,
                        'faces': scaled_matrix[i],
                        'bboxes': bboxes[i],
                        'name_img': files.split('.')[0]
                    })
                    index.append(i)

                data_frame = DataFrame(rows, index=index)
                data_frame.to_pickle(output_directory + 'data/dataFrame_' +
                                     files.split('.')[0] + '.pkl')

            # results.append([data_frame, img])
            results.append(data_frame)
            # data_frame --> one row per detected face (columns: img, faces, bboxes, name_img)

    return results
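
# A minimal usage sketch for load_and_detect_images above; the image folder,
# tinyFaces argument list and output directory are hypothetical placeholders
# (not from the source). The output directory is expected to already contain
# a data/ subfolder for the cached pickles.
if __name__ == '__main__':
    tinyfaces_cli_args = []  # placeholder: whatever get_args() expects for the tinyFaces detector
    results = load_and_detect_images('images/', tinyfaces_cli_args, 'output/')
    for df in results:
        # one DataFrame per image, one row per detected face
        print(df['name_img'].iloc[0], '->', len(df), 'faces detected')
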
Example #2
def main():
    args = get_args()
    depth = args.depth
    k = args.width
    max_age = args.max_age + 1

    scaled_matrix = np.empty([])

    for files in os.listdir(directory_files[0]):
        if files.endswith('.jpg') or files.endswith('.png'):
            print('-------Predicting image:  ' + files + '-------')
            img = cv2.imread(os.getcwd() + '/' + directory_files[0] + files)
            # for face detection
            output_directory = directory_files[1] + files.split('.')[0]
            if not os.path.exists(output_directory):
                print("** Creating output_directory in " + output_directory +
                      ' ... **')
                os.makedirs(output_directory)

            # [pnet, rnet, onet] = faceNet.create_FaceNet_network_Params(args)
            # [scaled_matrix , n_faces_detected ,detected_faces_image] = faceNet.faceNet_Detection(img,output_directory, args, pnet, rnet, onet)
            [scaled_matrix, n_faces_detected,
             detected_faces_image] = tinyFaces.tinyFaces_Detection(args, img)

            # Load model and weights of AGE-GENDER
            img_size_age_gender = 64
            img_size_ethnicity = 224
            model_age_gender = WideResNet(img_size_age_gender,
                                          depth=depth,
                                          k=k,
                                          units_age=max_age)()
            model_age_gender.load_weights(weight_file)
            #
            # # Load model and weights of ETHNICITY
            # model_ethnicity = create_face_network(nb_class=4, hidden_dim=512, shape=(224, 224, 3))
            # model_ethnicity.load_weights(weights_ethnic_file)

            # Resize the images for each model
            faces = np.empty((len(scaled_matrix), img_size_age_gender,
                              img_size_age_gender, 3))
            # faces1 = np.empty((len(scaled_matrix), img_size_ethnicity, img_size_ethnicity, 3))

            for i in range(len(scaled_matrix)):
                faces[i, :, :, :] = cv2.resize(
                    scaled_matrix[i],
                    (img_size_age_gender, img_size_age_gender))
                # faces1[i, :, :, :] = cv2.resize(scaled_matrix[i], (img_size_ethnicity, img_size_ethnicity))

            # # predict with ethnicity model
            # # TODO : Reduce the number of predictions in a second / minute
            # result_ethn = np.empty((len(faces1),4))
            # for i in range(len(faces1)):
            #     result_ethn[i] = model_ethnicity.predict(transform_image_etnicity_to_predict(faces1[i]))

            # NOTE: predict ages and genders of the detected faces
            result_age_gend = model_age_gender.predict(faces)
            predicted_genders = result_age_gend[0]
            ages = np.arange(0, max_age).reshape(max_age, 1)
            predicted_ages = result_age_gend[1].dot(ages).flatten()

            cols = 5
            rows = int(len(scaled_matrix) / cols) + 1

            for i in range(len(scaled_matrix)):
                plt.subplot(rows, cols, i + 1)
                im = faces[i].astype(np.uint8)
                plt.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
                plt.title("{}, {}".format(
                    int(predicted_ages[i]),
                    "F" if predicted_genders[i][0] > 0.5 else "M"))
                plt.axis('off')
                plt.subplots_adjust(hspace=0.6)
                # cv2.imwrite(output_directory+'/'+"{}_{}_".format(int(predicted_ages[i]),
                #                     "F" if predicted_genders[i][0] > 0.5 else "M") +"_id"+str(i)+".jpg", scaled_matrix[i])

            plt.savefig(output_directory + "/result_" +
                        str(int(len(scaled_matrix))) + ".png")
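
# For reference, the expected-age step above (result_age_gend[1].dot(ages))
# treats the second model output as a probability distribution over age bins
# and takes its expectation. A self-contained sketch of that calculation with
# dummy probabilities (the numbers below are made up for illustration):
import numpy as np

max_age = 101                                           # e.g. age bins 0..100
probs = np.random.dirichlet(np.ones(max_age), size=3)   # stand-in for the softmax output of 3 faces
ages = np.arange(0, max_age).reshape(max_age, 1)
expected_ages = probs.dot(ages).flatten()               # one expected age per face
print(expected_ages)
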
def main():

    args = get_args()
    [pnet, rnet, onet] = faceNet.create_FaceNet_network_Params(args)
    detected_faces = []

    # for files in os.listdir(directory_files[0]):
    #     if files.endswith('.jpg') or files.endswith('.png'):
    #         print ('-------Analysing image:  '+files+'-------')
    #         img = cv2.imread(os.getcwd()+'/'+directory_files[0]+files)
    #         # for face detection
    #         output_directory = directory_files[1]
    #         if not os.path.exists(output_directory):
    #             print ("** Creating output_directory in "+output_directory+' ... **')
    #             os.makedirs(output_directory)
    #
    #
    #         [scaled_matrix , n_faces_detected, detected_faces_image] = faceNet.faceNet_Detection(img,output_directory, args, pnet, rnet, onet)
    #         cv2.imwrite(output_directory+files,detected_faces_image)
    #
    #         detected_faces.append(detected_faces_image)
    #
    # np.array(detected_faces).dump(open('detected_faces_array.npy', 'wb'))

    for files in os.listdir(directory_files[0]):
        if files.endswith('.jpg') or files.endswith('.png'):
            print('-------Analysing image:  ' + files + '-------')
            img = cv2.imread(os.getcwd() + '/' + directory_files[0] + '/' +
                             files)
            # for face detection
            output_directory = directory_files[1]
            if not os.path.exists(output_directory):
                print("** Creating output_directory in " + output_directory +
                      ' ... **')
                os.makedirs(output_directory)

            if args.face_detector == 'facenet':
                # out_fn = cv2.VideoWriter('output_facenet.avi',fourcc, 20.0, size)
                [scaled_matrix, n_faces_detected, detected_faces_image
                 ] = faceNet.faceNet_Detection(img, output_directory, args,
                                               pnet, rnet, onet)
                # print ('scaled_matrix.shape',scaled_matrix.shape)

                if detected_faces_image.ndim == 1:
                    print("--> SOMETHING IS WRONG IN THIS IMAGE")
                else:
                    cv2.imwrite(output_directory + files, detected_faces_image)
                    detected_faces.append(scaled_matrix)

            elif args.face_detector == 'tinyfaces':
                # out_tf = cv2.VideoWriter('output_tinyfaces.avi',fourcc, 20.0, size)
                [scaled_matrix, n_faces_detected, detected_faces_image
                 ] = tinyFaces.tinyFaces_Detection(args, img)
                # TODO: develop a scaled_matrix

                cv2.imwrite(output_directory + files, detected_faces_image)
                detected_faces.append(scaled_matrix)
            else:
                print(
                    'A face detector is required as an argument. The options are: facenet or tinyfaces.'
                )

    np.array(detected_faces).dump(
        open('detected_faces_' + str(args.face_detector) + '_array.npy', 'wb'))
    # detector = dlib.get_frontal_face_detector()
    emotion_model = load_emotion_model('models/model.best.hdf5')

    # [pnet, rnet, onet] = faceNet.create_FaceNet_network_Params(args)

    try:
        start = time.time()
        im = cv2.imread(im_path)
        if not os.path.exists(output_directory):
            print("** Creating output_directory in " + output_directory +
                  ' ... **')
            os.makedirs(output_directory)
        # [scalied_matrix , n_faces_detected, detected_faces] = faceNet.faceNet_Detection(im,output_directory, args, pnet, rnet, onet)

        [scaled_matrix, n_faces_detected,
         detected_faces_image] = tinyFaces.tinyFaces_Detection(args, im)

        # Resize the images for each model
        faces = np.empty(
            (len(scaled_matrix), img_size_emotions, img_size_emotions, 3))

        cols = 5
        rows = int(len(scaled_matrix) / cols) + 1

        for i in range(len(scaled_matrix)):
            faces[i, :, :, :] = cv2.resize(
                scaled_matrix[i], (img_size_emotions, img_size_emotions))

        preds = emotion_model.predict(faces)

        for i in range(len(scaled_matrix)):
            # NOTE: hypothetical completion -- report the most likely emotion
            # class index for each detected face
            print('Face %d -> emotion class %d' % (i, int(np.argmax(preds[i]))))

        print("Total time: ", time.time() - start)

    except Exception as e:
        print('Emotion detection failed:', e)
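
# The np.array(detected_faces).dump(...) call above pickles the array of face
# crops to disk. A short sketch of how that file could be read back; the
# filename follows the pattern built in the code for the tinyfaces detector,
# and allow_pickle=True is needed because the file is a pickle rather than a
# plain .npy array.
import numpy as np

detected_faces = np.load('detected_faces_tinyfaces_array.npy', allow_pickle=True)
print('images processed:', len(detected_faces))
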
Example #5
def main():

    start = time.time()
    args = get_args()
    depth = args.depth
    k = args.width

    capture = cv2.VideoCapture(video_directory)

    # # NOTE: PYTHON 3
    # fourcc = cv2.VideoWriter.fourcc('M','J','P','G')
    # # In this way it always works, because your get the right "size"
    # size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
    #         int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    # NOTE: PYTHON 2.7
    # fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
    fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
    size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    # out = cv2.VideoWriter('output.avi',fourcc, 20.0, size)
    # out1 = cv2.VideoWriter('output_facenet.avi',fourcc, 20.0, size)
    # out2 = cv2.VideoWriter('output_tinyfaces.avi',fourcc, 20.0, size)

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    if args.face_detector == 'facenet':
        out1 = cv2.VideoWriter(output_directory + 'output_facenet.avi', fourcc,
                               20.0, size)
    elif args.face_detector == 'tinyfaces':
        out2 = cv2.VideoWriter(
            os.path.join(output_directory, 'output_tinyfaces_TEST.avi'),
            fourcc, 20.0, size)

    count = 0
    maxDetectedFaces = []

    try:
        while (capture.isOpened()):
            success, frame = capture.read()
            if not success:
                # end of the stream (or a failed read): stop processing
                break
            # print('Read a new frame: ', frame.shape) # --> (720, 1280, 3)

            if args.face_detector == 'facenet':

                # out_fn = cv2.VideoWriter('output_facenet.avi',fourcc, 20.0, size)
                [pnet, rnet,
                 onet] = faceNet.create_FaceNet_network_Params(args)
                [scaled_matrix, n_faces_detected, detected_faces_image
                 ] = faceNet.faceNet_Detection(frame, output_directory, args,
                                               pnet, rnet, onet)
                # print ('scaled_matrix.shape',scaled_matrix.shape)

                # if detected_faces_image.ndim == 1:
                #     print("------SOMETHING IS WRONG------")
                #     sys.exit(0)
                # else:
                # IDEA: LOAD MODELS -- SLOW
                # NOTE[faces, faces1] = loadModels(scaled_matrix, depth, k)
                # FACES  SHAPEEEE.------ (11, 64, 64, 3)
                # FACES 11111 SHAPEEEE.------ (11, 224, 224, 3)
                # cv2.imshow('FRAME',detected_faces_image)
                # IDEA: save all frames
                # name = "out_video/frame_"+str(count)+".jpg"
                # print (name)
                # cv2.imwrite(name,detected_faces_image)
                out1.write(detected_faces_image)
                count += 1

            elif args.face_detector == 'tinyfaces':
                # out_tf = cv2.VideoWriter('output_tinyfaces.avi',fourcc, 20.0, size)
                try:
                    [scaled_matrix, n_faces_detected, detected_faces_image
                     ] = tinyFaces.tinyFaces_Detection(args, frame)
                    # TODO: develop a scaled_matrix

                    # if detected_faces_image.ndim == 1:
                    #     print("------SOMETHING IS WRONG------")
                    # else:
                    out2.write(detected_faces_image)
                    maxDetectedFaces.append(len(n_faces_detected))
                    cv2.imwrite(
                        output_directory +
                        'output_video_tinyFaces/frame_%05d.png' % count,
                        detected_faces_image)
                    count += 1

                except Exception as e:
                    print("Error in frame " + str(count) + ": " + str(e))
                    break
            else:
                print(
                    'A face detector is required as an argument. The options are: facenet or tinyfaces.'
                )

        text_file = open("MAX_COUNTER.txt", "w")
        text_file.write("MAXIM COUNTER: %s" % np.max(maxDetectedFaces))
        text_file.close()
        print("Total time: ", time.time() - start)
        # Release everything once the job is finished
        capture.release()
        if args.face_detector == 'facenet':
            out1.release()
        elif args.face_detector == 'tinyfaces':
            out2.release()
        cv2.destroyAllWindows()

    except KeyboardInterrupt:
        aux = time.time() - start
        capture.release()
        if args.face_detector == 'facenet':
            out1.release()
        elif args.face_detector == 'tinyfaces':
            out2.release()
        print("Total time: ", aux)
        cv2.destroyAllWindows()
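
# The video loop above follows the standard OpenCV capture/writer pattern. A
# stripped-down, self-contained sketch of that pattern (the input and output
# paths are placeholders), handy for checking the codec and frame size
# independently of the face detectors:
import cv2

capture = cv2.VideoCapture('input.avi')                     # placeholder input video
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
writer = cv2.VideoWriter('copy.avi', fourcc, 20.0, size)    # placeholder output video

while capture.isOpened():
    success, frame = capture.read()
    if not success:                                          # end of stream
        break
    writer.write(frame)                                      # per-frame processing would go here

capture.release()
writer.release()
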