Code Example #1
File: demo_final.py  Project: ywadea/CMPT726
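Webcam demo: OpenCV captures frames, dlib detects faces, and FaceAligner produces 200x200 crops. Once a face has been visible for ten consecutive frames, each crop is written to disk and handed to a background predictor2 thread, and the resulting gender/race/age labels are drawn onto the video feed.
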
import threading

import cv2
import dlib
import numpy as np
from imutils.face_utils import FaceAligner

# get_args, load_image, draw_label, and predictor2 are defined elsewhere in the project


def main():
    global Result, waitting, current_face
    threads = []
    waitting = False  # True while a prediction thread is in flight; predictor2 fills in the global Result
    period = 0

    args = get_args()
    depth = args.depth
    k = args.width

    # for face detection
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    fa = FaceAligner(predictor, desiredFaceWidth=200)

    # load model and weights
    img_size = 200

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    # prediction slots; index 0 of the lookup tables below means "no result yet"
    # (as written, the demo effectively assumes a single face, since each list has one slot)
    race = [0]
    gender = [0]
    age = [0]

    while True:
        # get video frame
        ret, img = cap.read()

        if not ret:
            print("error: failed to capture image")
            return -1

        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        input_img2 = input_img.astype(np.float32)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        detected = detector(input_img, 1)
        faces = np.empty((len(detected), img_size, img_size, 3))

        for i, d in enumerate(detected):
            x1, y1, x2, y2, w, h = (d.left(), d.top(), d.right() + 1,
                                    d.bottom() + 1, d.width(), d.height())
            xw1 = max(int(x1 - 0.4 * w), 0)
            yw1 = max(int(y1 - 0.4 * h), 0)
            xw2 = min(int(x2 + 0.4 * w), img_w - 1)
            yw2 = min(int(y2 + 0.4 * h), img_h - 1)
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
            # align each detected face to a 200x200 crop
            faces[i, :, :, :] = fa.align(input_img, gray, d)

        if len(detected) > 0:
            period += 1
            # kick off predictions once a face has been present for 10 consecutive frames
            if not waitting and period == 10:
                period = 0
                waitting = True
                # predict gender, race, and age for each detected face
                for i, d in enumerate(detected):
                    # write the aligned face to disk for the prediction thread
                    # (JPEG quality must be in 0-100; the original passed 180)
                    cv2.imwrite('tmp_{}.jpg'.format(i), faces[i, :, :, :],
                                [int(cv2.IMWRITE_JPEG_QUALITY), 100])
                    imgp = './tmp_{}.jpg'.format(i)
                    image_tru = load_image(imgp)
                    thread = threading.Thread(target=predictor2,
                                              args=(image_tru, len(detected)))
                    threads.append(thread)
                    print('thread created!')

                current_face = 0
                # start every queued prediction thread
                # (the original started only the last one)
                for thread in threads:
                    thread.start()

            if Result:
                gender[current_face] = Result[0] + 1
                race[current_face] = Result[1] + 1
                age[current_face] = Result[2] + 1
                print(race, gender, age)
                current_face += 1
                Result = None
                # reset the cycle so the next prediction can start
                waitting = False
                period = 0
                threads = []

        if len(detected) == 0:
            waitting = False
            Result = None
            period = 0
            threads = []

        # draw results

        PAge = [
            '-', '0-6', '6-13', '13-20', '20-27', '27-35', '35-43', '43-50',
            '50-62', '62+'
        ]
        PRase = ['-', 'W', 'B', 'A', 'I', 'O']
        PGender = ['-', 'M', 'F']
        for i, d in enumerate(detected):
            label = "{}, {}, {}".format(PRase[int(race[i])],
                                        PGender[int(gender[i])],
                                        PAge[int(age[i])])
            draw_label(img, (d.left(), d.top()), label)

        cv2.imshow("result", img)
        key = cv2.waitKey(1)

        if key == 27:
            break
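
    # release the camera and close the preview window on exit
    cap.release()
    cv2.destroyAllWindows()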
Code Example #2
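Label-drawing helper plus a small CLI entry point. The snippet starts mid-signature in the source; it assumes cv2 and sys are imported and that load_image, align_face, and run come from the surrounding project.
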
# the opening line of this snippet is missing in the source; a plausible
# reconstruction, assuming this is the project's draw_label helper:
def draw_label(image,
               point,
               genders,
               races,
               font=cv2.FONT_HERSHEY_SIMPLEX,
               font_scale=1,
               thickness=2):

    for i in range(len(point)):
        label = "{}, {}".format(int(races[i]), "F" if genders[i] == 0 else "M")
        size = cv2.getTextSize(label, font, font_scale, thickness)[0]
        x, y = point[i]
        cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0),
                      cv2.FILLED)
        cv2.putText(image, label, point[i], font, font_scale, (255, 255, 255),
                    thickness)


if __name__ == '__main__':
    # expect exactly one CLI argument, the image path
    # (the original tested len(sys.argv) != 1, which contradicts the sys.argv[1] access below)
    if len(sys.argv) != 2:
        print('python predictor.py file_path_to_image')

    else:
        image_path = sys.argv[1]

        image = load_image(image_path)
        image_aligned = align_face(image)

        pred_gender, pred_race = run(image_aligned)

        # draw_label()
Code Example #3
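Single-image prediction with the TF 1.x API: the aligned face is round-tripped through a temporary TFRecord, decoded and standardized via a queue-based input pipeline, and run through the model restored from the latest checkpoint. Helpers such as _int64_feature, _bytes_feature, build_model, project_dir, and model_dir are defined elsewhere in the project.
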
def main(img_name):
    # leftover guard from the CLI version of this script; when main() is called
    # as a function, img_name is supplied directly and the else branch runs
    if len(sys.argv) != 1:
        print('python predictor.py file_path_to_image')

    else:
        image_path = project_dir + img_name
        image = load_image(image_path)
        image = align_face(image)

        # ********** Encode: write the image into a temporary TFRecord **********
        data_file = 'tmp_aug.tfrecords'
        writer = tf.python_io.TFRecordWriter(data_file)
        # dummy labels and address; only the image matters at inference time
        gender = np.array([0])
        race = np.array([0])
        age = np.array([0])
        addrs = ['0']
        feature = {'val/gender': _int64_feature(gender.astype(np.int8)),
                   'val/race': _int64_feature(race.astype(np.int8)),
                   'val/age': _int64_feature(age.astype(np.int8)),
                   'val/image': _bytes_feature(tf.compat.as_bytes(image.tostring())),
                   'val/address': _bytes_feature(os.path.basename(addrs[0].encode()))}
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())
        writer.close()
        sys.stdout.flush()
        # ********** Decode: read the record back through a TF 1.x input queue **********
        tf.reset_default_graph()
        filename_queue = tf.train.string_input_producer([data_file])
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)

        features = tf.parse_single_example(
            serialized_example,
            features={'val/gender': tf.FixedLenFeature([], tf.int64),
                      'val/race': tf.FixedLenFeature([], tf.int64),
                      'val/age': tf.FixedLenFeature([], tf.int64),
                      'val/image': tf.FixedLenFeature([], tf.string),
                      'val/address': tf.FixedLenFeature([], tf.string)})

        image = tf.decode_raw(features['val/image'], tf.float32)
        image = tf.cast(image, tf.uint8)
        image.set_shape([200 * 200 * 3])
        image = tf.reshape(image, [200, 200, 3])
        # image = tf.reverse_v2(image, [-1])
        image = tf.image.per_image_standardization(image)

        images = tf.train.shuffle_batch([image],
                                        batch_size=1, capacity=256,
                                        num_threads=2, min_after_dequeue=32)

        train_mode = tf.placeholder(tf.bool)
        logits_gender, logits_race, logits_age, end_points, _ = build_model(images, train_mode)

        end_points['Predictions/gender'] = tf.nn.softmax(logits_gender, name='Predictions/gender')
        end_points['Predictions/race'] = tf.nn.softmax(logits_race, name='Predictions/race')
        end_points['Predictions/age'] = tf.nn.softmax(logits_age, name='Predictions/age')
        predictions1 = tf.argmax(end_points['Predictions/gender'], -1)
        predictions2 = tf.argmax(end_points['Predictions/race'], -1)
        predictions3 = tf.argmax(end_points['Predictions/age'], -1)

        pr1 = tf.to_float(tf.to_int32(predictions1))
        pr2 = tf.to_float(tf.to_int32(predictions2))
        pr3 = tf.to_float(tf.to_int32(predictions3))

        with tf.Session() as sess:
            saver = tf.train.Saver(max_to_keep=100)
            ckpt = tf.train.get_checkpoint_state(model_dir)

            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("restore and start evaluation!")

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            # single forward pass; the queue holds just the one encoded image
            acc1, acc2, acc3 = sess.run([pr1, pr2, pr3], {train_mode: False})
            print(acc1, acc2, acc3)

            # stop the queue-runner threads cleanly (left commented out in the
            # original, which can make the process hang on exit)
            coord.request_stop()
            coord.join(threads)

            print('gender: ', PGender[int(np.mean(acc1))])
            print('race: ', PRace[int(np.mean(acc2))])
            print('age: ', PAge[int(np.mean(acc3))])
Code Example #4
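Tail of a predictor variant that returns the gender and race predictions instead of printing them; the __main__ block runs it on a previously saved face crop.
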
            sess.close()

            gender = int(np.mean(acc1))
            # the original averaged acc1 here as well, which copied the gender
            # prediction into race; acc2 holds the race prediction
            race = int(np.mean(acc2))

        return gender, race


if __name__ == '__main__':
    imgp = './tmp_0.jpg'
    image_tru = load_image(imgp)
    gender, race = predictor(image_tru)
    print(PGender[gender], PRase[race])
Code Example #5
File: demo_backup.py  Project: ywadea/CMPT726
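Earlier backup of the webcam demo: the same dlib detection and FaceAligner pipeline as demo_final.py (and the same module-level imports), but predictions run synchronously through predictor2, here hard-wired to a test image.
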
def main():
    args = get_args()
    depth = args.depth
    k = args.width

    # for face detection
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    fa = FaceAligner(predictor, desiredFaceWidth=160)

    # load model and weights
    img_size = 160

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    while True:
        # get video frame
        ret, img = cap.read()

        if not ret:
            print("error: failed to capture image")
            return -1

        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        input_img2 = input_img.astype(np.float32)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        detected = detector(input_img, 1)
        faces = np.empty((len(detected), img_size, img_size, 3))


        for i, d in enumerate(detected):
            x1, y1, x2, y2, w, h = (d.left(), d.top(), d.right() + 1,
                                    d.bottom() + 1, d.width(), d.height())
            xw1 = max(int(x1 - 0.4 * w), 0)
            yw1 = max(int(y1 - 0.4 * h), 0)
            xw2 = min(int(x2 + 0.4 * w), img_w - 1)
            yw2 = min(int(y2 + 0.4 * h), img_h - 1)
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
            # align each detected face to a 160x160 crop
            faces[i, :, :, :] = fa.align(input_img, gray, d)

        gender = np.zeros(len(detected))
        race = np.zeros(len(detected))
        if len(detected) > 0:
            # predict gender and race for each detected face; this backup script
            # runs predictor2 synchronously on a fixed test image ('./111.jpg'),
            # which also replaces the webcam frame shown below
            for i, d in enumerate(detected):
                imgp = './111.jpg'
                img = load_image(imgp)
                gender[i], race[i] = predictor2(img)

            print(gender, race)


        # draw results
        PRase = ['White', 'Black', 'Asian', 'Indian',
                 'Others (like Hispanic, Latino, Middle Eastern)']
        for i, d in enumerate(detected):
            label = "{}, {}".format(PRase[int(race[i])], "F" if int(gender[i]) == 0 else "M")
            draw_label(img, (d.left(), d.top()), label)

        cv2.imshow("result", img)
        key = cv2.waitKey(1)

        if key == 27:
            break
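
    # release the camera and close the preview window on exit
    cap.release()
    cv2.destroyAllWindows()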