Code Example #1
File: process.py  Project: sunsern/cameraman
import os

import detect_face      # project-local module
import generate_jboost  # project-local module
# import mov2avi        # only needed if the conversion step below is re-enabled


def process():

    global framerate

    for f in os.listdir('.'):
        if f.endswith('.mov'):
            print('Processing...%s' % f)
            # mov2avi.convert2avi(f, framerate)
            detect_face.detect(f.replace('.mov', '.avi'))
            # NOTE: `xml` is not defined in this excerpt; it is presumably a
            # module-level value in the original project.
            generate_jboost.make_jboost_files(xml, f.replace('.mov', ''))

    print('Done!')
Code Example #2
import cv2
import numpy as np

import detect_face as df  # project-local face detector
# in_size, label_list, and paint_on_face are defined elsewhere in the
# original module.


def predict(input_file, model):
    """
    Take an image file as input and classify the faces in it.
    :param input_file: path to the input image
    :param model: trained model
    :return:
    """
    face_list = df.detect(input_file)
    face_position_list = df.detect(input_file=input_file, only_position=True)
    for orig_image, rect in zip(face_list, face_position_list):
        image = cv2.resize(orig_image, (in_size, in_size),
                           interpolation=cv2.INTER_CUBIC)
        image = image.transpose(2, 0, 1) / 255.  # HWC -> CHW, scale to [0, 1]
        index = model.predict(np.array([image], dtype=np.float32))
        # label_list entries appear to be (display name, label) pairs.
        name = label_list[index]
        print("Predicted: {}".format(name[0]))
        cv2.imshow('image', paint_on_face(input_file, rect, name[1]))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
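The preprocessing above (resize, HWC-to-CHW transpose, scaling to [0, 1]) can be reproduced in isolation. A minimal sketch, assuming an in_size of 64 and a hypothetical face.jpg crop (the original defines in_size elsewhere):

import cv2
import numpy as np

in_size = 64  # assumed value; not taken from the original
face = cv2.imread('face.jpg')  # hypothetical input crop
face = cv2.resize(face, (in_size, in_size), interpolation=cv2.INTER_CUBIC)
x = face.transpose(2, 0, 1) / 255.0      # HWC -> CHW, normalized to [0, 1]
batch = np.array([x], dtype=np.float32)  # shape (1, 3, in_size, in_size)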
Code Example #3
File: main.py  Project: Eximius-Design/face_detection
import cv2
import tensorflow as tf         # TF 1.x API (GraphDef / Session); under
                                # TF 2.x use the tf.compat.v1 equivalents
from imutils import paths       # presumed source of paths.list_images

from detect_face import detect  # project-local MTCNN wrapper


def main():
    model_path = './mtcnn.pb'
    imagePaths = list(
        paths.list_images(
            '/home/ravikiranb/homedir/images/train_img/somaliya/'))
    j = 0
    graph = tf.Graph()
    with graph.as_default():
        with open(model_path, 'rb') as f:
            graph_def = tf.GraphDef.FromString(f.read())
            tf.import_graph_def(graph_def, name='')
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=4,
                            inter_op_parallelism_threads=4)
    config.gpu_options.allow_growth = True
    sess = tf.Session(graph=graph, config=config)
    for image_path in imagePaths:
        j = j + 1
        img = cv2.imread(image_path, 1)
        img_h, img_w = img.shape[:2]
        img_a = img_h * img_w

        bbox, scores, landmarks = detect(img,
                                         sess,
                                         graph,
                                         min_size=40,
                                         factor=0.709,
                                         thresholds=[0.6, 0.7, 0.7])

        print('total box:', len(bbox))
        for box, pts in zip(bbox, landmarks):
            # Boxes are indexed [y1, x1, y2, x2].
            box = box.astype('int32')
            box_w = box[3] - box[1]
            box_h = box[2] - box[0]
            box_a = box_w * box_h

            # Keep only faces covering more than 3% of the image area.
            percent = box_a * 100 / img_a
            if percent > 3.0:
                print('percentage of bounding box in total image : {:.2f}'.
                      format(percent))
                img = cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]),
                                    (255, 0, 0), 3)

                # Landmarks are laid out as [y1..y5, x1..x5]; the original
                # reused `i` here, shadowing the outer loop variable.
                pts = pts.astype('int32')
                for k in range(5):
                    img = cv2.circle(img, (pts[k + 5], pts[k]), 4,
                                     (0, 0, 255), 8)
        cv2.imwrite('./opimages/imageeee' + str(j) + '.jpg', img)
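Since this detector reports boxes as [y1, x1, y2, x2], a tiny helper (illustrative only, not part of the project) converts them to the (x, y, w, h) convention the other examples on this page use:

def to_xywh(box):
    # box is [y1, x1, y2, x2], as indexed in the example above
    y1, x1, y2, x2 = box
    return x1, y1, x2 - x1, y2 - y1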
Code Example #4
import cv2

import detect_face  # project-local module; predict() and the argparse
                    # namespace `args` are defined elsewhere in the script


def draw(image, coords, preds):
    for i in range(len(coords)):
        x, y, w, h = coords[i]
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if preds[i][0] == 1:
            text = 'no mask'
        else:
            text = 'mask'
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (x, y)
        fontScale = 1
        fontColor = (0, 0, 255)
        thickness = 2  # the original named this lineType, but it is passed
                       # in cv2.putText's thickness position

        cv2.putText(image, text, bottomLeftCornerOfText, font, fontScale,
                    fontColor, thickness)
    return image


image = cv2.imread(args.image)
coords = detect_face.detect(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

preds = predict(image, coords)
image = draw(image, coords, preds)
cv2.imshow('', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
#window_name='image'
#cv2.imshow(window_name, image)
cv2.waitKey(0)
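The predict() helper is not shown in this excerpt. A minimal sketch of what it might do, under the assumption that a mask classifier (here the hypothetical name mask_model, with an assumed 224x224 input) is loaded elsewhere:

import numpy as np

def predict(image, coords):
    # Hypothetical sketch: crop each detected face, resize to the
    # classifier's input size, and batch-predict. `mask_model` and the
    # 224x224 input size are assumptions, not part of the original.
    faces = []
    for (x, y, w, h) in coords:
        face = cv2.resize(image[y:y + h, x:x + w], (224, 224))
        faces.append(face / 255.0)
    return mask_model.predict(np.array(faces, dtype=np.float32))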
Code Example #5
from detect_face import detect
import cv2

img = cv2.imread("./data/test01.jpg")
print(detect(img))
print(detect(img))
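As a rough, hypothetical stand-in for the project-local detect_face.detect called throughout these examples, a minimal Haar-cascade detector with a similar shape might look like this; each project's real module differs (some return landmarks, scores, or cropped faces as well):

import cv2

def detect(img):
    # Run OpenCV's bundled frontal-face Haar cascade on a BGR image and
    # return (x, y, w, h) boxes. Illustrative sketch only.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    return cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)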
Code Example #6
def main():
    image = cv2.imread('test1.jpg')
    #print(image)
    # The original wrapped this call in np.array(), which is unnecessary
    # for tuple unpacking and can error on ragged results.
    faces, boxes = detect(image)
    count = 0
    for face in faces:
        # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is
        # the usual drop-in replacement.
        scipy.misc.imsave('emotion/real_face/testing/{}.jpg'.format(count),
                          face)
        count += 1

    testing_dir = 'emotion/real_face'
    IMAGE_SIZE = [48, 48]
    testing_datagen = ImageDataGenerator(
        rescale=1. / 255, preprocessing_function=preprocess_input)
    testing_generator = testing_datagen.flow_from_directory(
        testing_dir, target_size=IMAGE_SIZE, batch_size=200)

    model = joblib.load('model.pkl')

    num_step = len(testing_generator)
    print('step should be:', num_step)
    #testing_img = cv2.imread('emotion/testing/1.jpg')
    predictions = model.predict_generator(testing_generator,
                                          steps=num_step,
                                          max_queue_size=10,
                                          workers=1,
                                          use_multiprocessing=False,
                                          verbose=0)
    #predicted_classes = convert_to_class(predictions)
    print(predictions)
    emotions = [
        'anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'
    ]
    num_label = predictions.shape[0]
    labels = []
    for i in range(predictions.shape[0]):
        labels.append(emotions[np.argmax(predictions[i])])

    print(labels)
    # NOTE: this treats 'surprise' as the ground-truth label for every face,
    # so the "accuracy" is specific to this particular test image.
    true_label = labels.count('surprise')
    accuracy = true_label / num_label
    print('accuracy is:', accuracy)
    #confusion_matrix = []
    count = 0
    for emotion in emotions:
        num_emo = labels.count(emotion)
        #confusion_matrix.append(count + ':' + num_emo)
        print(emotion + ':')
        print(num_emo)
        count += 1

    face_list = []
    for ((top, right, bottom, left), label) in zip(boxes, labels):
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(image, label, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 255, 0), 2)
        face_list.append((label, ((top + bottom) / 2, (left + right) / 2)))

    print(face_list)
    cv2.namedWindow("output", cv2.WINDOW_NORMAL)
    # Swap the B and R channels (BGR -> RGB) before saving and display.
    im2 = image.copy()
    im2[:, :, 0] = image[:, :, 2]
    im2[:, :, 2] = image[:, :, 0]
    #imS = cv2.resize(im2, (600, 480))
    scipy.misc.imsave('kkk.jpg', im2)
    # The original displayed into a window named "Image", which bypassed
    # the resizable "output" window created above.
    cv2.imshow("output", im2)
    cv2.waitKey(0)
    return face_list
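Incidentally, the manual R/B channel swap in this example is equivalent to a single OpenCV call:

im2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)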
Code Example #7

if __name__ == '__main__':
    logfile = open('log.xls', 'w+')
    # Header row of the log; stream_entry, cpt, and df are project-local
    # helpers imported earlier in the original script.
    stream_entry('time', 'person', 'bounding box', 'detection_score', 'camera', 'screenshot name')
    cap = cv2.VideoCapture(0)
    cpt.initialize_capture()

    while True:
        hasFrame, frame = cap.read()
        input_key = cv2.waitKey(10)
        if input_key == 27 or not hasFrame:
            break

        frame_start = datetime.datetime.now()
        bounding_boxes, scores = df.detect(frame)

        screenshot_filename = cpt.capture_logic(bounding_boxes, scores, frame, frame_start)

        for i in range(len(bounding_boxes)):
            box = bounding_boxes[i]
            score = scores[i]

            # Clamp the box to the frame; detector coordinates may be
            # floats, so cast to int before slicing.
            height, width, channels = frame.shape
            x = max(int(box[0]), 0)
            x2 = min(int(box[2]), width)
            y = max(int(box[1]), 0)
            y2 = min(int(box[3]), height)

            crop_img = frame[y:y2, x:x2]
            cv2.imshow("Face off", crop_img)
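The excerpt ends inside the capture loop; assuming nothing else follows in the original, a typical teardown after the while loop would be:

    cap.release()
    logfile.close()
    cv2.destroyAllWindows()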