Code Example #1
File: face.py  Project: Ekko1992/age_gender_module
    def run_frame(self, frame, fcp):
        age_result = {}
        gender_result = {}
        frame_original = frame.copy()
        (h, w, c) = frame.shape
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        cfg_size = (608, 608)  # keep same as net input
        frame_input = cv2.resize(frame_rgb, cfg_size)
        threshold = 0.24
        dets = libpysunergy.detect(frame_input.data, w, h, c, threshold,
                                   self.net, self.names)

        # dets[i] = (class, prob, left, right, top, bottom)
        for i in range(len(dets)):
            # keep only detections with a valid top edge and a height over 40 px
            if dets[i][4] > 0 and (dets[i][5] - dets[i][4]) > 40:
                [fleft, fright, ftop, fbot] = dets[i][2:6]
                face_img = frame_original[ftop:fbot, fleft:fright].copy()
                (fh, fw, fc) = face_img.shape

                #face recognition
                face_image = cv2.resize(face_img, (112, 96))
                face_image = face_image[:, :, ::-1].transpose((2, 0, 1))
                face_image = (face_image[np.newaxis, :, :, :] - 127.5) / 128.0
                face_image = torch.from_numpy(face_image).float()
                face_image = Variable(face_image).cuda()

                output = self.frnet(face_image).data[0].tolist()
                ret, faceid = fcp.match(output)
                if ret:
                    # known face: reuse the cached "age:gender" label
                    age, gender = faceid.split(':')[0], faceid.split(':')[1]
                else:
                    # unknown face: classify age and gender, then cache the
                    # result so the same person is only classified once
                    dets2 = libpysunergy.predict(face_img.data, fw, fh, fc,
                                                 self.top, self.net2,
                                                 self.names2)
                    age = dets2[0][0]
                    dets3 = libpysunergy.predict(face_img.data, fw, fh, fc,
                                                 self.top, self.net3,
                                                 self.names3)
                    gender = dets3[0][0]
                    age, gender = res_conv(int(age), gender)
                    fcp.insert(output, str(age) + ":" + str(gender))

                    # per-frame stats are updated only for newly cached faces
                    if age not in age_result:
                        age_result[age] = 1
                    else:
                        age_result[age] += 1

                    if gender not in gender_result:
                        gender_result[gender] = 1
                    else:
                        gender_result[gender] += 1

        return age_result, gender_result
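
A minimal usage sketch (not part of the original project) showing how run_frame might be driven over a video. The FaceDetector wrapper class, the FaceCache object standing in for the fcp argument, and the input path are assumptions:

import cv2

detector = FaceDetector()    # assumed wrapper exposing run_frame() and the loaded nets
face_cache = FaceCache()     # assumed cache object exposing match() / insert()

cap = cv2.VideoCapture('input.mp4')  # placeholder path
while True:
    ret, frame = cap.read()
    if not ret:
        break
    age_stats, gender_stats = detector.run_frame(frame, face_cache)
    print(age_stats, gender_stats)
cap.release()
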
Code Example #2
File: face.py  Project: Ekko1992/face_iou_tracker
    def run_frame_det(self, frame, frame_num):
        result = []
        frame_original = frame.copy()
        (h, w, c) = frame.shape
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        cfg_size = (608, 608)  # keep same as net input
        frame_input = cv2.resize(frame_rgb, cfg_size)
        threshold = 0.24
        dets = libpysunergy.detect(frame_input.data, w, h, c, threshold,
                                   self.net, self.names)

        # dets[i] = (class, prob, left, right, top, bottom)
        for i in range(len(dets)):
            # keep only detections with a valid top edge and a height over 40 px
            if dets[i][4] > 0 and (dets[i][5] - dets[i][4]) > 40:
                [fleft, fright, ftop, fbot] = dets[i][2:6]
                # package the detection in the MOT17-style form used by the tracker
                detection = {}
                detection['score'] = float(dets[i][1])
                detection['bbox'] = (float(fleft), float(ftop), float(fright),
                                     float(fbot))
                result.append(detection)
        return result
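
A hedged sketch (assumed names, not from the project) of dumping run_frame_det output as a MOT17-style det.txt, converting the (left, top, right, bottom) bbox to the left, top, width, height layout that format uses:

import cv2

cap = cv2.VideoCapture('input.mp4')  # placeholder path
tracker = FaceTracker()              # assumed instance of the class defining run_frame_det
with open('det.txt', 'w') as det_file:
    frame_num = 1
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        for det in tracker.run_frame_det(frame, frame_num):
            left, top, right, bottom = det['bbox']
            det_file.write('%d,-1,%.1f,%.1f,%.1f,%.1f,%.2f,-1,-1,-1\n' %
                           (frame_num, left, top, right - left, bottom - top,
                            det['score']))
        frame_num += 1
cap.release()
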
Code Example #3
def predict(ori, dest):
    avi = dest.split('.')[0] + '.avi'
    threshold = 0.24

    #load video
    cap = cv2.VideoCapture(ori)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print(fps)

    #load face detector
    net1, names = libpysunergy.load('data/face.data', 'cfg/yolo-face.cfg',
                                    'weights/yolo-face.weights')

    # load the age, gender, and race classifiers
    net2, names2 = libpysunergy.load("data/age.data", "cfg/age.cfg",
                                     "weights/age.weights")
    net3, names3 = libpysunergy.load("data/gender.data", "cfg/gender.cfg",
                                     "weights/gender.weights")
    net4, names4 = libpysunergy.load("data/race.data", "cfg/race.cfg",
                                     "weights/race.weights")
    top = 1

    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    videoWriter = cv2.VideoWriter(avi, fourcc, fps, (width, height))

    font = ImageFont.truetype("Roboto-Regular.ttf", 20)

    count = 1
    # face detection loop over every frame
    while True:
        print(count)
        count += 1
        ret, frame = cap.read()
        if not ret:
            break
        cv2_im = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im)
        draw = ImageDraw.Draw(pil_im)

        if ret:
            (h, w, c) = frame.shape
            dets = libpysunergy.detect(frame.data, w, h, c, threshold, net1,
                                       names)
            #crop face and predict AGE
            for i in range(0, len(dets)):
                if dets[i][0] == 'face':
                    box = dets[i][2:6]
                    x0 = int(box[0])
                    x1 = int(box[1])
                    y0 = int(box[2])
                    y1 = int(box[3])
                    faceimg = frame[y0:y1, x0:x1].copy()
                    (h, w, c) = faceimg.shape

                    #draw bounding box
                    draw.rectangle(
                        ((x0, y0), (x1, y1)),
                        outline="red",
                    )
                    draw.rectangle(
                        ((x0 + 1, y0 + 1), (x1 - 1, y1 - 1)),
                        outline="red",
                    )
                    dets2 = libpysunergy.predict(faceimg.data, w, h, c, top,
                                                 net2, names2)
                    age = dets2[0][0]
                    dets3 = libpysunergy.predict(faceimg.data, w, h, c, top,
                                                 net3, names3)
                    gender = dets3[0][0]
                    dets4 = libpysunergy.predict(faceimg.data, w, h, c, top,
                                                 net4, names4)
                    race = dets4[0][0]

                    #write classification
                    draw.text((x0, y0 - 60),
                              'Age: ' + age, (255, 0, 0),
                              font=font)
                    draw.text((x0, y0 - 40),
                              'Gender: ' + gender, (255, 0, 0),
                              font=font)
                    draw.text((x0, y0 - 20),
                              'Race: ' + race, (255, 0, 0),
                              font=font)

            # convert the annotated PIL image back to BGR and write the frame
            frame_out = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
            videoWriter.write(frame_out)


    cap.release()
    videoWriter.release()  # flush and finalize the AVI before converting it
    libpysunergy.free(net1)
    libpysunergy.free(net2)
    libpysunergy.free(net3)
    libpysunergy.free(net4)
    #convert avi video to mp4
    print('converting avi video to mp4 ...')
    cm.getstatusoutput('ffmpeg -i ' + avi + ' -c:v libx264 -crf ' +
                       str(int(fps)) +
                       ' -preset slow -c:a libfdk_aac -b:a 192k -ac 2 ' + dest)
    cm.getstatusoutput('rm ' + avi)
Code Example #4
File: face.py  Project: Ekko1992/face_iou_tracker
    def run_frame_visual(self, frame, frame_num, fcp):
        fff = open('result/result.txt', 'a')  # append this frame's results to the log
        dic = {}
        '''
        while 1:
            line  = fff.readline()
            if not line:
                break
            id = line.split(' ')[0] + '-'+line.split(' ')[1]
            dic[id] = line.split(' ')[2]+'_'+line.split(' ')[3]
        '''
        age_result = {}
        gender_result = {}
        frame_original = frame.copy()
        (h, w, c) = frame.shape
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        cfg_size = (608, 608)  # keep same as net input
        frame_input = cv2.resize(frame_rgb, cfg_size)
        threshold = 0.24
        dets = libpysunergy.detect(frame_input.data, w, h, c, threshold,
                                   self.net, self.names)
        dets_num = len(dets)

        # track the left-most and right-most face x-positions in this frame
        fmin = 9999
        fmax = -1

        for i in range(len(dets)):

            if dets[i][4] > 0 and (dets[i][5] - dets[i][4]) > 40 and (
                    dets[i][3] - dets[i][2]) > 20 and dets[i][5] < 150:
                [fleft, fright, ftop, fbot] = dets[i][2:6]
                if fleft < fmin:
                    fmin = fleft
                if fleft > fmax:
                    fmax = fleft

        for i in range(len(dets)):

            if dets[i][4] > 0 and (dets[i][5] - dets[i][4]) > 40 and (
                    dets[i][3] - dets[i][2]) > 20 and dets[i][5] < 150:
                [fleft, fright, ftop, fbot] = dets[i][2:6]
                face_img = frame_original[ftop:fbot, fleft:fright].copy()
                (fh, fw, fc) = face_img.shape

                #face recognition
                face_image = cv2.resize(face_img, (112, 96))
                face_image = face_image[:, :, ::-1].transpose((2, 0, 1))
                face_image = (face_image[np.newaxis, :, :, :] - 127.5) / 128.0
                face_image = torch.from_numpy(face_image).float()
                face_image = Variable(face_image).cuda()

                output = self.frnet(face_image).data[0].tolist()
                ret, faceid = fcp.match(output)
                if ret:
                    # known face: reuse the cached "age:gender" label
                    age, gender = faceid.split(':')[0], faceid.split(':')[1]
                else:
                    # unknown face: classify age and gender, then cache the result
                    dets2 = libpysunergy.predict(face_img.data, fw, fh, fc,
                                                 self.top, self.net2,
                                                 self.names2)
                    age = dets2[0][0]
                    dets3 = libpysunergy.predict(face_img.data, fw, fh, fc,
                                                 self.top, self.net3,
                                                 self.names3)
                    gender = dets3[0][0]
                    age, gender = res_conv(int(age), gender)
                    fcp.insert(output, str(age) + ":" + str(gender))
                # hard-coded overrides for the left-most and right-most faces
                if fleft == fmin:
                    age = '30-35'
                    gender = 'Female'
                if fleft == fmax:
                    age = '30-35'
                    gender = 'Male'

                fff.write(
                    str(frame_num) + ' ' + str(fleft) + ' ' + age + ' ' +
                    gender + '\n')
                '''
                if str(frame_num) +'-'+str(fleft) in dic:
                    age = dic[str(frame_num)+'-'+str(fleft)].split('_')[0]
                    gender = dic[str(frame_num)+'-'+str(fleft)].split('_')[1][:-1]
                else:
                    continue
                '''
                cv2.rectangle(frame, (fleft, ftop), (fright, fbot),
                              (0, 0, 255), 2)
                frame = cv2.putText(frame, 'Age:' + age, (fleft, ftop),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                    (255, 255, 255), 1)
                frame = cv2.putText(frame, 'Gender:' + gender,
                                    (fleft, ftop + 20),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                    (255, 255, 255), 1)

        fff.close()
        return frame
Code Example #5
def predict(net0, net1, net2, names0, names1, names2, frame):
    age_frame, gender_frame = static_init()
    try:

        (img_h, img_w, img_c) = frame.shape
        img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        cfg_size = (1024, 1024)
        img_input = cv2.resize(img_rgb, cfg_size)

        faces = libpysunergy.detect(img_input.data, img_w, img_h, img_c,
                                    threshold, net0, names0)

        facenum = len(faces)

        # faces[i] = (py_names[class], prob, left, right, top, bot)
        for i in range(facenum):
            x0, x1, y0, y1 = faces[i][2], faces[i][3], faces[i][4], faces[i][5]

            # clamp the box to the image boundaries
            x0 = max(x0, 0)
            y0 = max(y0, 0)
            x1 = min(x1, img_w - 1)
            y1 = min(y1, img_h - 1)

            faceimg = img_rgb[y0:y1, x0:x1].copy()

            (h, w, c) = faceimg.shape
            #get result from sunergy
            dets1 = libpysunergy.predict(faceimg.data, w, h, c, top, net1,
                                         names1)
            age = int(dets1[0][0])

            dets2 = libpysunergy.predict(faceimg.data, w, h, c, top, net2,
                                         names2)
            gender = dets2[0][0]

            if gender == 'male':
                gender_frame['male'] += 1
            elif gender == 'female':
                gender_frame['female'] += 1

            # bucket the predicted age into the 5-year histogram bins
            if age < 20:
                age_frame['<20'] += 1
            elif age < 25:
                age_frame['20-25'] += 1
            elif age < 30:
                age_frame['25-30'] += 1
            elif age < 35:
                age_frame['30-35'] += 1
            elif age < 40:
                age_frame['35-40'] += 1
            elif age < 45:
                age_frame['40-45'] += 1
            elif age < 50:
                age_frame['45-50'] += 1
            elif age < 55:
                age_frame['50-55'] += 1
            elif age < 60:
                age_frame['55-60'] += 1
            else:
                age_frame['>60'] += 1

        return age_frame, gender_frame

    except Exception as e:
        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno),
              type(e).__name__, e)
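
A small aggregation sketch (assumed driver code, not part of the original) showing how the per-frame dictionaries returned by predict could be summed over a whole video; it assumes the nets and name lists were already loaded with libpysunergy.load as in the earlier examples:

import cv2
from collections import Counter

cap = cv2.VideoCapture('input.mp4')  # placeholder path
total_age, total_gender = Counter(), Counter()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    age_frame, gender_frame = predict(net0, net1, net2, names0, names1,
                                      names2, frame)
    total_age.update(age_frame)
    total_gender.update(gender_frame)
cap.release()
print(dict(total_age), dict(total_gender))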