Example #1
import cv2
import mxnet as mx
from insightface.utils import face_align  # provides norm_crop; adjust to your project layout


def image_encode(args, i, item, q_out):
    # Pack a single dataset item into an MXNet RecordIO record and put it on q_out.
    oitem = [item.id]
    #print('flag', item.flag)
    if item.flag == 0:
        fullpath = item.image_path
        header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
        #print('write', item.flag, item.id, item.label)
        if item.aligned:
            with open(fullpath, 'rb') as fin:
                img = fin.read()
            s = mx.recordio.pack(header, img)
            q_out.put((i, s, oitem))
        else:
            img = cv2.imread(fullpath, args.color)
            assert item.landmark is not None
            img = face_align.norm_crop(img, item.landmark)
            s = mx.recordio.pack_img(header,
                                     img,
                                     quality=args.quality,
                                     img_fmt=args.encoding)
            q_out.put((i, s, oitem))
    else:
        header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
        #print('write', item.flag, item.id, item.label)
        s = mx.recordio.pack(header, b'')  # empty payload; bytes are required under Python 3
        q_out.put((i, s, oitem))
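For context, a rough sketch of how image_encode might be driven; the Item layout, the file names and the argparse fields below are illustrative assumptions, not part of the original script.

import argparse
import queue
from collections import namedtuple

import mxnet as mx

# Hypothetical item layout matching the fields image_encode reads.
Item = namedtuple('Item', ['flag', 'id', 'label', 'image_path', 'aligned', 'landmark'])

args = argparse.Namespace(color=1, quality=95, encoding='.jpg')
q_out = queue.Queue()

item = Item(flag=0, id=0, label=0.0, image_path='face_0.jpg',
            aligned=True, landmark=None)
image_encode(args, 0, item, q_out)

# The packed record can then be written out with an indexed RecordIO file.
i, record, oitem = q_out.get()
record_file = mx.recordio.MXIndexedRecordIO('train.idx', 'train.rec', 'w')
record_file.write_idx(oitem[0], record)
record_file.close()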
Example #2
import os

import cv2
import numpy as np
from insightface.utils.face_align import norm_crop  # assumed import path for norm_crop


def crop_face(faces, imgName, imgMat):
    # Crop every detected face out of imgMat using its five landmarks
    # (columns 5:15 of the detector output) and save each crop to ./images.
    savePath = './images'
    imgName = imgName.split('/')[-1]
    print(imgMat.shape)
    for num in range(faces.shape[0]):
        kpoint = faces[num, 5:15]
        kpoint = np.array(kpoint).reshape(5, 2)
        cropImg = norm_crop(imgMat, kpoint)
        cv2.imwrite(os.path.join(savePath, '{}_{}'.format(num, imgName)),
                    cropImg)
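Note that crop_face writes into ./images but never creates it; a caller would typically ensure the directory exists first, for example:

import os

os.makedirs('./images', exist_ok=True)  # crop_face assumes this directory already exists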
Example #3
import cv2
import numpy as np


def get_norm_crop(image_path):
    # Relies on module-level names from the surrounding script: `detector`,
    # `target_size`, `max_size`, `args`, `adjust_bbox` and `face_align`.
    im = cv2.imread(image_path)
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    bbox, landmark = detector.detect(im, threshold=0.5, scales=[im_scale])

    if bbox.shape[0] == 0:
        bbox, landmark = detector.detect(
            im,
            threshold=0.05,
            scales=[im_scale * 0.75, im_scale, im_scale * 2.0])
        # print('refine', im.shape, bbox.shape, landmark.shape)
    nrof_faces = bbox.shape[0]
    if nrof_faces > 0:
        det = bbox[:, 0:4]
        img_size = np.asarray(im.shape)[0:2]
        bindex = 0
        if nrof_faces > 1:
            bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                           det[:, 1])
            img_center = img_size / 2
            offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                                 (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
            bindex = np.argmax(bounding_box_size - offset_dist_squared *
                               2.0)  # some extra weight on the centering

        _bbox = det[bindex, 0:4]

        top, bottom, left, right = adjust_bbox(_bbox, im_shape, 0.1)

        cropped = im[top:bottom, left:right]
        cropped = cv2.resize(cropped, (args.image_size, args.image_size))

        _landmark = landmark[bindex]
        warped = face_align.norm_crop(im,
                                      landmark=_landmark,
                                      image_size=args.image_size,
                                      mode=args.align_mode)
        return warped, cropped
    else:
        return None, None  # keep the return arity consistent with the success branch
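get_norm_crop reads several module-level names (target_size, max_size, detector, args, adjust_bbox). A minimal sketch of that setup, assuming the RetinaFace detector from the insightface repository; the concrete values and the adjust_bbox body are illustrative guesses, not taken from the original script.

import argparse

from retinaface import RetinaFace  # insightface detection/retinaface module

target_size = 400
max_size = 800
args = argparse.Namespace(image_size=112, align_mode='arcface')

# detect(img, threshold=..., scales=[...]) returns (bboxes, landmarks).
detector = RetinaFace('./model/R50', 0, ctx_id=0, network='net3')


def adjust_bbox(bbox, im_shape, margin):
    # Hypothetical helper: pad the box by `margin` of its size and clamp to the image.
    x1, y1, x2, y2 = bbox
    w, h = x2 - x1, y2 - y1
    top = max(int(y1 - margin * h), 0)
    bottom = min(int(y2 + margin * h), im_shape[0])
    left = max(int(x1 - margin * w), 0)
    right = min(int(x2 + margin * w), im_shape[1])
    return top, bottom, left, right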
Example #4
import cv2
import numpy as np


def test(FAPrefix, SSHPrefix, imgPath):
    # FaceAttributesPredictor, SSHDetector, get_images_name_list, norm_crop and
    # show_face_rec_kpoint_attribute are provided by the surrounding project.
    FAPredictor = FaceAttributesPredictor(FAPrefix, epoch=1, ctxId=0)
    faceDetector = SSHDetector(SSHPrefix, epoch=0, ctx_id=0)
    images_name = get_images_name_list(imgPath)
    for img in images_name:
        print('image: %s' % (img))
        imgMat = cv2.imread(img)
        faces = faceDetector.detect(imgMat, threshold=0.8)
        print(faces.shape[0], ' faces detected.\n')
        imgMatList = []
        for num in range(faces.shape[0]):
            kpoint = faces[num, 5:15]
            kpoint = np.array(kpoint).reshape(5, 2)
            cropImg = norm_crop(imgMat, kpoint)
            imgMatList.append(cropImg)
        faResult = FAPredictor.predict(imgMatList)
        assert len(faResult) == len(imgMatList)
        show_face_rec_kpoint_attribute(faces, img, imgMat, faResult)
Example #5
import cv2
import numpy as np
from insightface.utils import face_align  # assumed source of norm_crop


def get_align_input(self, img, points):
    # Method of a preprocessing class: align the face to the ArcFace template,
    # convert BGR to RGB and move channels first (CHW) for the network input.
    nimg = face_align.norm_crop(
        img, landmark=points, image_size=112, mode='arcface')
    nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
    aligned = np.transpose(nimg, (2, 0, 1))
    return aligned
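The CHW array returned above is typically batched and cast to float before being fed to the recognition network; a brief sketch, where the file name, landmark values and dtype choice are assumptions for illustration:

import cv2
import numpy as np

img = cv2.imread('face.jpg')                          # hypothetical input image (BGR)
points = np.array([[38.3, 51.7], [73.5, 51.5],        # five landmarks: eyes, nose,
                   [56.0, 71.7], [41.5, 92.4],        # mouth corners (placeholder
                   [70.7, 92.2]], dtype=np.float32)   # values, not real detections)
aligned = get_align_input(None, img, points)          # `self` is unused, so None is fine here
batch = np.expand_dims(aligned, 0).astype(np.float32)  # shape (1, 3, 112, 112) network input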