Example #1
def main():
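    # model_folder must contain the MTCNN cascade model files; num_worker sizes
    # the detector's worker pool, and accurate_landmark=True enables the extra
    # landmark-refinement stage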
    detector = MtcnnDetector(model_folder='model', ctx=mx.cpu(0), num_worker=1, accurate_landmark=True)

    img = cv2.imread('test2.jpg')

    # run detector
    results = detector.detect_face(img)

    if results is not None:
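        # detect_face returns (boxes, points): boxes is an N x 5 array of
        # [x1, y1, x2, y2, score]; points is N x 10 per face, laid out
        # [x0..x4, y0..y4]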

        total_boxes = results[0]
        points = results[1]

        # extract aligned face chips
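        # (128 is the output chip size in pixels, 0.37 the padding ratio)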
        chips = detector.extract_image_chips(img, points, 128, 0.37)
        for i, chip in enumerate(chips):
            cv2.imshow('chip_' + str(i), chip)
            cv2.imwrite('chip_'+str(i)+'.png', chip)

        draw = img.copy()
        for b in total_boxes:
            cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))

        for p in points:
            for i in range(5):
                cv2.circle(draw, (int(p[i]), int(p[i + 5])), 1, (0, 0, 255), 2)

        cv2.imshow("detection result", draw)
        cv2.waitKey(0)
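
# The snippets in these examples omit their imports. A minimal self-contained
# sketch of the same flow, assuming the mxnet_mtcnn_face_detection repo layout
# (its mtcnn_detector.py importable and the pretrained models under model/):
import cv2
import mxnet as mx
from mtcnn_detector import MtcnnDetector  # module name assumed from the repo

def detect_one(path='test2.jpg'):
    detector = MtcnnDetector(model_folder='model', ctx=mx.cpu(0),
                             num_worker=1, accurate_landmark=True)
    img = cv2.imread(path)
    if img is None:
        raise IOError('cannot read ' + path)
    results = detector.detect_face(img)
    if results is None:
        return None
    boxes, points = results
    return detector.extract_image_chips(img, points, 128, 0.37)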
Example #2
def detect_face(image_path):
    #time_start=time.time()
    tiny_face_path = './result/tiny_face/'
    if not os.path.exists(tiny_face_path):
        os.makedirs(tiny_face_path)
    else:
        clean(tiny_face_path)

    detector = MtcnnDetector(model_folder='model',
                             ctx=mx.cpu(0),
                             num_worker=4,
                             accurate_landmark=False)
    print('detector', detector)
    img = cv2.imread(image_path)
    #print('img:',img)
    # run detector
    results = detector.detect_face(img)
    if results is not None:
        total_boxes = results[0]
        points = results[1]
        # extract aligned face chips
        chips = detector.extract_image_chips(img, points, 160, 0.37)

        for i, chip in enumerate(chips):
            cv2.imwrite(tiny_face_path + 'chip_' + str(i) + '.jpg', chip)
Example #3
def main():
    global args
    args = parser.parse_args()
    detector = MtcnnDetector(model_folder='model',
                             ctx=mx.cpu(0),
                             num_worker=args.num_workers,
                             accurate_landmark=False)

    for root, dirs, files in os.walk(args.root_path):
        try:
            # only the first file's extension is checked; folders are assumed homogeneous
            if files[0].split('.')[1] != 'jpg':
                continue
        except IndexError:  # empty directory
            continue

        # make dirs
        if not os.path.exists(args.rotate_path):
            os.makedirs(args.rotate_path)
        if not os.path.exists(args.crop_path):
            os.makedirs(args.crop_path)

        for image in files:
            img = cv2.imread(root + '/' + image)

            # run detector first round
            results = detector.detect_face(img)

            if results is not None:
                total_boxes = results[0]
                points = results[1]
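                # points[0] is the first face's landmarks, [x0..x4, y0..y4]:
                # index i is an x coordinate and index i + 5 the matching y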

                leftx = points[0][0]
                lefty = points[0][5]
                rightx = points[0][1]
                righty = points[0][6]

                # rotate face
                img_tmp = PIL.Image.open(root + '/' + image)
                CropFace(img_tmp,
                         eye_left=(leftx, lefty),
                         eye_right=(rightx, righty),
                         offset_pct=(0.1, 0.1),
                         dest_sz=(200,
                                  200)).save(args.rotate_path + '/' + image)

            # run detector second round
            draw = cv2.imread(args.rotate_path + '/' + image)
            if draw is None:  # first round found no face, so no rotated image was saved
                continue
            results = detector.detect_face(draw)

            if results is not None:
                total_boxes = results[0]
                points = results[1]

                # extract aligned face chips
                chips = detector.extract_image_chips(draw, points, 144,
                                                     args.crop_size)
                for i, chip in enumerate(chips):
                    # note: each chip overwrites the same path, so only the last face is kept
                    cv2.imwrite(args.crop_path + '/' + image, chip)
                    print(args.crop_path + '/' + image)
Example #4
def detect_face(img, img_size):
    detector = MtcnnDetector(model_folder='./model', ctx=mx.cpu(0), num_worker=1, accurate_landmark=False)
    results = detector.detect_face(img)
    if results is None:
        return 0  # note: 0 on failure, a (chips, boxes) tuple on success
    total_boxes = results[0]
    points = results[1]
    face_crops = detector.extract_image_chips(img, points, img_size, 0.37)
    '''
    draw = img.copy()
    for b in total_boxes:
        cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))
    for p in points:
        for i in range(5):
            cv2.circle(draw, (p[i], p[i + 5]), 1, (255, 0, 0), 2)
    '''
    return face_crops, total_boxes
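
# Since detect_face returns 0 on failure but a tuple on success, callers must
# check before unpacking; a brief usage sketch (input path hypothetical):
def _example_usage(path='some_photo.jpg'):
    img = cv2.imread(path)
    result = detect_face(img, 128)
    if result == 0:
        print('no face detected')
        return None
    face_crops, total_boxes = result
    return face_crops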
Example #5
def face_to_file(dest, task):

    # originally meant to run on the second and third of at least three GPUs;
    # the GPU ctx is commented out below
    detector = MtcnnDetector(
        model_folder='model',
        ctx=mx.cpu(0),
        #ctx=mx.gpu(int(task[0] / 2) + 1),
        num_worker=4,
        accurate_landmark=False)

    for file in task[1]:
        path_component = os.path.normpath(file).split(os.path.sep)

        for i, c in enumerate(path_component):
            if c == '..':
                path_component[i] = ''
        try:
            path_component.remove('')
        except ValueError:  # nothing to remove
            pass
        path_component[0] = dest

        basename = os.path.basename(file)
        jpg_dest = os.path.join(*path_component)
        makedir(jpg_dest)

        # command = 'ffmpeg -loglevel quiet -i {0} -vf fps=1 {1}/{2}_%03d.jpg'.format(
        #     file, jpg_dest, basename)

        img = cv2.imread(file)
        results = detector.detect_face(img)
        if results is not None:
            total_boxes = results[0]
            points = results[1]

            # extract aligned face chips
            chips = detector.extract_image_chips(img, points, 256, 0.37)
            for i, chip in enumerate(chips):
                cv2.imwrite('{0}/{1}_{2}.jpg'.format(jpg_dest, basename, i),
                            chip)
        else:
            print('no face in ', file)
Example #6
class VideoDetector(object):
    def __init__(self, arguments, mx_context):
        self.args = arguments
        self.ctx = mx_context
        self.model = face_model.FaceModel(arguments)  # use the passed-in arguments, not a global
        self.detector = MtcnnDetector(model_folder='mtcnn-model/',
                                      ctx=self.ctx,
                                      num_worker=4,
                                      accurate_landmark=False)
        self.names = None  # Names of the persons in the dataset
        self.dataset = None  # Collection of features of known names

    def prepare_faces(self, dataset_name='dataset.pkl'):
        image_names = os.listdir(self.args.faces_dir)
        face_names = set([x.split('_')[0] for x in image_names])

        dataset = {}
        for name in face_names:
            images = [
                cv2.imread(os.path.join(self.args.faces_dir, iname))
                for iname in image_names if name in iname
            ]
            features = [
                self.model.get_feature(self.model.get_input(img))
                for img in images
            ]
            features = np.stack(features)
            dataset[name] = features

        dataset_path = os.path.abspath(os.path.join(self.args.faces_dir, '..'))

        with open(dataset_path + '/' + dataset_name, 'wb') as f:
            pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)

    def detect(self):
        if self.dataset is None:
            self.load_features()
        cap = cv2.VideoCapture(self.args.in_file)  # Create a VideoCapture object
        frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))   # resolutions come back
        frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # as floats; cast to int

        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        renders = []

        frame_time = np.array([])
        for _ in tqdm(range(total_frames)):
            start = time()
            ret, frame = cap.read()
            if ret:
                render = self.detect_faces(frame)
                renders.append(render)
            frame_time = np.append(frame_time, time() - start)
        cap.release()
        return renders, {
            'w': frame_w,
            'h': frame_h
        }, {
            'fr_exec': frame_time.mean()
        }

    def load_features(self, dataset_name='dataset.pkl'):
        dataset_path = os.path.abspath(os.path.join(self.args.faces_dir, '..'))
        with open(dataset_path + '/' + dataset_name,
                  'rb') as f:  # Load Dataset on numpy format
            np_dataset = pickle.load(f)
        # Create dictionary with person names and their corresponding feature index
        self.names = {}
        i = 0
        for k, v in np_dataset.items():
            self.names[k] = slice(i, i + v.shape[0])
            i += v.shape[0]
        # Transform dataset to mx NDarray format
        self.dataset = nd.array(np.concatenate(
            [v for v in np_dataset.values()]),
                                ctx=self.ctx)

    def draw_names(self, frame, names):
        # names: dict{'name' : bounding_box}
        colors = box_colors[:len(names)]
        for (name, b), c in zip(names.items(), colors):
            if name == 'unknown':
                for x in b:
                    cv2.rectangle(frame, (int(x[0]), int(x[1])),
                                  (int(x[2]), int(x[3])), colors[-1], 2)
                    # cv2.putText(frame, 'unknown', (int(b[0]),int(b[1])), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 2, cv2.LINE_AA)
            else:
                cv2.rectangle(frame, (int(b[0]), int(b[1])),
                              (int(b[2]), int(b[3])), c, 2)
                cv2.putText(frame, name, (int(b[0]), int(b[1])),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3,
                            cv2.LINE_AA)
        return frame

    def name_faces(self, persons, total_boxes):
        faces_names = {}
        unknown_faces = []
        for person, box in zip(persons, total_boxes):
            face = self.model.get_input(person)
            if face is None:
                continue
            face = nd.array(self.model.get_feature(face), ctx=self.ctx)

            # Calculate the similarity between the known features and the current face feature
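            # (a dot product; if get_feature returns L2-normalized embeddings, as
            # insightface models typically do, this equals cosine similarity)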
            sim = nd.dot(self.dataset, face)
            scores = {}
            for known_id, index in self.names.items():
                scores[known_id] = max(sim[index]).asnumpy()

            if max(scores.values()) > self.args.threshold_face:
                faces_names[max(scores, key=scores.get)] = box
            else:
                unknown_faces.append(box)

        if len(unknown_faces):
            faces_names['unknown'] = unknown_faces

        return faces_names

    def detect_faces(self, frame):
        resolution = int(self.args.image_size.split(',')[0])
        # run detector
        results = self.detector.detect_face(frame)
        if results is not None:
            total_boxes = results[0]
            points = results[1]
            # extract aligned face chips
            persons = self.detector.extract_image_chips(
                frame, points, resolution, 0.37)
            if self.args.recognize:
                faces_names = self.name_faces(persons, total_boxes)
            else:
                faces_names = {'unknown': list(total_boxes)}
            return self.draw_names(frame, faces_names)

        else:
            return frame
Example #7
from utils.gen_loc import box_to_location, fillter_outlier

detector = MtcnnDetector(model_folder='model', ctx=mx.cpu(0), num_worker=4, accurate_landmark=False)

img = cv2.imread('2019MSEtest.jpeg')

# run detector
results = detector.detect_face(img)

if results is not None:
    total_boxes = fillter_outlier(results[0])
    points = results[1]

    # print(total_boxes)
    # extract aligned face chips
    chips = detector.extract_image_chips(img, points, 144, 0.37)
    # for i, chip in enumerate(chips):
    #     cv2.imshow('chip_'+str(i), chip)
    #     cv2.imwrite('chip_'+str(i)+'.png', chip)

    draw = img.copy()
    for b in total_boxes:
        cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 0, 255), thickness=1)

    # for p in points:
    #     for i in range(5):
    #         cv2.circle(draw, (p[i], p[i + 5]), 1, (0, 0, 255), 2)

    # w, h = img.shape[0:2]
    # draw = cv2.resize(draw, (int(h / 3), int(w / 3)))
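
    # The example stops after drawing; to view or save the annotated image,
    # something like this would follow (output name hypothetical):
    cv2.imwrite('detections.jpg', draw)
    # cv2.imshow('detection result', draw)
    # cv2.waitKey(0)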
Example #8
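# This fragment begins inside a per-file loop whose opening is missing from the
# excerpt. A hedged reconstruction of the enclosing scaffolding (dirc_path,
# file_path, dirc_path_new, and the detector setup are assumptions inferred
# from the code below):
#
#     start = time.time()
#     error_img, cnt = [], 0
#     for file in os.listdir(dirc_path):
#         file_path = os.path.join(dirc_path, file)
#         img = cv2.imread(file_path)
#         results = detector.detect_face(img)
#         if results is None:
#             # the excerpt picks up here: log the failure, save a plain resize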
            error_img.append([file_path])
            img = cv2.resize(img, (112, 112))
            cv2.imwrite(dirc_path_new + '/' + file, img)
        else:
            total_boxes = results[0]
            points = results[1]
            index = 0

            if len(total_boxes) > 1:
                area = []
                for box in total_boxes:
                    area.append((box[2] - box[0]) * (box[3] - box[1]))
                index = area.index(max(area))

            # extract aligned face chips
            chip = detector.extract_image_chips(img, [points[index]], 112,
                                                0.37)
            cv2.imwrite(dirc_path_new + '/' + file, chip[0])

        cnt += 1
        if cnt % 1000 == 0:
            print('Current step', cnt)

end = time.time()
print(end - start)

with open(error_path, 'w') as f:
    writer = csv.writer(f)
    writer.writerows(error_img)

print('error img num', len(error_img))
print('total img num', cnt)
Example #9
def main():
    args = get_args()
    src_dir = args.src
    if not os.path.exists(src_dir):
        raise ValueError("src dir does not exist: {}".format(src_dir))

    split_ratio = args.split

    dst_dir = os.path.abspath(args.dst)
    err_dir = os.path.abspath(args.err)

    num_gpus = args.ngpus
    if num_gpus == -1:
        num_gpus = len(mx.test_utils.list_gpus())
    if num_gpus == 0:
        ctx = mx.cpu(0)
    else:
        ctx = [mx.gpu(i) for i in range(num_gpus)]

    print("src dir={} dst dir={} err_dir={} gpu={}".format(src_dir, dst_dir, err_dir, num_gpus))
    detector = MtcnnDetector(model_folder='model', ctx=ctx, num_worker=args.workers, accurate_landmark=False)

    file_count = 0
    for root, dirs, files in os.walk(src_dir):
        relpath = os.path.relpath(root, src_dir)
        # dd = os.path.join(dst_dir, relpath)
        ed = os.path.join(err_dir, relpath)
        class_data_written = False  # becomes True once a training sample exists for this class
        for filename in files:
            # note: cv2.imread cannot decode .gif, so gif inputs fail and land in err_dir
            if filename.lower().endswith(('.jpg', '.jpeg', '.gif', '.png')):
                absfile = os.path.join(root, filename)
                success = False
                try:
                    # warning: cv2.imread does not handle file names with unicode characters
                    img = cv2.imread(absfile)

                    # run detector
                    results = detector.detect_face(img)

                    if results is not None:

                        total_boxes = results[0]
                        points = results[1]

                        # keep only the largest detected face, by box area
                        bigbox_idx = np.argmax([(b[2] - b[0]) * (b[3] - b[1]) for b in total_boxes])

                        # extract aligned face chips
                        chips = detector.extract_image_chips(img, points[bigbox_idx:bigbox_idx + 1], args.size,
                                                             args.padding)
                        for i, chip in enumerate(chips):
                            if split_ratio > 0:
                                if not class_data_written:
                                    ab = "train"
                                    class_data_written = True
                                    # ensure the validation set gets the same class
                                    # labels as the training set; see the source of
                                    # pytorch's DatasetFolder
                                    os.makedirs(os.path.join(dst_dir, "val", relpath), exist_ok=True)
                                else:
                                    ab = "val" if random.random() > split_ratio else "train"
                                dd = os.path.join(dst_dir, ab, relpath)
                                os.makedirs(dd, exist_ok=True)
                                cv2.imwrite(os.path.join(dd, os.path.splitext(filename)[0] + ".png"),
                                            chip)
                            else:
                                dd = os.path.join(dst_dir, relpath)
                                os.makedirs(dd, exist_ok=True)
                                cv2.imwrite(os.path.join(dd, os.path.splitext(filename)[0] + ".png"),
                                            chip)
                            success = True

                except Exception as e:
                    print(relpath, filename, e)

                if not success:
                    os.makedirs(ed, exist_ok=True)
                    shutil.copyfile(absfile, os.path.join(ed, filename))

                file_count = file_count + 1
                if file_count % 1000 == 0:
                    print(file_count)
Example #10
class Filter:
    def __init__(self,
                 img_path,
                 log_path,
                 meta_path,
                 purges=True,
                 only_one=False):
        self.detector = MtcnnDetector(model_folder='model',
                                      ctx=mx.gpu(0),
                                      num_worker=WORKER_COUNT,
                                      accurate_landmark=False)
        self.img_path = img_path
        self.success_logs_path = os.path.join(log_path, 'successes/')
        self.folders = [x for x in glob(img_path + "*")]
        self.log_path = log_path
        self.file_log = os.path.join(log_path, 'lists/')
        data_log = os.path.join(log_path, 'bboxes/')
        if purges:
            _purge(data_log, 'path')
        self.b_log = os.path.join(data_log, 'b/')  # bbox
        self.p_log = os.path.join(data_log, 'p/')  # facial points
        self.save_path = os.path.join(log_path, 'imgs/')
        # meta
        self.meta_path = meta_path
        self.meta_records = os.path.join(meta_path, 'records/')
        self.meta_rois = os.path.join(meta_path, 'rois/')
        self.only_one_mode = only_one
        # validate the expected meta file structure
        if not (os.path.isdir(self.meta_records) and os.path.isdir(self.meta_rois)):
            raise Exception('meta record broken.')
        # reset logs
        if purges:
            _purge(self.save_path, 'path')
            _purge(self.file_log, 'path')
            _purge(self.b_log, 'path')
            _purge(self.p_log, 'path')

    def _intersect_ratio(self, box, rois):
        """Calculate max intersect ratio between a box and list of candidate, ratio relative to the box.
        E.g: area(overlap) / area(box)
        Note: https://stackoverflow.com/questions/27152904

        :param box: tuple, format: x, y, w, h.
        :param rois: list, a list of rois. Same format as box.
        :return: float, a number between 0 and 1.
        """
        res = 0
        assert (box[2] >= box[0])
        assert (box[3] >= box[1])
        box_area = (box[2] - box[0]) * (box[3] - box[1])
        if box_area == 0:
            return 0
        for roi in rois:
            assert (roi[2] >= roi[0])
            assert (roi[3] >= roi[1])
            dx = min(roi[2], box[2]) - max(roi[0], box[0])
            dy = min(roi[3], box[3]) - max(roi[1], box[1])
            if (dx >= 0) and (dy >= 0):
                res = max(res, (dx * dy) / box_area)  # keep the max over all rois
        assert (res >= 0)
        assert (res <= 1)
        return res

    def _detect_img(self,
                    img_path,
                    img_id,
                    save_paths,
                    file_log,
                    b_log,
                    p_log,
                    rejection_log,
                    rois,
                    min_confidence=0.9,
                    min_resolution=24):
        print('Detecting:', img_path)
        img = cv2.imread(img_path)
        # run detector
        results = self.detector.detect_face(img)
        # filter out low-resolution detections: this improves the next-stage CNN's
        # accuracy and reduces false positives at this stage
        total_boxes = []
        points = []
        if results is not None:
            for i, b in enumerate(results[0]):
                # check resolution, confidence, and intersect ratio to rcnn meta.
                if (b[2]-b[0] >= min_resolution) and (b[3]-b[1] >= min_resolution) and \
                        (b[4] >= min_confidence) and \
                        (self._intersect_ratio(b, rois) >= 0.7):
                    total_boxes.append(b)
                    points.append(results[1][i])
                else:  # rejected by filter
                    # write format: img_id, rejection code(Fail on filter: F),  x1, y1, x2, y2, confidence.
                    _log_one_line(
                        rejection_log,
                        '{} {} {} {} {} {} {}'.format(img_id, 'F', b[0], b[1],
                                                      b[2], b[3], b[4]))
        if (results is None) or (len(total_boxes) == 0):
            _log_one_line(file_log, '{} 0'.format(img_id))
            return

        # extract aligned face chips
        chips = self.detector.extract_image_chips(img, points, 144, 0.37)
        face_num = len(chips)
        _log_one_line(file_log, '{} {}'.format(img_id, face_num))
        if self.only_one_mode:
            # more than one face
            if face_num != 1:
                # Log as failures and record no image for this file.
                # Note: since we have plenty of data and want to reduce noise as much
                # as possible, recording less noise beats recording more data.
                for i, b in enumerate(results[0]):
                    # log as failure code M
                    _log_one_line(
                        rejection_log,
                        '{} {} {} {} {} {} {}'.format(img_id, 'M', b[0], b[1],
                                                      b[2], b[3], b[4]))
                return  # end operation
            save_path = save_paths[0]
            cv2.imwrite(os.path.join(save_path, '{}.jpg'.format(img_id)),
                        chips[0])
        else:
            assert (len(save_paths) == 3)
            if face_num == 1:
                save_path = save_paths[0]
            elif face_num == 2:
                save_path = save_paths[1]
            else:
                save_path = save_paths[2]
            for ind, chip in enumerate(chips):
                cv2.imwrite(
                    os.path.join(save_path, '{}_{}.jpg'.format(img_id, ind)),
                    chip)
        # write boxes
        for ind, b in enumerate(total_boxes):
            # write format: img_id, x1, y1, x2, y2, confidence.
            _log_one_line(
                b_log, '{} {} {} {} {} {} {}'.format(img_id, ind, b[0], b[1],
                                                     b[2], b[3], b[4]))
        for ind, p in enumerate(points):
            for i in range(5):
                # write format: img_id, face index, landmark index, x, y
                _log_one_line(
                    p_log, '{} {} {} {} {}'.format(img_id, ind, i, p[i],
                                                   p[i + 5]))

    def write_all(self):
        # some extra logging info
        _total = len(self.folders)
        _count = 0
        for folder in self.folders:
            category_id = folder.split('/')[-1]
            big_ass_warning('CATEGORY: {}, PROGRESS: {:.4f}%'.format(
                category_id,
                float(_count) * 100 / _total))
            # load meta
            _meta_path = os.path.join(self.meta_rois,
                                      '{}.txt'.format(category_id))
            if not os.path.isfile(_meta_path):
                # the meta file may be missing when no person appears in this category
                continue
            with open(_meta_path) as _f:
                _lines = _f.readlines()
            records = {}  # meta key: image_id, value: roi
            for line in _lines:
                _raw = line.strip().split()
                assert (len(_raw) == 6)
                _img_id = _raw[0]
                roi = [
                    int(_raw[2]),
                    int(_raw[3]),
                    int(_raw[4]) + int(_raw[2]),
                    int(_raw[5]) + int(_raw[3]),
                ]  # x1, y1, x2, y2
                if _img_id in records:
                    records[_img_id].append(roi)
                else:
                    records[_img_id] = [roi]
            # build log system
            file_log = os.path.join(self.file_log,
                                    '{}.txt'.format(category_id))
            _purge(file_log, 'file')
            c_b_log = os.path.join(self.b_log, '{}.txt'.format(category_id))
            _purge(c_b_log, 'file')
            c_p_log = os.path.join(self.p_log, '{}.txt'.format(category_id))
            _purge(c_p_log, 'file')
            c_rj_log = os.path.join(self.b_log, 'r{}.txt'.format(category_id))
            _purge(c_rj_log, 'file')
            save_path = os.path.join(self.save_path, '{}/'.format(category_id))
            _purge(save_path, 'path')
            save_paths = []
            if self.only_one_mode:
                save_paths = [save_path]
            else:
                for i in range(3):  # three tiers by number of people in frame
                    p = os.path.join(save_path, '{}/'.format(i + 1))
                    _purge(p, 'path')
                    save_paths.append(p)
            # write data
            for file in os.listdir(folder):
                if file.endswith(".jpg"):
                    _path = os.path.join(folder, file)
                    img_id = str(int(file.split('.')[0])).zfill(6)
                    if img_id not in records:  # zero persons in this image, per the meta data
                        continue
                    rois = records[img_id]
                    # Process when only_one_mode is disabled, or when it is enabled
                    # and the meta rois show exactly one person in the frame.
                    if (not self.only_one_mode) or (len(rois) == 1):
                        self._detect_img(img_path=_path,
                                         img_id=img_id,
                                         save_paths=save_paths,
                                         file_log=file_log,
                                         b_log=c_b_log,
                                         p_log=c_p_log,
                                         rejection_log=c_rj_log,
                                         rois=rois)
            _count += 1
Example #11
def alignMain(args):
    mkdirP(args.outputDir)

    imgs = list(iterImgs(args.inputDir))

    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)

    align = MtcnnDetector(model_folder=mtcnn_dir + 'model',
                          ctx=mx.gpu(int(args.gpus.split(',')[0])),
                          num_worker=4,
                          minsize=50,
                          accurate_landmark=True)
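    # (minsize is the smallest face size, in pixels, the detector will search for)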

    nFallbacks = 0
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        outDir = os.path.join(args.outputDir, imgObject.cls)
        mkdirP(outDir)
        outputPrefix = os.path.join(outDir, imgObject.name)
        imgName = outputPrefix + "." + args.ext

        if os.path.isfile(imgName):
            if args.verbose:
                print("  + Already found, skipping.")
        else:
            bgr = imgObject.getBGR()  # the image is BGR despite the original 'rgb' name
            outBgr = None  # stays None when loading or alignment fails
            if bgr is None:
                if args.verbose:
                    print("  + Unable to load.")
            else:
                detect = align.detect_face(bgr)
                if detect is not None:
                    bb = detect[0]
                    pts = detect[1]
                    if bb.shape[0] > 1:
                        bb_size = (bb[:, 2] - bb[:, 0]) * (bb[:, 3] - bb[:, 1])
                        i_max = np.argmax(bb_size)
                        bb = bb[i_max:i_max + 1]
                        pts = pts[i_max:i_max + 1]
                    outBgr = align.extract_image_chips(bgr, pts, args.size,
                                                       args.pad)
                    outBgr = outBgr[0]
                else:
                    if args.verbose:
                        print("  + Unable to align.")

            if args.fallbackLfw and outBgr is None:
                nFallbacks += 1
                deepFunneled = "{}/{}.jpg".format(
                    os.path.join(args.fallbackLfw, imgObject.cls),
                    imgObject.name)
                shutil.copy(
                    deepFunneled, "{}/{}.jpg".format(
                        os.path.join(args.outputDir, imgObject.cls),
                        imgObject.name))

            if outBgr is not None:
                if args.verbose:
                    print("  + Writing aligned file to disk.")
                cv2.imwrite(imgName, outBgr)

    if args.fallbackLfw:
        print('nFallbacks:', nFallbacks)
Example #12
detector = MtcnnDetector(model_folder='model', ctx=mx.cpu(0), num_worker=4, accurate_landmark=False)


img = cv2.imread('test2.jpg')

# run detector
results = detector.detect_face(img)

if results is not None:

    total_boxes = results[0]
    points = results[1]
    
    # extract aligned face chips
    chips = detector.extract_image_chips(img, points, 144, 0.37)
    for i, chip in enumerate(chips):
        cv2.imshow('chip_'+str(i), chip)
        cv2.imwrite('chip_'+str(i)+'.png', chip)

    draw = img.copy()
    for b in total_boxes:
        cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))

    for p in points:
        for i in range(5):
            cv2.circle(draw, (int(p[i]), int(p[i + 5])), 1, (0, 0, 255), 2)

    cv2.imshow("detection result", draw)
    cv2.waitKey(0)
Example #14
class MyApi(threading.Thread):
	### MTCNN face detection section
	def __init__(self):
		threading.Thread.__init__(self)
		self.detector = MtcnnDetector(model_folder='mtcnnmodel', ctx=mx.gpu(0), num_worker=1, accurate_landmark=False)
		self.imgset=self.readFaceFeatureALL()
		self.threshold = 0.45  # similarity threshold for face matching
		self.net = getattr(net_sphere,'sphere20a')()
		self.net.load_state_dict(torch.load('model/sphere20a_20171020.pth'))
		self.net.cuda()
		self.net.eval()
		self.net.feature = True
	## Facial landmark detection: takes a raw image, returns the five landmarks of every detected face (faces x 10: ten floats giving the five landmark positions) and the detected face bounding boxes
	def faceDetect(self,img):
		results = self.detector.detect_face(img)
		if results is None:
			print("No face detected")
			return None,None
		all_box=results[0]
		img_points = results[1]
		for i in range(len(img_points)):
			this_point=[]
			for j in range(5):
				this_point.append(img_points[i][j])
				this_point.append(img_points[i][j+5])
			img_points[i]=this_point
		return img_points,all_box

	## Face alignment: takes the original image and five landmarks, returns the aligned face image
	def alignment(self,src_img,src_pts):
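		# ref_pts below is the canonical five-point template (eye centers, nose
		# tip, mouth corners) for SphereFace's 96x112 input crop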
		ref_pts = [ [30.2946, 51.6963],[65.5318, 51.5014],[48.0252, 71.7366],[33.5493, 92.3655],[62.7299, 92.2041]]
		crop_size = (96, 112)
		src_pts = np.array(src_pts).reshape(5,2)
		s = np.array(src_pts).astype(np.float32)
		r = np.array(ref_pts).astype(np.float32)
		tfm = get_similarity_transform_for_cv2(s, r)
		face_img = cv2.warpAffine(src_img, tfm, crop_size)
		return face_img


	### SphereFace face recognition section

	## Feature extraction (SphereFace forward pass): takes a list of face images, returns a list of features (faces x 512 dims)
	def imageCNN(self,face_img_list):
		for i in range(len(face_img_list)):
			face_img_list[i] = face_img_list[i].transpose(2, 0, 1).reshape((1,3,112,96))
			face_img_list[i] = (face_img_list[i]-127.5)/128.0
		img = np.vstack(face_img_list)
		img = Variable(torch.from_numpy(img).float(),volatile=True).cuda()
		output = self.net(img)
		return output.data

	## Cosine similarity: takes two feature vectors, returns their cosine similarity
	def computeDistance(self,a,b):
		return a.dot(b)/(a.norm()*b.norm()+1e-5)  # 1e-5 guards against division by zero

	## Face comparison: takes two face images, returns the similarity between them
	def faceCompare(self,face_img1,face_img2):  # both are images (np.array)
		feature=self.imageCNN([face_img1,face_img2])
		return self.computeDistance(feature[0],feature[1])

	## Face search: takes a target face image and a list of face images, returns the index and similarity of the most similar face in the list
	def faceFind(self,face_img1,face_imgs_list):  # face_img1 is an image (np.array), face_imgs_list is a list of images
		face_imgs_list.insert(0,face_img1)
		feature=self.imageCNN(face_imgs_list)
		distance_list=[]
		for i in feature[1:]:
			distance_list.append(self.computeDistance(feature[0],i))
		return np.argmax(distance_list),np.max(distance_list)

	## Detect all faces: takes a raw photo, returns a list of cropped images of every detected face
	def detectAllFaces(self,img):
		key_points,all_box=self.faceDetect(img)
		if key_points is None:
			return None
		face_img_list=[]
		for i in key_points:
			face_img_list.append(self.alignment(img,i))
		return face_img_list

	## Add a face: takes a name and a raw image (which must contain exactly one face); appends the name and the face's feature vector to 'imageSet.txt'
	def addImage(self,name,img_path):
		img=cv2.imread(img_path)
		face_list=self.detectAllFaces(img)
		if face_list is None:
			return
		else:
			feature=self.imageCNN(face_list).cpu().numpy()[0]  # move off the GPU before converting to numpy
			with open('imageSet.txt','a+') as f:
				f.write(name+' ')
				for i in feature:
					f.write(str(i)+' ')
				f.write('\n')
				self.writeImage(name,img)
	## Second step of adding a photo: write the aligned face image to disk
	def writeImage(self,name,img):
		results = self.detector.detect_face(img)
		if results is None:  # guard: 'points' would otherwise be undefined below
			return
		total_boxes = results[0]
		points = results[1]
		# extract aligned face chips
		chips = self.detector.extract_image_chips(img, points, 144, 0.37)
		for i, chip in enumerate(chips):
			cv2.imwrite('./img/' + name + '.jpg', chip)  # single path: the last chip wins
	## Face search (against imageSet.txt): takes a face image, computes its feature vector, scans the stored set, and prints the most similar name and its similarity
	def faceFindFromSet(self,face_img):
		feature=self.imageCNN([face_img])
		imgset={}
		with open("imageSet.txt") as f:
			alllines=f.readlines()
		for line in alllines:
			line=line.replace('\n','').split(' ')
			imgset[line[0]]=torch.from_numpy(np.array([float(k) for k in line[1:-1]]).reshape(512)).float().cuda()  # .cuda() so it matches the GPU feature in computeDistance
		distance={}
		for key in imgset:
			distance[key]=self.computeDistance(feature[0],imgset[key])
		max_item=max(distance.items(),key=lambda i:i[1])
		print(max_item[0],max_item[1])

	# Look up stored face feature vectors by name: input a list of names ["name"], returns a dict {"name": featureVector}
	def readFaceFeatureALL(self):
		imgset={}
		with open("imageSet.txt") as f:
			alllines=f.readlines()
		for line in alllines:
			line=line.replace('\n','').split(' ')
			imgset[line[0]]=torch.from_numpy(np.array([float(k) for k in line[1:-1]]).reshape(512)).float().cuda()
		return imgset
	def readFaceFeatureByname(self,names):
		imgset={}
		for name in names:
			if name in self.imgset:
				imgset[name]=self.imgset[name]
		return imgset
	# Detailed face detection: detects every face in the image and returns each face's five landmarks, bounding box, and feature vector: [{"points": ..., "rect": ..., "feature": ...}, ...]
	def detailFaceDetect(self,img):
		key_points,all_box=self.faceDetect(img)
		if key_points is None:
			return None
		face_img_list=[]
		for i in key_points:
			face_img_list.append(self.alignment(img,i))
		features=self.imageCNN(face_img_list)
		detail_list=[]
		for i in range(len(key_points)):
			this_dict={}
			this_dict["points"]=key_points[i]
			this_dict["rect"]=all_box[i]
			this_dict["feature"]=features[i]
			detail_list.append(this_dict)
		return detail_list

	# Compare detected faces against the known-face set: takes an image, returns the image with the requested faces drawn
	## Face marking: detects all faces, matches them against the requested names, and draws five landmarks and a bounding box for each match; similar to detectAllFaces
	def markFace(self,img,names):
		if len(names)==0:
			return img
		detail_list=self.detailFaceDetect(img)
		if detail_list is None:
			return img
		face_set=self.readFaceFeatureByname(names)
		points_list=[]
		rect_list=[]
		for i in detail_list:
			for j in face_set:  # only names actually present in the feature set
				if self.computeDistance(i["feature"],face_set[j])>self.threshold:
					points_list.append(i["points"])
					rect_list.append(i["rect"])
		for b in rect_list:
			cv2.rectangle(img, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))
		for p in points_list:
			for i in range(0,10,2):
				cv2.circle(img, (int(p[i]), int(p[i + 1])), 1, (0, 0, 255), 1)
		return img
Example #15
def main():
    args = get_args()
    src_file = args.src
    if not os.path.exists(src_file):
        raise ValueError("src file does not exist: {}".format(src_file))

    split_ratio = args.split

    dst_dir = os.path.abspath(args.dst)

    num_gpus = args.ngpus
    if num_gpus == -1:
        num_gpus = len(mx.test_utils.list_gpus())
    if num_gpus == 0:
        ctx = mx.cpu(0)
    else:
        ctx = [mx.gpu(i) for i in range(num_gpus)]

    print("src={} dst dir={} gpu={}".format(src_file, dst_dir, num_gpus))
    s = read_clean_list(args.cleanlist)
    detector = MtcnnDetector(model_folder='model', ctx=ctx, num_worker=args.workers, accurate_landmark=True)

    file_count = 0
    with open(src_file, "r", encoding="utf-8") as f:
        last_m_id = "x"
        for line in f:
            m_id, image_search_rank, image_url, page_url, face_id, face_rectangle, face_data = line.split("\t")
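            # this TSV layout looks like the MS-Celeb-1M cropped-face release;
            # face_data carries base64-encoded image bytes, decoded below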
            # rect = struct.unpack("ffff", base64.b64decode(face_rectangle))
            if "{}/{}".format(m_id, image_search_rank) in s:
                data = np.frombuffer(base64.b64decode(face_data), dtype=np.uint8)
                img = cv2.imdecode(data, cv2.IMREAD_COLOR)
                if img is None:  # skip records whose image data fails to decode
                    continue
                h, w, _ = img.shape
                if h > 128 and w > 128:
                    try:
                        # run detector
                        results = detector.detect_face(img)

                        if results is not None:
                            total_boxes = results[0]
                            points = results[1]

                            bigbox_idx = np.argmax([(b[2] - b[0]) * (b[3] - b[1]) for b in total_boxes])

                            # extract aligned face chips
                            chips = detector.extract_image_chips(img, points[bigbox_idx:bigbox_idx + 1], args.size,
                                                                 args.padding)
                            for i, chip in enumerate(chips):

                                if last_m_id != m_id:
                                    ab = "train"
                                    # ensure the validation set gets the same class
                                    # labels as the training set; see the source of
                                    # pytorch's DatasetFolder
                                else:
                                    ab = "val" if random.random() > split_ratio else "train"
                                dd = os.path.join(dst_dir, ab, m_id)
                                os.makedirs(dd, exist_ok=True)
                                cv2.imwrite(os.path.join(dd, "{}.png".format(image_search_rank)),
                                            chip)
                                last_m_id = m_id

                    except Exception as e:
                        print(m_id, image_search_rank, e)

                    file_count = file_count + 1
                    if file_count % 1000 == 0:
                        print(file_count)