Example #1
def im_test(im):
    # Detect and align faces; im[:, :, (2, 1, 0)] converts OpenCV's BGR
    # channel order to the RGB order the detector expects
    face_info = lib.align(im[:, :, (2, 1, 0)], front_face_detector,
                          lmark_predictor)
    if len(face_info) == 0:
        logging.warning('No faces are detected.')
        prob = -1  # no face found; flag the image so the caller can skip it
    else:
        # Report how many faces are in the image
        logging.info('{} faces are detected.'.format(len(face_info)))
        max_prob = -1
        # If any face is fake, the whole image is treated as fake
        for _, point in face_info:
            # Crop sample_num head regions around the landmarks and resize
            # each to the network input size
            rois = []
            for i in range(sample_num):
                roi, _ = lib.cut_head([im], point, i)
                rois.append(cv2.resize(roi[0], tuple(cfg.IMG_SIZE[:2])))
            vis_im(rois, 'tmp/vis.jpg')
            prob = solver.test(rois)
            # Keep the upper half of the sorted per-crop scores and average them
            prob = np.mean(
                np.sort(prob[:, 0])[np.round(sample_num / 2).astype(int):])
            if prob >= max_prob:
                max_prob = prob
        prob = max_prob
    return prob
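For context, a minimal driver for im_test might look like the sketch below. The image path is a placeholder, and the function depends on module-level state (front_face_detector, lmark_predictor, solver, cfg, sample_num) that the surrounding code is assumed to have initialized, so treat this as illustrative rather than a drop-in script.

import cv2

# Hypothetical driver: 'input.jpg' is a placeholder path. im_test relies on
# module-level globals (front_face_detector, lmark_predictor, solver, cfg,
# sample_num) assumed to be initialized elsewhere.
im = cv2.imread('input.jpg')  # OpenCV reads images in BGR channel order
if im is None:
    raise IOError('Could not read input.jpg')
prob = im_test(im)
if prob < 0:
    print('No face detected; image skipped.')
else:
    print('Max per-face score: {:.4f}'.format(prob))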
Example #2
def main(args):
    # Parse config
    cfg_file = args.cfg
    with open(cfg_file, 'r') as f:
        # PyYAML >= 5.1 requires an explicit Loader argument
        cfg = edict(yaml.load(f, Loader=yaml.FullLoader))

    # Data generator
    data_gen = DataProcTest(
        face_img_dir=args.data_dir,
        cache_path=args.cache_path,
        sample_num=args.sample_num)

    with tf.Session() as sess:
        # Build network
        reso_net = ResoNet(cfg=cfg, is_train=False)
        reso_net.build()
        # Build solver
        solver = Solver(sess=sess, cfg=cfg, net=reso_net)
        solver.init()
        for i in range(data_gen.batch_num):
            data = data_gen.get_batch(i, resize=cfg.IMG_SIZE[:2])
            images = data['images']
            name_list = data['name_list']
            vis_im(images, 'tmp/vis.jpg')
            prob = solver.test(images)
            print(np.mean(prob[:, 0]))  # mean score over the batch
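This entry point reads cfg, data_dir, cache_path, and sample_num off the args namespace. A plausible argparse wrapper is sketched below; the flag names are assumptions inferred from those attributes, not taken from the repository. The training main() in the next example would be wired analogously, with an extra --list flag for the annotation file.

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wiring; flag names are assumptions inferred from
    # the attributes main() reads off args.
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', required=True, help='path to YAML config')
    parser.add_argument('--data_dir', required=True, help='face image directory')
    parser.add_argument('--cache_path', required=True, help='cache file path')
    parser.add_argument('--sample_num', type=int, required=True)
    main(parser.parse_args())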
Example #3
def main(args):
    # Parse config
    cfg_file = args.cfg
    with open(cfg_file, 'r') as f:
        # PyYAML >= 5.1 requires an explicit Loader argument
        cfg = edict(yaml.load(f, Loader=yaml.FullLoader))

    # Data generator
    data_gen = DataProcTrain(
        face_img_dir=args.data_dir,
        cache_path=args.cache_path,
        anno_path=args.list,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        is_shuffle=True)

    # Tie the learning-rate decay step to the number of batches per epoch
    cfg.TRAIN.DECAY_STEP = data_gen.batch_num
    epoch = cfg.TRAIN.NUM_EPOCH
    with tf.Session() as sess:
        # Build network
        reso_net = ResoNet(cfg=cfg, is_train=True)
        reso_net.build()
        # Init solver
        solver = Solver(sess=sess, cfg=cfg, net=reso_net)
        solver.init()
        # Train
        count = 0
        for epoch_id in range(epoch):
            for i in range(data_gen.batch_num):
                data = data_gen.get_batch(i, resize=cfg.IMG_SIZE[:2])
                images = data['images']
                labels = data['images_label']
                vis_im(images, 'tmp/vis.jpg')

                summary, prob, net_loss, total_loss, weights = solver.train(images, labels)
                pred_labels = np.argmax(prob, axis=1)
                print('====================================')
                print('Net loss: {}'.format(net_loss))
                print('Total loss: {}'.format(total_loss))
                print('Real label: {}'.format(np.array(labels)))
                print('Pred label: {}'.format(pred_labels))
                print('Neg hard mining: {}'.format(weights))
                print('epoch: {}, batch_idx: {}'.format(epoch_id, i))
                if count % 100 == 0:
                    # Write a summary to the event log every 100 iterations
                    solver.writer.add_summary(summary, count)
                count += 1

            if epoch_id % 2 == 0:
                # Checkpoint every other epoch
                solver.save(epoch_id)
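The loop above prints raw and predicted labels but no summary metric. If a running accuracy is useful, a small helper such as the hypothetical batch_accuracy below (not part of the repository) can be called right after solver.train:

import numpy as np

def batch_accuracy(prob, labels):
    # Fraction of samples whose argmax class matches the ground-truth label
    pred = np.argmax(prob, axis=1)
    return float(np.mean(pred == np.asarray(labels)))

For example, print('Batch acc: {:.3f}'.format(batch_accuracy(prob, labels))) would log per-batch accuracy alongside the losses.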