# Example #1
def main(args):
    """Run the resolution network in test mode over every batch of face images.

    Args:
        args: parsed command-line namespace with attributes
            ``cfg`` (path to a YAML config file), ``data_dir`` (directory of
            face images), ``cache_path`` (cache location for the data
            generator) and ``sample_num`` (number of samples per image).
    """
    # Parse config. safe_load avoids arbitrary object construction from the
    # YAML file (yaml.load without an explicit Loader is deprecated/unsafe).
    with open(args.cfg, 'r') as f:
        cfg = edict(yaml.safe_load(f))

    # Data generator
    data_gen = DataProcTest(
        face_img_dir=args.data_dir,
        cache_path=args.cache_path,
        sample_num=args.sample_num)

    with tf.Session() as sess:
        # Build network (inference graph only)
        reso_net = ResoNet(cfg=cfg, is_train=False)
        reso_net.build()
        # Build solver
        solver = Solver(sess=sess, cfg=cfg, net=reso_net)
        solver.init()
        for i in range(data_gen.batch_num):
            # cfg.IMG_SIZE[:2] -> (height, width) target for resizing
            data = data_gen.get_batch(i, resize=cfg.IMG_SIZE[:2])
            images = data['images']
            # Dump a visualization of the current batch for inspection
            vis_im(images, 'tmp/vis.jpg')
            prob = solver.test(images)
            # Report mean probability of class 0 for this batch
            print(np.mean(prob[:, 0]))
def main(args):
    """Train the resolution network for ``cfg.TRAIN.NUM_EPOCH`` epochs.

    Args:
        args: parsed command-line namespace with attributes
            ``cfg`` (path to a YAML config file), ``data_dir`` (directory of
            face images), ``cache_path`` (cache location) and ``list``
            (annotation list path).
    """
    # Parse config. safe_load avoids arbitrary object construction from the
    # YAML file (yaml.load without an explicit Loader is deprecated/unsafe).
    with open(args.cfg, 'r') as f:
        cfg = edict(yaml.safe_load(f))

    # Data generator (shuffled training batches)
    data_gen = DataProcTrain(
        face_img_dir=args.data_dir,
        cache_path=args.cache_path,
        anno_path=args.list,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        is_shuffle=True)

    # Decay the learning rate once per epoch worth of batches
    cfg.TRAIN.DECAY_STEP = data_gen.batch_num
    epoch = cfg.TRAIN.NUM_EPOCH
    with tf.Session() as sess:
        # Build network (training graph)
        reso_net = ResoNet(cfg=cfg, is_train=True)
        reso_net.build()
        # Init solver
        solver = Solver(sess=sess, cfg=cfg, net=reso_net)
        solver.init()
        # Train
        count = 0  # global step counter across epochs, used for summaries
        for epoch_id in range(epoch):
            for i in range(data_gen.batch_num):
                data = data_gen.get_batch(i, resize=cfg.IMG_SIZE[:2])
                images = data['images']
                labels = data['images_label']
                # Dump a visualization of the current batch for inspection
                vis_im(images, 'tmp/vis.jpg')

                summary, prob, net_loss, total_loss, weights = solver.train(images, labels)
                pred_labels = np.argmax(prob, axis=1)
                print('====================================')
                print('Net loss: {}'.format(net_loss))
                print('Total loss: {}'.format(total_loss))
                print('Real label: {}'.format(np.array(labels)))
                print('Pred label: {}'.format(pred_labels))
                print('Neg hard mining: {}'.format(weights))
                print('epoch: {}, batch_idx: {}'.format(epoch_id, i))
                # Log a summary every 100 steps
                if count % 100 == 0:
                    solver.writer.add_summary(summary, count)
                count += 1

            # Checkpoint every other epoch
            if epoch_id % 2 == 0:
                solver.save(epoch_id)
# Example #3
# Number of face samples to draw per image (consumed by code below this view).
sample_num = 10

# Employ dlib to extract face area and landmark points
pwd = os.path.dirname(__file__)
# Fix to get it running on colab
cwd = os.getcwd()
front_face_detector = dlib.get_frontal_face_detector()
# NOTE(review): model path is resolved from the *current working directory*,
# not the script directory (`pwd` above) — the process must be started from
# the repo root for this to find the .dat file.
lmark_predictor = dlib.shape_predictor(
    cwd + '/dlib_model/shape_predictor_68_face_landmarks.dat')

# Allow TF to fall back to CPU for ops without a GPU kernel, and grow GPU
# memory on demand instead of grabbing it all up front.
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
# init session
sess = tf.Session(config=tfconfig)
# Build network (inference mode; `cfg` is assumed to be defined earlier in
# the full file — not visible in this chunk)
reso_net = ResoNet(cfg=cfg, is_train=False)
reso_net.build()
# Build solver
solver = Solver(sess=sess, cfg=cfg, net=reso_net)
solver.init()


def im_test(im):
    face_info = lib.align(im[:, :, (2, 1, 0)], front_face_detector,
                          lmark_predictor)
    # Samples
    if len(face_info) == 0:
        logging.warning('No faces are detected.')
        prob = -1  # we ignore this case
    else:
        # Check how many faces in an image