Example #1
def main_wflwe70():
    args = parse_args()
    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    dataset_type = get_dataset(config)
    dataset = dataset_type(config, is_train=True)

    for i in range(len(dataset)):
        # ipdb.set_trace()
        img, fname, meta = dataset[i]
        filename = osp.join('data/wflwe70/xximages', fname)
        if not osp.exists(osp.dirname(filename)):
            os.makedirs(osp.dirname(filename))
        scale = meta['scale']
        center = meta['center']
        tpts = meta['tpts']

        for spt in tpts:
            # cast to int: cv2.circle expects integer centre coordinates and radius
            img = cv2.circle(img, (int(4 * spt[0]), int(4 * spt[1])),
                             int(1 + center[0] // 400), (255, 0, 0))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(filename, img)
def get_model_by_name(model_name,
                      root_models_path='hrnetv2_models',
                      prefix='HR18-',
                      model_type='landmarks',
                      device='cuda'):

    checkpoint_path = f'{root_models_path}/{prefix}{model_name}.pth'
    config_path = f'{root_models_path}/{prefix}{model_name}.yaml'

    merge_configs(config, config_path)

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()

    if model_type == 'landmarks':
        model = get_face_alignment_net(config)
    else:
        model = get_cls_net(config_imagenet)

    model.load_state_dict(torch.load(checkpoint_path, map_location=device))
    model.eval()
    model.to(device)

    return model
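
A minimal usage sketch for the helper above (not part of the original snippet); the 'WFLW' model name, the directory layout, and the 256x256 input size are assumptions:

import torch

# hypothetical call: model name, paths and input size are assumed, not given above
model = get_model_by_name('WFLW',
                          root_models_path='hrnetv2_models',
                          prefix='HR18-',
                          model_type='landmarks',
                          device='cpu')

with torch.no_grad():
    dummy = torch.zeros(1, 3, 256, 256)   # NCHW input, size assumed
    heatmaps = model(dummy)               # one heatmap per predicted landmark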
Example #3
def main_cofw():
    args = parse_args()
    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    ipdb.set_trace()
    dataset_type = get_dataset(config)
    dataset = dataset_type(config, is_train=True)

    fp = open('data/cofw/test.csv', 'w')
    for i in range(len(dataset)):
        # ipdb.set_trace()
        img, image_path, meta = dataset[i]
        fname = osp.join('data/cofw/test', osp.basename(image_path))
        fp.write('%s,1,128,128' % fname)
        tpts = meta['tpts']
        for j in range(tpts.shape[0]):
            fp.write(',%d,%d' % (tpts[j, 0], tpts[j, 1]))
        fp.write('\n')
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(fname, img)
    fp.close()
Example #4
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
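    # checkpoints that wrap the weights under a 'state_dict' key are loaded into
    # the DataParallel wrapper; bare weight files go to the underlying module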
    state_dict = torch.load(args.model_file)
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        model.module.load_state_dict(state_dict)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)
    with open('../data/crabs/crabs_data_test.csv', 'r') as f:
        data = np.loadtxt(f, str, delimiter=",", skiprows=1)
    paths = data[:, 0]
    for index, path in enumerate(paths):
        img = cv2.imread("../data/crabs/images/{}".format(path))
        a = predictions[index]
        b = a.numpy()

        for px in b:
            # print(tuple(px))
            # cast to plain ints: cv2.circle rejects float coordinates
            cv2.circle(img, (int(px[0]), int(px[1])), 1, (0, 0, 255), 3, 8, 0)

        # cv2.imwrite("/home/njtech/Jiannan/crabs/dataset/result_new/{}".format(path.split('/')[-1]), img)

        cv2.imshow("img", img)

        cv2.waitKey(1000)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)
    if args.onnx_export:
        # unpack the (height, width) pair so the dummy input is a 4-D NCHW tensor
        torch_out = torch.onnx.export(model,
                                      torch.rand(1, 3, *config.IMAGE_SIZE),
                                      osp.join(final_output_dir,
                                               args.onnx_export),
                                      export_params=True)
        return

    gpus = list(config.GPUS)
    if gpus[0] > -1:
        model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    if gpus[0] > -1:
        state_dict = torch.load(args.model_file)
    else:
        state_dict = torch.load(args.model_file, map_location='cpu')
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        if gpus[0] > -1:
            model.module.load_state_dict(state_dict)
        else:
            model.load_state_dict(state_dict)

    dataset_type = get_dataset(config)
    dataset = dataset_type(config, is_train=False)

    test_loader = DataLoader(dataset=dataset,
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    ipdb.set_trace()
    nme, predictions = function.inference(config, test_loader, model)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    # model = models.get_face_alignment_net(config)
    model = eval('models.' + config.MODEL.NAME + '.get_face_alignment_net')(
        config, is_train=True)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    # state_dict = torch.load(args.model_file)
    # if 'state_dict' in state_dict.keys():
    #     state_dict = state_dict['state_dict']
    #     model.load_state_dict(state_dict)
    # else:
    #     model.module.load_state_dict(state_dict)

    if args.model_file:
        logger.info('=> loading model from {}'.format(args.model_file))
        # model.load_state_dict(torch.load(args.model_file), strict=False)

        model_state = torch.load(args.model_file)
        model.module.load_state_dict(model_state.state_dict())
    else:
        model_state_file = os.path.join(final_output_dir, 'final_state.pth')
        logger.info('=> loading model from {}'.format(model_state_file))
        model_state = torch.load(model_state_file)
        model.module.load_state_dict(model_state)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    #state_dict = torch.load(args.model_file)
    #if 'state_dict' in state_dict.keys():
    #    state_dict = state_dict['state_dict']
    #    model.load_state_dict(state_dict)
    #else:
    #    model.module.load_state_dict(state_dict)

    model = torch.load(args.model_file)
    model.eval()
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)

    import cv2
    img = cv2.imread('data/wflw/images/my3.jpg')
    print(predictions, predictions.shape)
    for item in predictions[0]:
        cv2.circle(img, (int(item[0]), int(item[1])), 3, (0, 0, 255), -1)
    cv2.imwrite('out.png', img)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
Example #8
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    if torch.cuda.is_available():
        model = nn.DataParallel(model, device_ids=gpus).cuda()

        # load model
        state_dict = torch.load(args.model_file)
    else:
        # model = nn.DataParallel(model)
        state_dict = torch.load(args.model_file,
                                map_location=lambda storage, loc: storage)

    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        try:
            model.module.load_state_dict(state_dict.state_dict())
        except AttributeError:
            # strip the 'module.' prefix (the first seven characters) that
            # DataParallel prepends to every key, then load the plain weights
            state_dict = {k[7:] if k.startswith('module.') else k: v
                          for k, v in state_dict.items()}
            (model.module if hasattr(model, 'module')
             else model).load_state_dict(state_dict)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    #nme, predictions = function.inference(config, test_loader, model, args.model_file) #### testing
    function.test(config, test_loader, model, args.model_file)  #### testing
Example #9
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()

    model = torchvision.models.resnet101(pretrained=config.MODEL.PRETRAINED,
                                         progress=True)
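    # replace the 1000-way ImageNet classifier with a linear head that outputs
    # config.MODEL.OUTPUT_SIZE[0] values (presumably direct landmark regression)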
    num_ftrs = model.fc.in_features
    model.fc = torch.nn.Linear(num_ftrs, config.MODEL.OUTPUT_SIZE[0])

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    state_dict = torch.load(args.model_file)
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        model.module.load_state_dict(state_dict)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    predictions = function.inference(config, test_loader, model)

    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
Example #10
def main_300w():
    args = parse_args()
    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    dataset_type = get_dataset(config)
    dataset = dataset_type(config, is_train=False)

    fp = open('data/300w/face_landmarks70_300w_test.csv', 'w')
    for i in range(len(dataset)):
        # ipdb.set_trace()
        img, fname, meta = dataset[i]
        filename = osp.join('data/300w/xximages', fname)
        if not osp.exists(osp.dirname(filename)):
            os.makedirs(osp.dirname(filename))
        scale = meta['scale']
        center = meta['center']
        tpts = meta['tpts']

        selpts = []
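        # keep the 68 standard 300W points and append the two eye centres
        # (means of points 36-41 and 42-47) to obtain 70 landmarks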
        for j in range(0, 68):
            selpts.append(tpts[j])
        selpts.append(tpts[36:42].mean(0))
        selpts.append(tpts[42:48].mean(0))

        fp.write('%s,%.2f,%.1f,%.1f' % (fname, scale, center[0], center[1]))
        for spt in selpts:
            img = cv2.circle(img, (int(spt[0]), int(spt[1])),
                             int(1 + center[0] // 400), (255, 0, 0))
            fp.write(',%f,%f' % (spt[0], spt[1]))
        fp.write('\n')
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(filename, img)
    fp.close()
Example #11
    def __init__(self, config=config):

        config.defrost()
        config.merge_from_file(
            "ext/HRNet-Facial-Landmark-Detection/experiments/wflw/face_alignment_wflw_hrnet_w18.yaml"
        )
        config.freeze()

        cudnn.benchmark = config.CUDNN.BENCHMARK
        cudnn.deterministic = config.CUDNN.DETERMINISTIC
        cudnn.enabled = config.CUDNN.ENABLED

        config.defrost()
        config.MODEL.INIT_WEIGHTS = False
        config.freeze()

        self.model = models.get_face_alignment_net(config)
        state_dict = torch.load(
            "ext/HRNet-Facial-Landmark-Detection/hrnetv2_pretrained/HR18-WFLW.pth"
        )
        self.model.load_state_dict(state_dict, strict=False)

        gpus = list(config.GPUS)
        self.model = nn.DataParallel(self.model, device_ids=gpus).cuda()
Example #12
def main():
    # Step 1: load model
    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    model = nn.DataParallel(model, device_ids=gpus).cuda()

    # load model
    state_dict = torch.load(args.model_file)
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        model.module.load_state_dict(state_dict)

    # Step 2: detect face and predict landmark
    transform = transforms.Compose([transforms.ToTensor()])
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        if not ret: break

        height, width = img.shape[:2]

        bounding_boxes, landmarks = detect_faces(img)
        dataset = get_preprocess(config)
        # print('--------bboxes: ', bounding_boxes)
        for box in bounding_boxes:
            # x1, y1, x2, y2, _ = list(map(int, box))
            score = box[4]
            x1, y1, x2, y2 = (box[:4] + 0.5).astype(np.int32)
            w = x2 - x1 + 1
            h = y2 - y1 + 1

            size = int(max([w, h]) * 1.3)
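            # centre a square crop of side `size` on the detected face box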
            cx = x1 + w // 2
            cy = y1 + h // 2
            x1 = cx - size // 2
            x2 = x1 + size
            y1 = cy - size // 2
            y2 = y1 + size

            dx = max(0, -x1)
            dy = max(0, -y1)
            x1 = max(0, x1)
            y1 = max(0, y1)

            edx = max(0, x2 - width)
            edy = max(0, y2 - height)
            x2 = min(width, x2)
            y2 = min(height, y2)

            cropped = img[y1:y2, x1:x2]
            if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
                cropped = cv2.copyMakeBorder(cropped, dy, edy, dx, edx,
                                             cv2.BORDER_CONSTANT, 0)

            # center_w = (x1+x2)/2
            # center_h = (y1+y2)/2
            # center = torch.Tensor([center_w, center_h])
            # input = img[y1:y2, x1:x2, :]
            # input = dataset._preprocessing(dataset, img=img, center=center, scale=1.0)
            landmarks = get_lmks_by_img(model, cropped)
            img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            for (x, y) in landmarks.astype(np.int32):
                cv2.circle(img, (x1 + x, y1 + y), 2, (255, 255, 255))

        cv2.imshow('0', img)
        if cv2.waitKey(10) == 27:
            break
def main():
    #
    args = parse_args()
    #
    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')
    #
    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))
    #
    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED
    #
    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)
    #
    gpus = list(config.GPUS)
    #
    # # load model
    state_dict = torch.load(args.model_file)
    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
        model.load_state_dict(state_dict)
    else:
        # the model has not been wrapped in DataParallel yet (that happens
        # below), so load the weights into the model directly
        model.load_state_dict(state_dict)
    model = nn.DataParallel(model, device_ids=gpus).cuda()
    #
    dataset_type = get_dataset(config)

    test_loader = DataLoader(dataset=dataset_type(config, is_train=False),
                             batch_size=config.TEST.BATCH_SIZE_PER_GPU *
                             len(gpus),
                             shuffle=False,
                             num_workers=config.WORKERS,
                             pin_memory=config.PIN_MEMORY)

    nme, predictions = function.inference(config, test_loader, model)
    torch.save(predictions, os.path.join(final_output_dir, 'predictions.pth'))
    target = test_loader.dataset.load_all_pts()
    pred = 16 * predictions
    l = len(pred)
    res = 0.0
    res_tmp = [0.0 for i in range(config.MODEL.NUM_JOINTS)]

    res_tmp = np.array(res_tmp)
    res_temp_x = target - pred
    res_temp_x = res_temp_x[:, :, 0]
    res_temp_y = target - pred
    res_temp_y = res_temp_y[:, :, 1]

    # csv_file_test_x = pd.DataFrame(np.transpose(np.array(pred[:, :, 0])), columns=test_loader.dataset.annotation_files)
    # csv_file_test_y = pd.DataFrame(np.transpose(np.array(pred[:, :, 1])), columns=test_loader.dataset.annotation_files)
    # csv_file_target_x = pd.DataFrame(np.transpose(np.array(target[:, :, 0])), columns=test_loader.dataset.annotation_files)
    # csv_file_target_y = pd.DataFrame(np.transpose(np.array(target[:, :, 1])), columns=test_loader.dataset.annotation_files)

    for i in range(l):
        trans = np.sqrt(
            pow(target[i][0][0] - target[i][1][0], 2) +
            pow(target[i][0][1] - target[i][1][1], 2)) / 30.0
        res_temp_x[i] = res_temp_x[i] / trans
        res_temp_y[i] = res_temp_y[i] / trans
        for j in range(len(target[i])):
            dist = np.sqrt(
                np.power((target[i][j][0] - pred[i][j][0]), 2) +
                np.power((target[i][j][1] - pred[i][j][1]), 2)) / trans
            res += dist
            res_tmp[j] += dist
    res_t = np.sqrt(res_temp_x * res_temp_x + res_temp_y * res_temp_y)
    # pd.DataFrame(data=res_temp_x.data.value).to_csv('res_x')
    # pd.DataFrame(data=res_temp_y.data.value).to_csv('res_y')
    # pd.DataFrame(data=res_t.data.value).to_csv('res_t')
    res_tmp /= float(len(pred))
    print(res_tmp)
    print(np.mean(res_tmp))
    res /= (len(pred) * len(pred[0]))
    print(res)
Example #14
def main():

    args = parse_args()

    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn.benchmark = config.CUDNN.BENCHMARK
    # cudnn.deterministic = config.CUDNN.DETERMINISTIC
    # cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    gpus = list(config.GPUS)
    # model = nn.DataParallel(model, device_ids=gpus).cuda()
    model.to("cuda")
    # print(model)
    # load model
    # state_dict = torch.load(args.model_file)
    # print(state_dict)
    # model = torch.load(args.model_file)
    with open(args.model_file, "rb") as fp:
        state_dict = torch.load(fp)
        model.load_state_dict(state_dict)
    # model.load_state_dict(state_dict['state_dict'])
    # if 'state_dict' in state_dict.keys():
    #     state_dict = state_dict['state_dict']
    #     # print(state_dict)
    #     model.load_state_dict(state_dict)
    # else:
    #     model.module.load_state_dict(state_dict)

    dataset_type = get_dataset(config)

    test_loader = DataLoader(
        dataset=dataset_type(config,
                             is_train=False),
        batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY
    )

    predictions = function.inference(config, test_loader, model)
    # print("len(predictions)", len(predictions))
    # print(predictions[0])
    df_predictions = []
    for pred in predictions:
        row = dict()
        row['file_name'] = pred[0]
        for id_point in range(194):
            row[f'Point_M{id_point}_X'] = int(pred[1][id_point])
            row[f'Point_M{id_point}_Y'] = int(pred[2][id_point])
        df_predictions.append(row)
    df_predictions = pd.DataFrame(df_predictions)
    # print(predictions_meta[0])
    df_predictions.to_csv('pred_test.csv', index=False)
Example #15
def main_wflw():
    args = parse_args()
    logger, final_output_dir, tb_log_dir = \
        utils.create_logger(config, args.cfg, 'test')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED

    config.defrost()
    config.MODEL.INIT_WEIGHTS = False
    config.freeze()
    model = models.get_face_alignment_net(config)

    dataset_type = get_dataset(config)
    dataset = dataset_type(config, is_train=True)

    fp = open('data/wflw/face_landmarks70_wflw_train.csv', 'w')
    for i in range(len(dataset)):
        img, image_path, meta = dataset[i]
        fold, name = image_path.split('/')[-2], image_path.split('/')[-1]
        folder = osp.join('data/wflw/xximages', fold)
        if not osp.exists(folder):
            os.makedirs(folder)
        fname = osp.join(folder, name)
        scale = meta['scale']
        center = meta['center']
        tpts = meta['tpts']

        selpts = []
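        # reduce WFLW's 98 landmarks to 70: every second face-contour point,
        # midpoints of paired eyebrow/eyelid points, and the remaining nose,
        # eye-corner and mouth points unchanged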
        for j in range(0, 33, 2):
            selpts.append(tpts[j])
        # eyebrow
        selpts.append(tpts[33])
        selpts.append((tpts[34] + tpts[41]) / 2)
        selpts.append((tpts[35] + tpts[40]) / 2)
        selpts.append((tpts[36] + tpts[39]) / 2)
        selpts.append((tpts[37] + tpts[38]) / 2)
        selpts.append((tpts[42] + tpts[50]) / 2)
        selpts.append((tpts[43] + tpts[49]) / 2)
        selpts.append((tpts[44] + tpts[48]) / 2)
        selpts.append((tpts[45] + tpts[47]) / 2)
        selpts.append(tpts[46])
        # nose
        for j in range(51, 60):
            selpts.append(tpts[j])
        # eye
        selpts.append(tpts[60])
        selpts.append((tpts[61] + tpts[62]) / 2)
        selpts.append(tpts[63])
        selpts.append(tpts[64])
        selpts.append(tpts[65])
        selpts.append((tpts[66] + tpts[67]) / 2)
        selpts.append(tpts[68])
        selpts.append(tpts[69])
        selpts.append((tpts[70] + tpts[71]) / 2)
        selpts.append(tpts[72])
        selpts.append((tpts[73] + tpts[74]) / 2)
        selpts.append(tpts[75])
        for j in range(76, 98):
            selpts.append(tpts[j])

        fp.write('%s,%.2f,%.1f,%.1f' %
                 (osp.join(fold, name), scale, center[0], center[1]))
        for spt in selpts:
            cv2.circle(img, (int(spt[0]), int(spt[1])), 1, (0, 0, 255))
            fp.write(',%f,%f' % (spt[0], spt[1]))
        fp.write('\n')
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(fname, img)
    fp.close()