Example 1
    p.add_argument('--frame-offset',
                   help="Offset all frame numbers by this number",
                   type=int,
                   default=0)

    p.set_defaults(normalize=False)

    return p.parse_args()


#
# Setup
#

args = parse_arguments()
paths = build_paths(args)
paths.input = Path(args.input)

# Parse the 'WxH'-style --resize/--crop values into int tuples and the
# colon-separated --trim value into a float tuple.
resize = tuple(map(int, args.resize.split('x'))) if args.resize is not None else None
crop = tuple(map(int, args.crop.split('x'))) if args.crop is not None else None
trim = tuple(map(float, args.trim.split(':'))) if args.trim is not None else None
# Map the --flip flag to a numeric flip code (0 = vertical, 1 = horizontal,
# -1 = both axes).
flip = {
    'v': 0,
    'h': 1,
    'hv': -1,
    'vh': -1
}[args.flip] if args.flip is not None else None
normalize = args.normalize
crop_center = CropCenter[args.crop_center]
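
# How these options are applied downstream is not shown in this excerpt. A
# minimal, hypothetical sketch (not the original implementation) of applying
# them to one frame, assuming OpenCV-style H x W x 3 arrays, a 'WxH' ordering
# for --resize/--crop, and that the numeric flip codes above follow cv.flip:
import cv2 as cv  # assumed import; the excerpt does not show the script's imports


def apply_frame_options(frame, resize=None, crop=None, flip=None):
    if crop is not None:
        w, h = crop
        # A centred crop is assumed here; the CropCenter setting above suggests
        # the real script can anchor the crop window elsewhere.
        y0 = max((frame.shape[0] - h) // 2, 0)
        x0 = max((frame.shape[1] - w) // 2, 0)
        frame = frame[y0:y0 + h, x0:x0 + w]
    if resize is not None:
        frame = cv.resize(frame, resize)  # cv.resize takes (width, height)
    if flip is not None:
        frame = cv.flip(frame, flip)  # 0 = vertical, 1 = horizontal, -1 = both
    return frame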
Example 2
                    h_far) and a_close > a_far and h_close > h_far:
                success = True

            zthreshold += zthreshold_inc
        window += window_inc

    if not success:
        raise RuntimeError(
            "Could not find a window and z-score threshold that result in expected transformations"
        )

    return a_close, h_close, a_far, h_far, window, zthreshold
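
# Only the tail of the search above survives in this excerpt. As a rough,
# hypothetical reconstruction (not the original code), the enclosing loops
# appear to grid-search a window size and a z-score threshold until the
# derived "close" ankle and height values exceed the "far" ones. `compute`
# below is a placeholder for whatever derives those values from the metrics.
def grid_search_transformations(compute, window, window_inc, window_max,
                                zthreshold_init, zthreshold_inc,
                                zthreshold_max):
    success = False
    a_close = h_close = a_far = h_far = zthreshold = None
    while not success and window <= window_max:
        zthreshold = zthreshold_init
        while not success and zthreshold <= zthreshold_max:
            a_close, h_close, a_far, h_far = compute(window, zthreshold)
            if a_close > a_far and h_close > h_far:
                success = True
            else:
                zthreshold += zthreshold_inc
        if not success:
            window += window_inc

    if not success:
        raise RuntimeError(
            "Could not find a window and z-score threshold that result in expected transformations"
        )

    return a_close, h_close, a_far, h_far, window, zthreshold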


args = parse_arguments()
source_paths = build_paths(args, directory_prefix='test')
target_paths = build_paths(args,
                           directory_prefix='train',
                           dataroot=args.target_dataroot)

if source_paths.norm_calculations.exists():
    print("Loading normalization calculations from {}".format(
        source_paths.norm_calculations))
    calculations = np.load(source_paths.norm_calculations)
else:
    s_ankles, s_noses, s_heights, s_images = calculate_metrics(
        source_paths, limit=args.limit)
    t_ankles, t_noses, t_heights, t_images = calculate_metrics(
        target_paths, limit=args.limit)

    s_calc = calculate_close_and_far_transformations(
Example 3
                   required=True,
                   type=int)
    p.add_argument('--frame-offset',
                   help='The frame offset for the two datasets',
                   required=True,
                   type=int)
    p.add_argument('--target-label-offset',
                   help='Label offset applied to the target labeller',
                   required=True,
                   type=int)
    p = Labeller.add_arguments(p)

    return p.parse_args()


print("Synthesizing face puppet")

args = parse_arguments()
paths_base = build_paths(args, directory_prefix='base')
paths_out = build_paths(args, directory_prefix='test')
create_directories(paths_out)
labeller = Labeller.build_from_arguments(args, paths_base)
labeller_t = Labeller.build_from_arguments(
    args, paths_base, label_offset=args.target_label_offset)

# Drop the eye and eyebrow landmark groups from the base labeller.
del labeller.face_labeller.landmarks["right_eyebrow"]
del labeller.face_labeller.landmarks["left_eyebrow"]
del labeller.face_labeller.landmarks["right_eye"]
del labeller.face_labeller.landmarks["left_eye"]

# Drop the mouth, nose and jaw landmark groups from the target labeller.
del labeller_t.face_labeller.landmarks["mouth"]
del labeller_t.face_labeller.landmarks["inner_mouth"]
del labeller_t.face_labeller.landmarks["nose"]
del labeller_t.face_labeller.landmarks["jaw"]
Example 4
# 2 * pi: one full turn in radians.
TAU = 6.2831853071795864769252867665590057683943


def parse_arguments():
    p = argparse.ArgumentParser(
        description="Generate labels based on programmatic movement",
        fromfile_prefix_chars='@')
    p.add_argument('--dataroot', type=str)
    p.add_argument('-i', '--input',
                   help='Path to a frame image that provides the basis for the generation',
                   required=True)
    p.add_argument('-n', '--nframes',
                   help='Number of frames to generate',
                   required=True,
                   type=int)
    p = Labeller.add_arguments(p)

    return p.parse_args()

args = parse_arguments()
paths = build_paths(args, directory_prefix='test')
create_directories(paths)
labeller = Labeller.build_from_arguments(args, paths)

image_path = Path(args.input)
finput = cv.imread(str(image_path))
if finput is None:
    raise FileNotFoundError("Could not read input image: {}".format(image_path))

pose = Pose(labeller.detect_pose(finput))
larm = np.atleast_2d(pose.larm.copy())
origin = np.atleast_2d(pose.lshoulder.copy())

for i in tqdm(range(args.nframes)):
    t = float(i) / float(args.nframes)

    # Sweep theta over a quarter turn, from -TAU/8 to +TAU/8.
    theta = TAU * .25 * t - TAU * .125
    c, s = np.cos(theta), np.sin(theta)
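
    # Hypothetical continuation (the excerpt ends above, and this is not the
    # original code): rotate the left-arm keypoints about the left-shoulder
    # origin by theta using the standard 2D rotation matrix built from c and s.
    R = np.array([[c, -s],
                  [s, c]])
    rotated_larm = (larm - origin).dot(R.T) + origin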