Code Example #1
def parse_arguments():
    p = argparse.ArgumentParser(description="Generate labels based on programmatic movement", fromfile_prefix_chars='@')
    p.add_argument('--dataroot', type=str)
    p.add_argument('-n', '--nframes', help='Number of frames to generate', required=True, type=int)
    p.add_argument('--frame-offset', help='The frame offset for the two datasets', required=True, type=int)
    p = Labeller.add_arguments(p)

    return p.parse_args()
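
Every parser in these examples is built with fromfile_prefix_chars='@', so a long flag list can be kept in a plain-text file and supplied as @filename on the command line. A minimal, self-contained sketch of that mechanism (the file name args.txt and the flag values are illustrative only, and Labeller.add_arguments is omitted to keep it standalone):

import argparse

# argparse reads one token per line from an @-file by default.
with open('args.txt', 'w') as f:
    f.write('--dataroot\ndata/example\n--nframes\n100\n--frame-offset\n5\n')

p = argparse.ArgumentParser(fromfile_prefix_chars='@')
p.add_argument('--dataroot', type=str)
p.add_argument('-n', '--nframes', required=True, type=int)
p.add_argument('--frame-offset', required=True, type=int)

# Equivalent to invoking the script with "@args.txt" as its only argument.
args = p.parse_args(['@args.txt'])
print(args.nframes, args.frame_offset)  # -> 100 5
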
Code Example #2
def parse_arguments():
    p = argparse.ArgumentParser(description="Generate labels based on programmatic movement", fromfile_prefix_chars='@')
    p.add_argument('--dataroot', type=str)
    p.add_argument('-i', '--input', help='Path to a frame image that provides the basis for the generation', required=True)
    p.add_argument('-n', '--nframes', help='Number of frames to generate', required=True, type=int)
    p = Labeller.add_arguments(p)

    return p.parse_args()
Code Example #3
def parse_arguments():
    p = argparse.ArgumentParser(
        description="Transfer motion from a source video to a target video",
        fromfile_prefix_chars='@')
    p.add_argument('--dataroot', type=str)
    p.add_argument('-i', '--input', help='Path to the video', required=True)
    p.add_argument(
        '--trim',
        help='Decimal, colon separated seconds to trim the input video to. -1 indicates the end of the video',
        type=str,
        default='0:-1')
    p.add_argument(
        '--subsample',
        help='Factor to subsample the source frames, every Nth frame will be selected',
        type=int,
        default=1)
    p.add_argument(
        '--subsample-offset',
        help='Offset for subsampling the source frames, every Nth+i frame will be selected',
        type=int,
        default=0)
    p.add_argument('--resize', help='Resize source to the given size')
    p.add_argument('--crop', help='After resizing, crop to the given size')
    p.add_argument('--crop-center',
                   help='Center to use for cropping',
                   choices=[c.name for c in CropCenter],
                   default=CropCenter.body.name)
    p.add_argument('--flip',
                   help='Flip vertically, horizontally, or both',
                   choices=['v', 'h', 'vh', 'hv'])
    p.add_argument('--normalize',
                   help='Output frame data for normalization',
                   action='store_true')

    p = Labeller.add_arguments(p)

    p.add_argument(
        '--face-size',
        help='The size (squared) of faces extracted to train the face network',
        type=int,
        default=128)
    p.add_argument(
        '--directory-prefix',
        help='Image and label directory prefixes for label training',
        default='train')
    p.add_argument('--no-label', help='Disable labeling', action='store_true')
    p.add_argument(
        '--train-a',
        help="Put images in the train_A directory for non-label training",
        action='store_true')
    p.add_argument(
        '--train-b',
        help="Put images in the train_B directory for non-label training",
        action='store_true')
    p.add_argument(
        '--test-a',
        help="Put images in the test_A directory for non-label training",
        action='store_true')
    p.add_argument('--frame-offset',
                   help="Offset all frame numbers by this number",
                   type=int,
                   default=0)

    p.set_defaults(normalize=False)

    return p.parse_args()
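
The --trim value is a 'start:end' pair of decimal seconds, with -1 standing for the end of the video; example #4 below passes an already-parsed trim value into decimate_and_label_video, so the string is split into numbers somewhere in code not shown here. A hedged sketch of such a helper (the name parse_trim and the (start, end) return shape are assumptions, not the project's actual API):

from typing import Optional, Tuple

def parse_trim(trim: str) -> Tuple[float, Optional[float]]:
    """Split a 'start:end' trim string into seconds; -1 (end of the video) becomes None."""
    start_s, end_s = trim.split(':')
    end = float(end_s)
    return float(start_s), (None if end == -1 else end)

print(parse_trim('0:-1'))    # default: keep the whole video -> (0.0, None)
print(parse_trim('2.5:10'))  # -> (2.5, 10.0)
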
Code Example #4
# Map the --flip choice to a flip code; the 'v' and 'h' entries are assumed here
# (OpenCV convention: 0 = vertical, 1 = horizontal, negative = both), since the
# source fragment starts mid-dict.
flip = {
    'v': 0,
    'h': 1,
    'hv': -1,
    'vh': -1
}[args.flip] if args.flip is not None else None
normalize = args.normalize
crop_center = CropCenter[args.crop_center]

print("Creating directory hierarchy")
create_directories(paths)
print("Fetching models")
fetch_models(paths)

if args.no_label:
    print("Decimating video")
    labeller = None
else:
    print("Decimating video and labelling with {}".format(args.label_with))
    labeller = Labeller.build_from_arguments(args, paths)

decimate_and_label_video(paths,
                         labeller,
                         trim=trim,
                         subsample=args.subsample,
                         subsample_offset=args.subsample_offset,
                         resize=resize,
                         crop=crop,
                         crop_center=crop_center,
                         flip=flip,
                         normalize=normalize,
                         frame_offset=args.frame_offset,
                         face_size=args.face_size)
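
The integer codes built for --flip above appear to follow OpenCV's flip-code convention (0 flips vertically around the x-axis, a positive code flips horizontally, a negative code flips both axes); that convention is an assumption here, not something the snippet states. A minimal sketch of applying such a code to a frame with cv.flip:

import cv2 as cv
import numpy as np

# Toy 2x2 "frame" so the effect of each flip code is easy to see.
frame = np.arange(4, dtype=np.uint8).reshape(2, 2)

flipped_v = cv.flip(frame, 0)    # 'v'         -> top/bottom swapped
flipped_h = cv.flip(frame, 1)    # 'h'         -> left/right swapped
flipped_b = cv.flip(frame, -1)   # 'hv' / 'vh' -> both axes

print(frame, flipped_v, flipped_h, flipped_b, sep='\n\n')
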
Code Example #5
def parse_arguments():
    # Opening assumed to match the other examples; the source fragment starts mid-call.
    p = argparse.ArgumentParser(fromfile_prefix_chars='@')
    p.add_argument('--dataroot', type=str)
    p.add_argument('--frame-offset',
                   help='The frame offset for the two datasets',
                   required=True,
                   type=int)
    p.add_argument('--target-label-offset', required=True, type=int)
    p = Labeller.add_arguments(p)

    return p.parse_args()


print("Synthesizing face puppet")

args = parse_arguments()
paths_base = build_paths(args, directory_prefix='base')
paths_out = build_paths(args, directory_prefix='test')
create_directories(paths_out)
labeller = Labeller.build_from_arguments(args, paths_base)
labeller_t = Labeller.build_from_arguments(
    args, paths_base, label_offset=args.target_label_offset)

del labeller.face_labeller.landmarks["right_eyebrow"]
del labeller.face_labeller.landmarks["left_eyebrow"]
del labeller.face_labeller.landmarks["right_eye"]
del labeller.face_labeller.landmarks["left_eye"]

del labeller_t.face_labeller.landmarks["mouth"]
del labeller_t.face_labeller.landmarks["inner_mouth"]
del labeller_t.face_labeller.landmarks["nose"]
del labeller_t.face_labeller.landmarks["jaw"]

base_image_fns = sorted(os.listdir(paths_base.img_dir))
Code Example #6
def parse_arguments():
    p = argparse.ArgumentParser(description="Generate labels based on programmatic movement", fromfile_prefix_chars='@')
    p.add_argument('--dataroot', type=str)
    p.add_argument('-n', '--nframes', help='Number of frames to generate', required=True, type=int)
    p.add_argument('--frame-offset', help='The frame offset for the two datasets', required=True, type=int)
    p = Labeller.add_arguments(p)

    return p.parse_args()

print("Synthesizing half and half faces")

args = parse_arguments()
paths_base = build_paths(args, directory_prefix='base')
paths_out = build_paths(args, directory_prefix='test')
create_directories(paths_out)
labeller_a = Labeller.build_from_arguments(args, paths_base, label_offset=0)
labeller_b = Labeller.build_from_arguments(args, paths_base)

base_image_fns = sorted(os.listdir(paths_base.img_dir))

for i in tqdm(range(args.nframes)):
    t = float(i) / float(args.nframes)
    paths = data_paths_for_idx(paths_out, i)

    if paths.label.exists():
        continue

    path_base_image_a = paths_base.img_dir / base_image_fns[i]
    path_base_image_b = paths_base.img_dir / base_image_fns[i + args.frame_offset]

    base_image_a = cv.imread(str(path_base_image_a))