Example 1
def handle2x(config, args):
    # resize input
    h1, w1, scale1 = pad_to_height(config.img_size[0], args.img1_height, args.img1_width)
    h2, w2, scale2 = pad_to_height(config.img_size[0], args.img2_height, args.img2_width)

    # load trained model
    net = get_autoencoder(config)
    net.load_state_dict(torch.load(args.model_path))
    net.to(config.device)
    net.eval()

    # mean/std pose
    mean_pose, std_pose = get_meanpose(config)

    # get input
    input1 = openpose2motion(args.vid1_json_dir, scale=scale1, max_frame=args.max_length)
    input2 = openpose2motion(args.vid2_json_dir, scale=scale2, max_frame=args.max_length)
    input1 = preprocess_motion2d(input1, mean_pose, std_pose)
    input2 = preprocess_motion2d(input2, mean_pose, std_pose)
    input1 = input1.to(config.device)
    input2 = input2.to(config.device)

    # transfer by network
    out12 = net.transfer(input1, input2)
    out21 = net.transfer(input2, input1)

    # postprocessing the outputs
    input1 = postprocess_motion2d(input1, mean_pose, std_pose, w1 // 2, h1 // 2)
    input2 = postprocess_motion2d(input2, mean_pose, std_pose, w2 // 2, h2 // 2)
    out12 = postprocess_motion2d(out12, mean_pose, std_pose, w2 // 2, h2 // 2)
    out21 = postprocess_motion2d(out21, mean_pose, std_pose, w1 // 2, h1 // 2)

    if not args.disable_smooth:
        out12 = gaussian_filter1d(out12, sigma=2, axis=-1)
        out21 = gaussian_filter1d(out21, sigma=2, axis=-1)

    if args.out_dir is not None:
        save_dir = args.out_dir
        ensure_dir(save_dir)
        color1 = hex2rgb(args.color1)
        color2 = hex2rgb(args.color2)
        np.savez(os.path.join(save_dir, 'results.npz'),
                 input1=input1,
                 input2=input2,
                 out12=out12,
                 out21=out21)
        if args.render_video:
            print("Generating videos...")
            motion2video(input1, h1, w1, os.path.join(save_dir, 'input1.mp4'), color1, args.transparency,
                         fps=args.fps, save_frame=args.save_frame)
            motion2video(input2, h2, w2, os.path.join(save_dir,'input2.mp4'), color2, args.transparency,
                         fps=args.fps, save_frame=args.save_frame)
            motion2video(out12, h2, w2, os.path.join(save_dir,'out12.mp4'), color2, args.transparency,
                         fps=args.fps, save_frame=args.save_frame)
            motion2video(out21, h1, w1, os.path.join(save_dir,'out21.mp4'), color1, args.transparency,
                         fps=args.fps, save_frame=args.save_frame)
    print("Done.")
Example 2
def motion2video(motion, h, w, save_path, colors, transparency=False, motion_tgt=None, fps=25, save_frame=False):
    nr_joints = motion.shape[0]
    videowriter = imageio.get_writer(save_path, fps=fps)
    vlen = motion.shape[-1]
    if save_frame:
        # store individual frames next to the video, e.g. "out.mp4" -> "out-frames/"
        frames_dir = save_path[:-4] + '-frames'
        ensure_dir(frames_dir)
    for i in tqdm(range(vlen)):
        # render frame i of the skeleton as a full-size image and a cropped version
        [img, img_cropped] = joints2image(motion[:, :, i], colors, transparency, H=h, W=w, nr_joints=nr_joints)
        if motion_tgt is not None:
            # overlay the target motion (30%) on top of the rendered motion (70%)
            [img_tgt, img_tgt_cropped] = joints2image(motion_tgt[:, :, i], colors, transparency, H=h, W=w, nr_joints=nr_joints)
            img_ori = img.copy()
            img = cv2.addWeighted(img_tgt, 0.3, img_ori, 0.7, 0)
            # blend the full-size images as well, then crop the result horizontally to its bounding box
            img_cropped = cv2.addWeighted(img_tgt, 0.3, img_ori, 0.7, 0)
            bb = bounding_box(img_cropped)
            img_cropped = img_cropped[:, bb[2]:bb[3], :]
        if save_frame:
            save_image(img_cropped, os.path.join(frames_dir, "%04d.png" % i))
        videowriter.append_data(img)
    videowriter.close()
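
Since handle2x in Example 1 dumps its outputs into results.npz, motion2video can also be exercised on a saved motion. The file name and the 'out12' key below come from the np.savez call in Example 1; the canvas size, color string, and output filename are placeholders, and hex2rgb/motion2video are assumed to be importable from the repository's utilities.

import numpy as np

# Re-render a motion saved by handle2x (Example 1); 'results.npz' and 'out12'
# match its np.savez call, the remaining values are illustrative.
data = np.load('results.npz')
out12 = data['out12']        # (J, 2, T) array produced by postprocess_motion2d
h2 = w2 = 512                # assumed canvas size; reuse the values handle2x computed
colors = hex2rgb('#0000ff')  # assumption: same hex string format as --color2
motion2video(out12, h2, w2, 'out12_replay.mp4', colors, fps=25)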
Example 3
def motion_feature_extract(config, args):
    # resize input
    h1, w1, scale1 = pad_to_height(config.img_size[0], args.img1_height, args.img1_width)
    
    # load trained model
    net = get_autoencoder(config)
    net.load_state_dict(torch.load(args.model_path))
    net.to(config.device)
    net.eval()

    # mean/std pose
    mean_pose, std_pose = get_meanpose(config)

    # get input
    input1 = openpose2motion(args.vid1_json_dir, scale=scale1, max_frame=args.max_length)
    print("after motion")
    print(input1.shape)
    input1 = preprocess_motion2d(input1, mean_pose, std_pose)
    print("after preprocess")
    print(input1.shape)

    if args.out_dir is not None:
        save_dir = args.out_dir
        ensure_dir(save_dir)
        np.savez(os.path.join(save_dir, 'pose_feature.npz'), pose=input1)

    input1 = input1.to(config.device)

    # run the trained autoencoder on the input and extract its motion-encoder features
    out = net(input1)
    mot = net.mot_encoder(input1)
    print(mot.shape)
    # postprocessing the outputs
    input1 = postprocess_motion2d(input1, mean_pose, std_pose, w1 // 2, h1 // 2)
    out = postprocess_motion2d(out, mean_pose, std_pose, w1 // 2, h1 // 2)
    print("after postprocess")
    print(input1.shape)

    if not args.disable_smooth:
        out = gaussian_filter1d(out, sigma=2, axis=-1)

    print("Done.")
Example 4
def handle2x(config, args):
    w1 = h1 = w2 = h2 = 512

    # load trained model
    net = get_autoencoder(config)
    net.load_state_dict(torch.load(args.model_path))
    net.to(config.device)
    net.eval()

    # mean/std pose
    mean_pose, std_pose = get_meanpose(config)

    # get input
    dataloader = get_dataloader('test', config)
    input1 = dataloader.dataset.preprocessing(args.path1, args.view1).unsqueeze(0)
    input2 = dataloader.dataset.preprocessing(args.path2, args.view2).unsqueeze(0)
    input1 = input1.to(config.device)
    input2 = input2.to(config.device)

    # transfer by network
    out12 = net.transfer(input1, input2)
    out21 = net.transfer(input2, input1)

    # postprocessing the outputs
    input1 = postprocess_motion2d(input1, mean_pose, std_pose, w1 // 2,
                                  h1 // 2)
    input2 = postprocess_motion2d(input2, mean_pose, std_pose, w2 // 2,
                                  h2 // 2)
    out12 = postprocess_motion2d(out12, mean_pose, std_pose, w2 // 2, h2 // 2)
    out21 = postprocess_motion2d(out21, mean_pose, std_pose, w1 // 2, h1 // 2)

    if not args.disable_smooth:
        out12 = gaussian_filter1d(out12, sigma=2, axis=-1)
        out21 = gaussian_filter1d(out21, sigma=2, axis=-1)

    if args.out_dir is not None:
        save_dir = args.out_dir
        ensure_dir(save_dir)
        color1 = hex2rgb(args.color1)
        color2 = hex2rgb(args.color2)
        np.savez(os.path.join(save_dir, 'results.npz'),
                 input1=input1,
                 input2=input2,
                 out12=out12,
                 out21=out21)
        if args.render_video:
            print("Generating videos...")
            motion2video(input1,
                         h1,
                         w1,
                         os.path.join(save_dir, 'input1.mp4'),
                         color1,
                         args.transparency,
                         fps=args.fps,
                         save_frame=args.save_frame)
            motion2video(input2,
                         h2,
                         w2,
                         os.path.join(save_dir, 'input2.mp4'),
                         color2,
                         args.transparency,
                         fps=args.fps,
                         save_frame=args.save_frame)
            motion2video(out12,
                         h2,
                         w2,
                         os.path.join(save_dir, 'out12.mp4'),
                         color2,
                         args.transparency,
                         fps=args.fps,
                         save_frame=args.save_frame)
            motion2video(out21,
                         h1,
                         w1,
                         os.path.join(save_dir, 'out21.mp4'),
                         color1,
                         args.transparency,
                         fps=args.fps,
                         save_frame=args.save_frame)
    print("Done.")
Example 5
    input2 = postprocess_motion2d(input2, mean_pose, std_pose, w2 // 2, h2 // 2)

    # interpolated motions [(J, 2, L), ..., (J, 2, L)]
    interp_motions = [postprocess_motion2d(out12[i:i+1, :, :], mean_pose, std_pose) for i in range(out12.shape[0])]

    # each cell's position
    if args.form == 'line':
        position = [str(i) for i in range(len(interp_motions))]
    else:
        position = [str(i // args.nr_sample) + '.' + str(i % args.nr_sample) for i in range(len(interp_motions))]

    # write output video
    out_path = args.out_path
    if out_path is not None:
        pardir = os.path.split(out_path)[0]
        ensure_dir(pardir)
        print('generating video...')
        cell_height = cell_width = args.cell_height
        color1 = hex2rgb(args.color1)
        color2 = hex2rgb(args.color2)
        vlen = min(input1.shape[-1], input2.shape[-1])

        videowriter = imageio.get_writer(out_path, fps=25)
        for i in tqdm(range(vlen)):
            img_iterps = []
            for j, motion in enumerate(interp_motions):
                if args.form == 'line':
                    color = interpolate_color(color1, color2, j / (args.nr_sample - 1))
                else:
                    color = interpolate_color(color1, color2, (j // args.nr_sample) / (args.nr_sample - 1))
                img, img_cropped = joints2image(motion[:, :, i], color, transparency=args.transparency,