def main(args):
    model = evaluation_util.load_model(vars(args))
    chainer.serializers.load_npz(args.lift_model, model)

    cap = cv.VideoCapture(args.input if args.input else 0)
    hasFrame, frame = cap.read()
    if not hasFrame:
        exit(0)

    # Detect 2D joints with OpenPose and map them to the H3.6M joint layout.
    points = OpenPose(args).predict(args, frame)
    points = [np.array(vec) for vec in points]
    BODY_PARTS, POSE_PAIRS = parts(args)
    points = np.float32(to36M(points, BODY_PARTS))
    points = np.reshape(points, [1, -1])
    points = projection_gan.pose.dataset.pose_dataset.pose_dataset_base.Normalization.normalize_2d(points)

    # Lift the normalized 2D pose to 3D.
    pose = create_pose(model, points)

    # Render the 3D pose from `split` evenly spaced viewpoints.
    out_directory = "demo_out"
    os.makedirs(out_directory, exist_ok=True)
    split = 10
    for i in range(split):
        img = evaluation_util.create_projection_img(pose, np.pi * 2. * i / split)
        cv.imwrite(os.path.join(out_directory, "{:02d}.png".format(i)), img)
def main(args):
    model = evaluation_util.load_model(vars(args))
    chainer.serializers.load_npz(args.lift_model, model)

    cap = cv.VideoCapture(args.input if args.input else 0)
    hasFrame, frame = cap.read()
    if not hasFrame:
        exit(0)

    # Detect 2D joints with OpenPose and map them to the H3.6M joint layout.
    points = OpenPose(args).predict(args, frame)
    points = [np.array(vec) for vec in points]
    BODY_PARTS, POSE_PAIRS = parts(args)
    points = to36M(points, BODY_PARTS)
    points = np.reshape(points, [1, -1]).astype('f')
    points_norm = projection_gan.pose.dataset.pose_dataset.pose_dataset_base.Normalization.normalize_2d(points)

    # Lift the normalized 2D pose to 3D.
    pose = create_pose(model, points_norm)

    out_directory = "demo_out"
    os.makedirs(out_directory, exist_ok=True)

    # Save the OpenPose detection overlaid on the input frame.
    out_img = evaluation_util.create_img(points[0], frame)
    cv.imwrite(os.path.join(out_directory, 'openpose_detect.jpg'), out_img)

    # Render the 3D pose every 15 degrees around a full turn.
    deg = 15
    for d in range(0, 360 + deg, deg):
        img = evaluation_util.create_projection_img(pose, np.pi * d / 180.)
        cv.imwrite(os.path.join(out_directory, "rot_{:03d}_degree.png".format(d)), img)
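# A minimal argparse sketch for running the demos above. Only --lift_model and
# --input are evidenced by main(); OpenPose(args) and parts(args) read further
# options not shown here, so treat this as an assumption, not the project's
# actual CLI.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Lift an OpenPose 2D pose to 3D')
    parser.add_argument('--lift_model', type=str, required=True,
                        help='pretrained Chainer .npz weights for the lifting model')
    parser.add_argument('--input', type=str, default=None,
                        help='input video file; falls back to webcam 0')
    main(parser.parse_args())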
def main(args):
    model = evaluation_util.load_model(vars(args))
    chainer.serializers.load_npz(args.lift_model, model)
    # cap = cv.VideoCapture(args.input if args.input else 0)

    # Directory with the depth-estimation results ({video name}_json_{run datetime}_idx00)
    subdir = args.base_target

    frame3d_dir = "{0}/frame3d_gan".format(subdir)
    os.makedirs(frame3d_dir, exist_ok=True)

    # Output file for 3D joint positions
    posf = open(subdir + '/pos_gan.txt', 'w')
    # Output file for normalized OpenPose 2D positions
    smoothedf = open(subdir + '/smoothed_gan.txt', 'w')

    start_frame_index, smoothed = openpose_utils.read_openpose_json("{0}/json".format(subdir), 0)

    before_pose = None
    png_lib = []
    for n, (frame, xy) in enumerate(smoothed.items()):
        if frame % 100 == 0:
            logger.info("calc idx {0}, frame {1}".format(0, frame))

        logger.debug("xy")
        logger.debug(xy)

        # Pack the flat (x, y) sequence into per-joint arrays.
        points = []
        for o in range(0, len(xy), 2):
            points.append(np.array([xy[o], xy[o + 1]]))

        logger.debug("points pre 36m")
        logger.debug(points)

        BODY_PARTS, POSE_PAIRS = parts(args)

        # Dump the OpenPose 2D positions as-is; undetected joints (dtype 'object') are skipped.
        for poi in points:
            if poi.dtype == 'object':
                pass
            else:
                smoothedf.write(' ' + str(poi[0]) + ' ' + str(poi[1]))
        smoothedf.write("\n")

        # Lift 2D to 3D.
        points = to36M(points, BODY_PARTS)
        logger.debug("points after 36m")
        logger.debug(points)
        points = np.reshape(points, [1, -1]).astype('f')
        logger.debug("points reshape 36m")
        logger.debug(points)
        points_norm = projection_gan.pose.dataset.pose_dataset.pose_dataset_base.Normalization.normalize_2d(points)
        logger.debug("points_norm")
        logger.debug(points_norm)
        poses3d = create_pose(model, points_norm)
        logger.debug("poses3d")
        logger.debug(poses3d)

        # Plot 3d predictions
        subplot_idx, exidx = 1, 1
        gs1 = gridspec.GridSpec(1, 1)
        gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
        ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
        ax.view_init(18, 280)

        logger.debug(np.min(poses3d))
        # Fall back to the previous frame's pose when the prediction degenerates.
        if np.min(poses3d) < -1000 and before_pose is not None:
            poses3d = before_pose

        p3d = poses3d
        xs = p3d[:, 0::3]
        ys = p3d[:, 1::3]
        zs = p3d[:, 2::3]

        # Scale up.
        xs *= 600
        ys *= 600
        zs *= 600

        # Flip Y and Z for the rendered image only.
        p3d_copy = copy.deepcopy(p3d)
        p3d_copy[:, 1::3] *= -1
        p3d_copy[:, 2::3] *= -1

        # Render each frame from a single viewpoint only at INFO level or below.
        if level[args.verbose] <= logging.INFO:
            # d = 30
            # img = evaluation_util.create_img_xyz(xs, ys, zs, np.pi * d / 180.)
            # cv.imwrite(os.path.join(frame3d_dir, "out_{0:012d}_{1:03d}_degree.png".format(n, d)), img)
            viz.show3Dpose(p3d_copy, ax, lcolor="#9b59b6", rcolor="#2ecc71", add_labels=True)
            pngName = os.path.join(frame3d_dir, "3d_gan_{0:012d}.png".format(n))
            plt.savefig(pngName)
            png_lib.append(imageio.imread(pngName))

        before_pose = poses3d

        # Per-angle renders for each frame only in DEBUG.
        if level[args.verbose] == logging.DEBUG:
            for azim in [0, 45, 90, 135, 180, 225, 270, 315, 360]:
                ax2 = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax2.view_init(18, azim)
                viz.show3Dpose(p3d, ax2, lcolor="#FF0000", rcolor="#0000FF", add_labels=True)
                pngName2 = os.path.join(frame3d_dir, "debug_{0:012d}_{1:03d}.png".format(n, azim))
                plt.savefig(pngName2)

        # Write the 3D joint positions (x, z, -y per joint).
        for o in range(0, len(p3d[0]), 3):
            logger.debug(str(o) + " " + str(p3d[0][o]) + " " + str(p3d[0][o + 2]) + " " + str(p3d[0][o + 1] * -1) + ", ")
            posf.write(str(o) + " " + str(p3d[0][o]) + " " + str(p3d[0][o + 2]) + " " + str(p3d[0][o + 1] * -1) + ", ")
        posf.write("\n")

        # # Per-angle renders only in DEBUG.
        # if level[args.verbose] == logging.DEBUG:
        #     deg = 15
        #     for d in range(0, 360 + deg, deg):
        #         img = evaluation_util.create_projection_img(pose, np.pi * d / 180.)
        #         cv.imwrite(os.path.join(out_sub_dir, "rot_{0:03d}_degree.png".format(d)), img)

    smoothedf.close()
    posf.close()

    # At INFO level or below, build an animated GIF from the per-frame renders.
    if level[args.verbose] <= logging.INFO:
        logger.info("creating gif {0}/movie_smoothing_gan.gif, please wait!".format(subdir))
        imageio.mimsave('{0}/movie_smoothing_gan.gif'.format(subdir), png_lib, fps=30)
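# The pos_gan.txt / pos.txt files written above contain one line per frame,
# each a run of "<flat index> <x> <z> <-y>, " tokens (Y/Z are swapped and Y
# negated at write time). A minimal reader sketch under that assumption;
# read_pos_file is not part of the project:
import numpy as np

def read_pos_file(path):
    """Parse a pos_gan.txt/pos.txt dump into a (frames, joints, 3) array."""
    frames = []
    with open(path) as f:
        for line in f:
            joints = []
            for token in line.strip().rstrip(',').split(','):
                token = token.strip()
                if not token:
                    continue
                _, x, z, neg_y = token.split()  # leading value is the flat joint index
                joints.append((float(x), float(z), float(neg_y)))
            frames.append(joints)
    return np.array(frames, dtype=np.float32)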
parser.add_argument('gen_path', type=str)
parser.add_argument('--row', type=int, default=6)
parser.add_argument('--col', type=int, default=6)
parser.add_argument('--action', '-a', type=str, default='')
parser.add_argument('--image', action='store_true')
args = parser.parse_args()

col, row = args.col, args.row
gen_path = args.gen_path

# Restore the training options saved next to the generator weights.
with open(os.path.join(os.path.dirname(gen_path), 'options.json'), 'rb') as f:
    opts = json.load(f)
# An --action given on the command line overrides the stored one.
action = args.action if args.action else opts['action']

# Canvas for a grid of rendered tiles.
imgs = np.zeros((350 * col, 600 * row, 3), dtype=np.uint8)

gen = evaluation_util.load_model(opts)
chainer.serializers.load_npz(gen_path, gen)

if opts['dataset'] == 'h36m':
    test = H36M(action=action, length=1, train=False,
                use_sh_detection=opts['use_sh_detection'])
elif opts['dataset'] == 'mpii':
    test = MPII(train=False, use_sh_detection=opts['use_sh_detection'])
elif opts['dataset'] == 'mpi_inf':
    test = MPII3DDataset(train=False)
test_iter = chainer.iterators.SerialIterator(test, batch_size=row, shuffle=True, repeat=False)
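# options.json is expected to sit next to the generator weights. Judging only
# from the keys accessed above, a plausible file looks like the following
# (values are illustrative, not taken from the project):
#
# {
#     "dataset": "h36m",
#     "action": "Walking",
#     "use_sh_detection": false
# }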
def main(args):
    model = evaluation_util.load_model(vars(args))
    chainer.serializers.load_npz(args.lift_model, model)
    # cap = cv.VideoCapture(args.input if args.input else 0)

    out_directory = "demo_out"
    out_vmd_directory = "vmd_out"
    os.makedirs(out_vmd_directory, exist_ok=True)

    # Output file for OpenPose 2D positions
    smoothedf = open(os.path.join(out_vmd_directory, 'smoothed.txt'), 'w')
    # Output file for 3D joint positions
    posf = open(os.path.join(out_vmd_directory, 'pos.txt'), 'w')

    # Process the video one frame at a time.
    n = 0
    cap = cv.VideoCapture(args.input if args.input else 0)
    while cap.isOpened():
        # Capture and read one frame from the video.
        flag, frame = cap.read()
        # Stop when no frame is left.
        if not flag:
            break

        print("{0} ---------------------".format(n))

        points = OpenPose(args).predict(args, frame)
        points = [np.array(vec) for vec in points]
        BODY_PARTS, POSE_PAIRS = parts(args)
        print("points pre 36m")
        print(points)

        # Dump the OpenPose 2D positions as-is; undetected joints (dtype 'object') are skipped.
        for poi in points:
            if poi.dtype == 'object':
                pass
            else:
                smoothedf.write(' ' + str(poi[0]) + ' ' + str(poi[1]))
        smoothedf.write("\n")

        # Lift 2D to 3D.
        points = to36M(points, BODY_PARTS)
        print("points after 36m")
        print(points)
        points = np.reshape(points, [1, -1]).astype('f')
        print("points reshape 36m")
        print(points)
        points_norm = projection_gan.pose.dataset.pose_dataset.pose_dataset_base.Normalization.normalize_2d(points)
        print("points_norm")
        print(points_norm)
        pose = create_pose(model, points_norm)
        print("pose")
        print(pose)

        # Write the 3D joint positions (x, z, -y per joint).
        for o in range(0, len(pose[0]), 3):
            print(str(o) + " " + str(pose[0][o]) + " " + str(pose[0][o + 2]) + " " + str(pose[0][o + 1] * -1) + ", ")
            posf.write(str(o) + " " + str(pose[0][o]) + " " + str(pose[0][o + 2]) + " " + str(pose[0][o + 1] * -1) + ", ")
        posf.write("\n")

        out_sub_dir = os.path.join(out_directory, "{0:012d}".format(n))
        os.makedirs(out_sub_dir, exist_ok=True)

        # Save the OpenPose detection overlaid on the input frame.
        out_img = evaluation_util.create_img(points[0], frame)
        cv.imwrite(os.path.join(out_sub_dir, 'openpose_detect.jpg'), out_img)

        # Render the 3D pose every 15 degrees around a full turn.
        deg = 15
        for d in range(0, 360 + deg, deg):
            img = evaluation_util.create_projection_img(pose, np.pi * d / 180.)
            cv.imwrite(os.path.join(out_sub_dir, "rot_{0:03d}_degree.png".format(d)), img)

        n += 1

    smoothedf.close()
    posf.close()
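# smoothed.txt gets one line per frame of space-separated "x y" pairs. Note
# that undetected joints (dtype 'object') are skipped at write time, so the
# pair count is only stable when every joint was found. A minimal reader
# sketch under that assumption; read_smoothed_file is not part of the project:
def read_smoothed_file(path):
    poses = []
    with open(path) as f:
        for line in f:
            vals = [float(v) for v in line.split()]
            poses.append(list(zip(vals[0::2], vals[1::2])))  # (x, y) per joint
    return poses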
def main(args):
    model = evaluation_util.load_model(vars(args))
    chainer.serializers.load_npz(args.lift_model, model)
    # cap = cv.VideoCapture(args.input if args.input else 0)

    out_directory = "demo_out"
    out_vmd_directory = "vmd_out"
    os.makedirs(out_vmd_directory, exist_ok=True)

    # Output file for OpenPose 2D positions
    smoothedf = open(os.path.join(out_vmd_directory, 'smoothed.txt'), 'w')
    # Output file for 3D joint positions
    posf = open(os.path.join(out_vmd_directory, 'pos.txt'), 'w')

    smoothed = read_openpose_json(args.openpose_output_dir, 0)
    for n, (frame, xy) in enumerate(smoothed.items()):
        logger.info("calc idx {0}, frame {1}".format(0, frame))
        print("xy")
        print(xy)

        # Pack the flat (x, y) sequence into per-joint arrays.
        points = []
        for o in range(0, len(xy), 2):
            points.append(np.array([xy[o], xy[o + 1]]))
        print("points pre 36m")
        print(points)

        BODY_PARTS, POSE_PAIRS = parts(args)

        # Dump the OpenPose 2D positions as-is; undetected joints (dtype 'object') are skipped.
        for poi in points:
            if poi.dtype == 'object':
                pass
            else:
                smoothedf.write(' ' + str(poi[0]) + ' ' + str(poi[1]))
        smoothedf.write("\n")

        # Lift 2D to 3D.
        points = to36M(points, BODY_PARTS)
        print("points after 36m")
        print(points)
        points = np.reshape(points, [1, -1]).astype('f')
        print("points reshape 36m")
        print(points)
        points_norm = projection_gan.pose.dataset.pose_dataset.pose_dataset_base.Normalization.normalize_2d(points)
        print("points_norm")
        print(points_norm)
        pose = create_pose(model, points_norm)
        print("pose")
        print(pose)

        # Write the 3D joint positions (x, z, -y per joint).
        for o in range(0, len(pose[0]), 3):
            print(str(o) + " " + str(pose[0][o]) + " " + str(pose[0][o + 2]) + " " + str(pose[0][o + 1] * -1) + ", ")
            posf.write(str(o) + " " + str(pose[0][o]) + " " + str(pose[0][o + 2]) + " " + str(pose[0][o + 1] * -1) + ", ")
        posf.write("\n")

        out_sub_dir = os.path.join(out_directory, "{0:012d}".format(n))
        os.makedirs(out_sub_dir, exist_ok=True)

        out_img = evaluation_util.create_img(points[0], frame)
        cv.imwrite(os.path.join(out_sub_dir, 'openpose_detect.jpg'), out_img)

        # Render the 3D pose every 15 degrees around a full turn.
        deg = 15
        for d in range(0, 360 + deg, deg):
            img = evaluation_util.create_projection_img(pose, np.pi * d / 180.)
            cv.imwrite(os.path.join(out_sub_dir, "rot_{0:03d}_degree.png".format(d)), img)

    smoothedf.close()
    posf.close()
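# A small follow-up sketch: stitch one rotation angle's renders from the
# per-frame demo_out/{frame}/rot_*.png files into an animated GIF, reusing the
# imageio.mimsave call seen in the GAN pipeline above. rotation_gif and its
# defaults are assumptions, not project code.
import glob
import os

import imageio

def rotation_gif(out_directory='demo_out', deg=90, gif_path='rot_090.gif'):
    pattern = os.path.join(out_directory, '*', 'rot_{0:03d}_degree.png'.format(deg))
    frames = [imageio.imread(p) for p in sorted(glob.glob(pattern))]
    if frames:
        imageio.mimsave(gif_path, frames, fps=30)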