Code example #1
File: estimator.py  Project: liuguoyou/VNect
 def __init__(self):
     print('Initializing VNectEstimator...')
     # the ratio factors to scale the input image crops, no more than 1.0
     self.scales = [1]  # or [1, 0.7] to be consistent with the author when training
     # initialize one euro filters for all the joints
     config_2d = {
         'freq': 120,
         'mincutoff': 1.7,
         'beta': 0.3,
         'dcutoff': 1.0
     }
     config_3d = {
         'freq': 120,
         'mincutoff': 0.8,
         'beta': 0.4,
         'dcutoff': 1.0
     }
     self.filter_2d = [(OneEuroFilter(**config_2d), OneEuroFilter(**config_2d)) for _ in range(self._joints_num)]
     self.filter_3d = [(OneEuroFilter(**config_3d), OneEuroFilter(**config_3d), OneEuroFilter(**config_3d))
                       for _ in range(self._joints_num)]
     # load pretrained VNect model
     self.sess = tf.Session()
     saver = tf.train.import_meta_graph('../models/tf_model/vnect_tf.meta' if os.getcwd().endswith('src') else
                                        './models/tf_model/vnect_tf.meta')
     saver.restore(self.sess, tf.train.latest_checkpoint('../models/tf_model/' if os.getcwd().endswith('src') else
                                                         './models/tf_model/'))
     graph = tf.get_default_graph()
     self.input_crops = graph.get_tensor_by_name('Placeholder:0')
     self.heatmap = graph.get_tensor_by_name('split_2:0')
     self.x_heatmap = graph.get_tensor_by_name('split_2:1')
     self.y_heatmap = graph.get_tensor_by_name('split_2:2')
     self.z_heatmap = graph.get_tensor_by_name('split_2:3')
     print('VNectEstimator initialized.')
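The config dictionaries used throughout these examples (freq, mincutoff, beta, dcutoff) are the standard one-euro-filter parameters, and every snippet assumes an OneEuroFilter class that is constructed with them and then called once per sample with the new value (and, in some implementations, a timestamp). As a rough, minimal sketch of what such a class computes (the canonical one-euro algorithm: an exponential low-pass whose cutoff frequency rises with the estimated speed of the signal), not the actual class used by these projects:

import math


class LowPass:
    """Exponential smoothing used inside the one euro filter (sketch)."""

    def __init__(self):
        self.y = None

    def __call__(self, x, alpha):
        self.y = x if self.y is None else alpha * x + (1.0 - alpha) * self.y
        return self.y


class OneEuroFilterSketch:
    """Minimal one euro filter (sketch); parameter names match the config dicts above."""

    def __init__(self, freq, mincutoff=1.0, beta=0.0, dcutoff=1.0):
        self.freq = freq            # expected sampling rate in Hz
        self.mincutoff = mincutoff  # cutoff at rest: lower = smoother but laggier
        self.beta = beta            # speed coefficient: higher = less lag on fast motion
        self.dcutoff = dcutoff      # cutoff used when smoothing the derivative
        self.x_lp, self.dx_lp = LowPass(), LowPass()
        self.prev_x = None

    def _alpha(self, cutoff):
        tau = 1.0 / (2.0 * math.pi * cutoff)
        return 1.0 / (1.0 + tau * self.freq)

    def __call__(self, x, timestamp=None):
        # real implementations re-estimate freq from successive timestamps; omitted here
        dx = 0.0 if self.prev_x is None else (x - self.prev_x) * self.freq
        self.prev_x = x
        edx = self.dx_lp(dx, self._alpha(self.dcutoff))
        cutoff = self.mincutoff + self.beta * abs(edx)
        return self.x_lp(x, self._alpha(cutoff))

An instance is called once per frame per scalar coordinate, which is how the per-joint filter_2d / filter_3d tuples built above are meant to be used.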
Code example #2
    def __init__(self, plot=True, T=False):
        print('Initializing VnectEstimator...')
        # whether to plot the 2d and 3d animation
        self.plot = plot
        # whether to apply the transposed matrix (when the camera is flipped)
        self.T = T
        # the ratio factors to scale the input image crops, no more than 1.0
        self.scales = [1]  # or [1, 0.7] to be consistent with the author when training
        # initialize one euro filters for all the joints
        config_2d = {
            'freq': 120,
            'mincutoff': 1.7,
            'beta': 0.3,
            'dcutoff': 1.0
        }
        config_3d = {
            'freq': 120,
            'mincutoff': 0.8,
            'beta': 0.4,
            'dcutoff': 1.0
        }
        self.filter_2d = [(OneEuroFilter(**config_2d),
                           OneEuroFilter(**config_2d))
                          for _ in range(self._joints_num)]
        self.filter_3d = [(OneEuroFilter(**config_3d),
                           OneEuroFilter(**config_3d),
                           OneEuroFilter(**config_3d))
                          for _ in range(self._joints_num)]
        # load pretrained VNect model
        self.sess = tf.Session()
        saver = tf.train.import_meta_graph(
            '../models/tf_model/vnect_tf.meta' if os.getcwd(
            ).endswith('src') else './models/tf_model/vnect_tf.meta')
        saver.restore(
            self.sess,
            tf.train.latest_checkpoint('../models/tf_model/' if os.getcwd(
            ).endswith('src') else './models/tf_model/'))
        graph = tf.get_default_graph()
        self.input_crops = graph.get_tensor_by_name('Placeholder:0')
        self.heatmap = graph.get_tensor_by_name('split_2:0')
        self.x_heatmap = graph.get_tensor_by_name('split_2:1')
        self.y_heatmap = graph.get_tensor_by_name('split_2:2')
        self.z_heatmap = graph.get_tensor_by_name('split_2:3')

        if self.plot:
            self.ax_3d = plt.axes(projection='3d')
            plt.ion()
            self.ax_3d.clear()
            plt.show()
        print('Initialization done.')
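The constructor above only builds the filters; applying them is outside this excerpt. A minimal per-frame application, assuming joints_2d of shape (joints_num, 2) and joints_3d of shape (joints_num, 3) extracted from the heatmaps (a hypothetical helper, not code from the project), might look like:

    def _smooth_joints(self, joints_2d, joints_3d):
        # hypothetical helper (sketch): run every joint coordinate through its own one euro filter
        for j in range(len(self.filter_2d)):
            joints_2d[j, 0] = self.filter_2d[j][0](joints_2d[j, 0])
            joints_2d[j, 1] = self.filter_2d[j][1](joints_2d[j, 1])
            joints_3d[j, 0] = self.filter_3d[j][0](joints_3d[j, 0])
            joints_3d[j, 1] = self.filter_3d[j][1](joints_3d[j, 1])
            joints_3d[j, 2] = self.filter_3d[j][2](joints_3d[j, 2])
        return joints_2d, joints_3d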
Code example #3
def filter_batch(pts, filter_indices=None, config=None, freq=None):
    assert (pts.shape[-1] == 2 or pts.shape[-1] == 3)
    if filter_indices is None:
        filter_indices = np.arange(skeleton.num_joints)
    if config is None:
        config = {
            'freq': 100,  # Hz
            'mincutoff': 0.1,  # FIXME
            'beta': 2.0,  # FIXME
            'dcutoff': 1.0  # this one should be ok
        }
    if freq is not None:
        config['freq'] = freq

    f = [[OneEuroFilter(**config) for j in range(pts.shape[-1])]
         for i in range(skeleton.num_joints)]
    timestamp = 0.0  # seconds
    pts_after = np.zeros_like(pts)
    for i in range(pts.shape[0]):
        for j in range(pts.shape[1]):
            if j in filter_indices:
                # filter every coordinate (2 or 3, matching the assert above)
                for d in range(pts.shape[-1]):
                    pts_after[i, j, d] = f[j][d](pts[i, j, d], timestamp)
            else:
                pts_after[i, j] = pts[i, j]
        timestamp += 1.0 / config['freq']

    return pts_after
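A quick way to exercise filter_batch is to feed it a noisy (frames, joints, 3) array; the joint count has to match skeleton.num_joints of the surrounding project (17 is assumed here purely for illustration):

import numpy as np

# hypothetical smoke test, assuming skeleton.num_joints == 17
frames, joints = 200, 17
noisy = np.cumsum(0.01 * np.random.randn(frames, joints, 3), axis=0)  # random-walk "poses"
smoothed = filter_batch(noisy, freq=100)
print(noisy.shape, smoothed.shape)  # both (200, 17, 3)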
Code example #4
File: estimator.py  Project: ipa-rar/VNect
    def __init__(self):
        print('Initializing VNect Estimator...')
        # the scale factors to zoom down the input image crops
        # put different scales to get better average performance
        # for faster loops, use less scales e.g. [1], [1, 0.7]
        self.scales = [1, 0.85, 0.7]
        # initializing one euro filters for all the joints
        filter_config_2d = {
            'freq': 30,  # system frequency, about 30 Hz
            'mincutoff': 1.7,  # value taken from the paper
            'beta': 0.3,  # value taken from the paper
            'dcutoff': 0.4  # not mentioned in the paper, set empirically
        }
        filter_config_3d = {
            'freq': 30,  # system frequency, about 30 Hz
            'mincutoff': 0.8,  # value taken from the paper
            'beta': 0.4,  # value taken from the paper
            'dcutoff': 0.4  # not mentioned in the paper, set empirically
        }
        self.filter_2d = [(OneEuroFilter(**filter_config_2d),
                           OneEuroFilter(**filter_config_2d))
                          for _ in range(self.joints_sum)]
        self.filter_3d = [(OneEuroFilter(**filter_config_3d),
                           OneEuroFilter(**filter_config_3d),
                           OneEuroFilter(**filter_config_3d))
                          for _ in range(self.joints_sum)]
        # load pretrained VNect model
        self.sess = tf.Session()
        if os.getcwd().endswith('src'):
            saver = tf.train.import_meta_graph(
                '../models/tf_model/vnect_tf.meta')
            saver.restore(self.sess,
                          tf.train.latest_checkpoint('../models/tf_model/'))
        else:
            saver = tf.train.import_meta_graph(
                './models/tf_model/vnect_tf.meta')
            saver.restore(self.sess,
                          tf.train.latest_checkpoint('./models/tf_model/'))
        graph = tf.get_default_graph()
        self.input_crops = graph.get_tensor_by_name('Placeholder:0')
        self.heatmap = graph.get_tensor_by_name('split_2:0')
        self.x_heatmap = graph.get_tensor_by_name('split_2:1')
        self.y_heatmap = graph.get_tensor_by_name('split_2:2')
        self.z_heatmap = graph.get_tensor_by_name('split_2:3')

        print('VNect Estimator initialized.')
Code example #5
    def set_up_filtering(self):
        ### Initialize filters if not already done
        if not self.init_filters_:
            # one filter per translation axis, per frame; list comprehensions
            # avoid sharing the same mutable objects between frames
            self.translation_filters_ = [[
                OneEuroFilter(self.run_frequency_, self.filter_mincutoff_,
                              self.filter_beta_, self.filter_dcutoff_)
                for _ in range(3)
            ] for _ in self.frame_names_]
            self.translation_timestamps_ = [[0.0, 0.0, 0.0]
                                            for _ in self.frame_names_]
            self.transforms_merged_ = [Transform() for _ in self.frame_names_]
            self.confs_merged_ = [0] * len(self.frame_names_)
            self.translations_merged_ = [
                Vector3(0.0, 0.0, 0.0) for _ in self.frame_names_
            ]
            self.translations_merged_mm_ = [
                Vector3(0.0, 0.0, 0.0) for _ in self.frame_names_
            ]
            self.translations_merged_body_mm_ = [
                Vector3(0.0, 0.0, 0.0) for _ in self.frame_names_
            ]
            self.merged_transform_exists_ = [False] * len(self.frame_names_)
            self.init_filters_ = True
Code example #6
    def __init__(self, filter=True):
        print('Initializing Joints2Angles...')
        self.filter = filter

        if self.filter:
            # filter configuration
            config_filter = {
                'freq': 120,
                'mincutoff': 0.5,
                'beta': 0.5,
                'dcutoff': 1.0
            }
            self.filter_angles = [OneEuroFilter(**config_filter) for _ in range(8)]
        print('Joints2Angles initialized.')
Code example #7
def filter_poses(poses, fcmin=0.05, beta=0.4, freq=1):
    config = {
        'freq': freq,  # Hz
        'mincutoff': fcmin,
        'beta': beta,
        'dcutoff': 1.0
    }

    poses_filtered = 0 * poses
    for j in range(poses.shape[1]):
        for i in range(poses.shape[2]):
            f = OneEuroFilter(**config)
            timestamp = 0.0
            for t in range(poses.shape[0]):
                # pass the running timestamp so the configured frequency is respected
                filtered = f(poses[t, j, i], timestamp)
                timestamp += 1.0 / config["freq"]
                poses_filtered[t, j, i] = filtered

    return poses_filtered
Code example #8
    def __init__(self,
                 track_id,
                 time,
                 cameras,
                 poses2d,
                 pose3d,
                 joints_views,
                 args,
                 build3D='SVD'):
        # for campus w2d=0.4, alpha2d=25, w3d=0.6, alpha3d=0.1, lambda_a=5, lambda_t=10
        # for shelf and panoptic w2d=0.4, alpha2d=60, w3d=0.6, alpha3d=0.15, lambda_a=5, lambda_t=10
        # for 5FLobby w2d=0.4, alpha2d=70, w3d=0.6, alpha3d=0.25, lambda_a=3, lambda_t=5
        """
        :param track_id: track's id
        :param hypothesis: 3d pose, included camera, 2d poses
        :param z_axis: some datasets are rotated around one axis
        :param max_age: maximum time hasn't been updated
        :param n_init: a confirmed threshold that need to be updated
        """
        self.track_id = track_id
        self.hits = 1
        self.age = 1
        self.time_since_update = 0
        self.already_update = False

        self.joints = len(pose3d)
        self.poses2d = self.init_poses2d(time, cameras, poses2d)
        self.poses3d = [{
            'time': time,
            'pose3d': np.array(pose3d),
            'joints_views': joints_views
        }]
        self.next_pose3d = np.array(pose3d)
        self.velocity_3d = np.array([[0., 0., 0.] for i in range(self.joints)])

        self.state = TrackState.Tentative
        self._n_init = args.n_init
        self._max_age = args.max_age
        self.alpha2d = args.alpha2d
        self.alpha3d = args.alpha3d
        self.lambda_a = args.lambda_a
        self.lambda_t = args.lambda_t
        self.joint_threshold = args.joint_threshold
        self.sigma = args.sigma
        self.arm_sigma = args.arm_sigma
        self.build3D = build3D

        # initializing one euro filters for all the joints
        filter_config_2d = {
            'freq': 25,  # system frequency, about 25 Hz
            'mincutoff': 1.7,  # value taken from the paper
            'beta': 0.3,  # value taken from the paper
            'dcutoff': 0.4  # not mentioned in the paper, set empirically
        }
        filter_config_3d = {
            'freq': 25,  # system frequency, about 25 Hz
            'mincutoff': 0.8,  # value taken from the paper
            'beta': 0.4,  # value taken from the paper
            'dcutoff': 0.4  # not mentioned in the paper, set empirically
        }
        self.filter_2d = [(OneEuroFilter(**filter_config_2d),
                           OneEuroFilter(**filter_config_2d))
                          for _ in range(self.joints)]
        self.filter_3d = [(OneEuroFilter(**filter_config_3d),
                           OneEuroFilter(**filter_config_3d),
                           OneEuroFilter(**filter_config_3d))
                          for _ in range(self.joints)]
Code example #9
def evaluate_sequence(all_data, seq_info, pred_dir):
    global j3d, j2d, cam, ppre_dof, pre_dof, root_rot, ppre_root_rot, pre_root_rot
    ppre_dof, pre_dof = np.zeros((1, 1)), np.zeros((1, 1))
    ppre_root_rot, pre_root_rot = np.zeros((1, 1)), np.zeros((1, 1))

    sub_id, action, trial_id, cam_id = seq_info
    file_seq_name = 'S%d_%s-%d_cam%01d' % (sub_id, action, trial_id, cam_id)
    print('%s' % (file_seq_name))
    save_path = pred_dir + file_seq_name + '_pred.h5'
    if os.path.exists(save_path):
        results = dd.io.load(save_path)
        errors = results['errors']
        errors_pa = results['errors_pa']
        errors_n = results['errors_n']
        errors_vel = results['errors_vel']
        return errors, errors_pa, errors_n, errors_vel

    sub_id = 'S' + str(sub_id)
    action = action + '-' + str(trial_id)
    pose_3d = all_data['positions_3d_pred'].item()
    gt_3d = all_data['positions_3d_gt'].item()
    pose_2d = all_data['positions_2d'].item()
    j3ds = pose_3d[sub_id][action][cam_id]
    gt3ds = gt_3d[sub_id][action][cam_id]
    j2ds = pose_2d[sub_id][action][cam_id]
    index_f19_t17 = [
        0, 1, 2, 3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
    ]
    # j3d: m   j2d: [-1, 1]
    j3ds = j3ds[:, index_f19_t17]
    j2ds = j2ds[:, index_f19_t17]
    gt3ds = gt3ds[:, index_f19_t17]
    j3ds = h36m_skeleton_fit(j3ds)
    j3ds[:, 0] = gt3ds[:, 0]
    j_cal_cam = np.array([1, 2, 3, 4, 5, 6, 7, 9, 11, 12, 13, 14, 15, 16])
    cam = infer_camera_intrinsics(j2ds[:, j_cal_cam, :2], j3ds[:, j_cal_cam])
    j2ds[:, :, :2] = np.dot(j3ds / j3ds[:, :, 2:], cam.T)[:, :, :2]
    frame_num = j3ds.shape[0]
    dofs = np.zeros((frame_num, 28), dtype=float)
    dofs[:, :3] = j3ds[:, 0]
    config_filter = {
        'freq': 50,
        'mincutoff': 20.0,
        'beta': 0.4,
        'dcutoff': 1.0
    }
    # there are 6 joints with 3 degrees of freedom and 4 joints with 1 degree of freedom
    filter_dof3 = [
        (OneEuroFilter(**config_filter), OneEuroFilter(**config_filter),
         OneEuroFilter(**config_filter), OneEuroFilter(**config_filter))
        for _ in range(6)
    ]
    filter_dof1 = [OneEuroFilter(**config_filter) for _ in range(4)]

    j3d_pre = np.zeros_like(j3ds)
    results = {}
    for f in tqdm(range(frame_num)):
        j3d, j2d = j3ds[f], j2ds[f]
        # initializing from the previous frame's result greatly reduces the number of iterations
        if f > 0:
            dofs[f, 3:] = dofs[f - 1, 3:]
        # predict root rot
        sol = root(optimize_root_rot, dofs[f, 3:6], method='lm')
        root_rot = sol.x
        dofs[f, 3:6] = root_rot
        # predict all rot
        sol = root(optimize, dofs[f, 6:], method='lm')
        dofs[f, 6:] = sol.x
        if f == 0:
            ppre_dof = dofs[f, 6:]
            ppre_root_rot = root_rot
        elif f == 1:
            pre_dof = dofs[f, 6:]
            pre_root_rot = root_rot
        else:
            ppre_dof = pre_dof
            pre_dof = dofs[f, 6:]
            ppre_root_rot = pre_root_rot
            pre_root_rot = root_rot

        # oneEuroFilter
        if use_filter:
            # do not filter the root rotation, to prevent Unity read/computation issues from producing wrong orientations in individual frames
            for i, j in enumerate([6, 10, 14, 17, 20, 14]):
                dof_j = dofs[f, j:j + 3]
                theta = np.linalg.norm(dof_j)
                axis = np.divide(dof_j, theta, where=theta != 0)
                axis[0] = filter_dof3[i][0](axis[0])
                axis[1] = filter_dof3[i][1](axis[1])
                axis[2] = filter_dof3[i][2](axis[2])
                theta = filter_dof3[i][3](theta)
                dofs[f, j:j + 3] = axis * theta
            for i, j in enumerate([9, 13, 12, 15]):
                dofs[f, j] = filter_dof1[i](dofs[f, j])

        j3d_pre[f], _ = compute_joints_from_dofs(dofs[f, 6:], cam)
        mpjpe = np.mean(np.linalg.norm(j3d * 1000 - j3d_pre[f] * 1000,
                                       axis=-1))
        print(('%04d' % f) + '-MPJPE: ' + ('%.2f' % mpjpe) + ' mm')

    errors, errors_pa = compute_errors(gt3ds * 1000., j3d_pre * 1000.)
    errors_n = n_mpjpe(gt3ds * 1000., j3d_pre * 1000.)
    errors_vel = mean_velocity_error(gt3ds * 1000., j3d_pre * 1000.)
    results['errors'] = errors
    results['errors_pa'] = errors_pa
    results['errors_n'] = errors_n
    results['errors_vel'] = errors_vel
    # Save results
    dd.io.save(save_path, results)
    np.savetxt(pred_dir + 'dofs/' + file_seq_name + '.txt', dofs, fmt='%1.6f')
    return errors, errors_pa, errors_n, errors_vel
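The long functions from this project (examples #9, #10 and #12) all smooth each 3-DOF rotation by splitting the rotation vector into a unit axis and an angle and filtering the four scalars independently. Pulled out as a standalone helper, this is a sketch of that pattern, not code from the project; note that it also re-normalizes the axis after filtering, which the original loops do not:

import numpy as np

def filter_rotvec(dof3, filters):
    """Smooth an axis-angle rotation vector with a 4-tuple of one euro filters (sketch, not project code)."""
    theta = np.linalg.norm(dof3)
    axis = dof3 / theta if theta != 0 else np.zeros(3)
    axis = np.array([filters[d](axis[d]) for d in range(3)])
    theta = filters[3](theta)
    norm = np.linalg.norm(axis)
    if norm > 0:
        axis = axis / norm  # keep the axis a unit vector after smoothing
    return axis * theta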
Code example #10
def begin():
    global j3d, j2d, cam, ppre_dof, pre_dof, root_rot, ppre_root_rot, pre_root_rot
    ppre_dof, pre_dof = np.zeros((1, 1)), np.zeros((1, 1))
    ppre_root_rot, pre_root_rot = np.zeros((1, 1)), np.zeros((1, 1))
    j3ds, j2ds, cam = read_joints_from_h36m()  # (F, 17, 3/3)
    frame_num = j3ds.shape[0]
    dofs = np.zeros((frame_num, 28), dtype=float)
    dofs[:, :3] = j3ds[:, 0]

    time_str = datetime.now().strftime("%m_%d_%H_%M")
    subject_name = data_path.split('/')[-2]
    if 'all' in data_path:
        subject_name = 'all_' + subject_name
    save_dir = './out/' + time_str + '_' + subject_name + '_3d_' + str(
        w_3d) + '+2d_' + str(w_2d)
    if use_lim:
        save_dir += '+lim_' + str(w_lim)
    if use_temp:
        save_dir += '+temp_' + str(w_temp)
    if use_filter:
        save_dir += '+filter'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    print('save path: ' + save_dir)
    config_filter = {
        'freq': 10,
        'mincutoff': 20.0,
        'beta': 0.4,
        'dcutoff': 1.0
    }
    if 'all' in data_path:
        config_filter['freq'] = 50
    # there are 6 joints with 3 degrees of freedom and 4 joints with 1 degree of freedom
    filter_dof3 = [
        (OneEuroFilter(**config_filter), OneEuroFilter(**config_filter),
         OneEuroFilter(**config_filter), OneEuroFilter(**config_filter))
        for _ in range(6)
    ]
    filter_dof1 = [OneEuroFilter(**config_filter) for _ in range(4)]

    start_t = time.time()
    mpjpe_all = []
    threshold = 100.
    num = 0
    # dofs = np.loadtxt('./out/09_13_11_54_WalkingDog-2_3d_1+2d_1e-05+lim_0.1+temp_0.1_49.63mm_1/09_13_11_54_dofs.txt')
    # j3d, j2d = j3ds[0], j2ds[0]
    # root_rot = dofs[0, 3:6]
    # j3d_pre, j2d_pre = compute_joints_from_dofs(dofs[0, 6:], cam)
    # print(j3d * 100)
    # print(root_rot)
    for f in range(frame_num):
        print('-------------------------------------')
        j3d, j2d = j3ds[f], j2ds[f]
        # initializing from the previous frame's result greatly reduces the number of iterations
        if f > 0:
            dofs[f, 3:] = dofs[f - 1, 3:]
        # predict root rot
        sol = root(optimize_root_rot, dofs[f, 3:6], method='lm')
        root_rot = sol.x
        dofs[f, 3:6] = root_rot

        sol = root(optimize, dofs[f, 6:], method='lm')
        dofs[f, 6:] = sol.x
        if f == 0:
            ppre_dof = dofs[f, 6:]
            ppre_root_rot = root_rot
        elif f == 1:
            pre_dof = dofs[f, 6:]
            pre_root_rot = root_rot
        else:
            ppre_dof = pre_dof
            pre_dof = dofs[f, 6:]
            ppre_root_rot = pre_root_rot
            pre_root_rot = root_rot

        # oneEuroFilter
        if use_filter:
            # do not filter the root rotation, to prevent Unity read/computation issues from producing wrong orientations in individual frames
            for i, j in enumerate([6, 10, 14, 17, 20, 14]):
                dof_j = dofs[f, j:j + 3]
                theta = np.linalg.norm(dof_j)
                axis = np.divide(dof_j, theta, where=theta != 0)
                axis[0] = filter_dof3[i][0](axis[0])
                axis[1] = filter_dof3[i][1](axis[1])
                axis[2] = filter_dof3[i][2](axis[2])
                theta = filter_dof3[i][3](theta)
                dofs[f, j:j + 3] = axis * theta
            for i, j in enumerate([9, 13, 12, 15]):
                dofs[f, j] = filter_dof1[i](dofs[f, j])

        # print(sol.x)
        # print(sol.success)
        # print(sol.nfev)
        # print(sol.message)
        # print(sol.fun)
        j3d_pre, j2d_pre = compute_joints_from_dofs(dofs[f, 6:], cam)
        mpjpe = np.mean(np.linalg.norm(j3d * 1000 - j3d_pre * 1000, axis=-1))
        if mpjpe > threshold:
            num += 1
        mpjpe_all.append(mpjpe)
        print(('%04d' % f) + '-MPJPE: ' + ('%.2f' % mpjpe) + ' mm')
        fd = open(save_dir + '/logs.txt', 'a+')
        fd.write(('%04d' % f) + '-MPJPE: ' + ('%.2f' % mpjpe) + ' mm\n')
        fd.close()
        plot_2skeleton(j3d * 100, j3d_pre * 100, f, mpjpe, save_dir)

    end_t = time.time()
    fd = open(save_dir + '/logs.txt', 'a+')
    fd.write('MPJPE-mean: ' + ('%.2f' % np.mean(mpjpe_all)) + ' mm\n')
    fd.close()
    np.savetxt(save_dir + '/' + time_str + '_' + subject_name + '_dofs.txt',
               dofs,
               fmt='%1.6f')
    time_per = (end_t - start_t) / frame_num
    print('time every frame: ' + str(time_per))
    print(np.mean(mpjpe_all))

    frame_to_video(save_dir + '/3d_skeleton')
    os.rename(
        save_dir, save_dir + ('_%.2fmm_' % np.mean(mpjpe_all)) + str(num) +
        ('_%.2fs' % time_per))
Code example #11
    def __init__(self,
                 serialport,
                 *,
                 baudrate=115200,
                 outpath=None,
                 printlines=False,
                 firstline='',
                 printHz=False,
                 bufsize=2048,
                 pMin=0,
                 pMax=25,
                 convert_pressure=False,
                 socket_start=False):
        super().__init__()

        self.serialport = serialport
        self.baudrate = baudrate
        self.outpath = outpath
        self.outfile = None
        self.printlines = printlines
        self.printHz = printHz
        self.bufsize = bufsize
        self.plotbuf = deque(maxlen=self.bufsize)
        self.rawdata = deque(maxlen=self.bufsize)
        self.pMin = pMin
        self.pMax = pMax

        self.convert_pressure = convert_pressure
        self.f = OneEuroFilter(100, .25, .1)
        self.event = 0
        self.run = True
        self.filterflag = True

        #Touch event detection
        self.diffsamps = 100  #How far back to look for diff
        self.event_thresh = 7  #Standard Deviation value to consider an event is happening
        self.touch = False  #Is a touch happening?
        self.touchcount = 0  #How many touch samples so far?
        self.touchthresh = 1000  #Number of touch samples to check
        self.touch_data = []  # Data from touch to be sent to machine learning thingy
        self.touch_buf = []
        #		self.model        = load(model) # Path to compiled model file
        self.printed = False

        self.socket = socket_start

        #Keep values to update baseline
        self.baselinebuf = deque(maxlen=self.bufsize // 4)

        app = QtGui.QApplication(sys.argv)
        loop = QEventLoop(app)
        asyncio.set_event_loop(loop)

        self.resize(800, 400)

        p = self.addPlot()
        self.plotline = p.plot(pen='y')
        self.baseline = p.plot(pen='b')
        self.show()

        if self.socket:
            # Create socket and wait for connection
            ip = ''
            port = 6969
            connected = False
            s = socket.socket()
            print('Waiting on connection')
            s.bind((ip, port))
            s.listen(1)
            self.conn, self.addr = s.accept()
            loop.run_until_complete(self.read_data())

        if self.outpath:
            with open(self.outpath, 'w') as self.outfile:
                if firstline:
                    self.log('#{}'.format(firstline))
                loop.run_until_complete(self.read_data())
        else:
            if firstline:
                self.log('#{}'.format(firstline))
            loop.run_until_complete(self.read_data())
Code example #12
def main():
    global j3d, j2d, cam, ppre_dof, pre_dof, root_rot, ppre_root_rot, pre_root_rot
    ppre_dof, pre_dof = np.zeros((1, 1)), np.zeros((1, 1))
    ppre_root_rot, pre_root_rot = np.zeros((1, 1)), np.zeros((1, 1))
    print("Begin load data...")
    if use_gt:
        j3ds, j2ds, cam = read_joints_from_h36m()  # (F, 17, 3/3)
    else:
        j3ds, j2ds, cam, gt_j3ds = read_joints_from_eval()  # (F, 17, 3/3)
    frame_num = j3ds.shape[0]
    dofs = np.zeros((frame_num, 28), dtype=float)
    dofs[:, :3] = j3ds[:, 0]

    time_str = datetime.now().strftime("%m_%d_%H_%M")
    subject_name = data_path.split('/')[-2]
    if 'all' in data_path:
        subject_name = 'all_' + subject_name
    if not use_gt:
        subject_name = 'all_pre_' + subject_name
    prefix = time_str + '_'
    if isXiaoice:
        prefix += 'Xiaoice_'
    elif isAijiang:
        prefix += 'Aijiang_'
    save_dir = './out/main2_' + prefix + subject_name + '_3d_' + str(
        w_3d) + '+2d_' + str(w_2d)
    if use_lim:
        save_dir += '+lim_' + str(w_lim)
    if use_temp:
        save_dir += '+temp_' + str(w_temp)
    if use_filter:
        save_dir += '+filter'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    print('save path: ' + save_dir)
    config_filter = {
        'freq': 10,
        'mincutoff': 20.0,
        'beta': 0.4,
        'dcutoff': 1.0
    }
    if 'all' in data_path:
        config_filter['freq'] = 50
    # there are 6 joints with 3 degrees of freedom and 4 joints with 1 degree of freedom
    filter_dof3 = [
        (OneEuroFilter(**config_filter), OneEuroFilter(**config_filter),
         OneEuroFilter(**config_filter), OneEuroFilter(**config_filter))
        for _ in range(6)
    ]
    filter_dof1 = [OneEuroFilter(**config_filter) for _ in range(4)]

    start_t = time.time()
    mpjpe_all = []
    threshold = 100.
    num = 0
    for f in range(frame_num):
        print('-------------------------------------')
        j3d, j2d = j3ds[f], j2ds[f]
        # initializing from the previous frame's result greatly reduces the number of iterations
        if f > 0:
            dofs[f, 3:] = dofs[f - 1, 3:]
        # predict root rot
        sol = root(optimize_root_rot, dofs[f, 3:6], method='lm')
        root_rot = sol.x
        dofs[f, 3:6] = root_rot
        # predict all rot
        sol = root(optimize, dofs[f, 6:], method='lm')
        dofs[f, 6:] = sol.x
        if f == 0:
            ppre_dof = dofs[f, 6:]
            ppre_root_rot = root_rot
        elif f == 1:
            pre_dof = dofs[f, 6:]
            pre_root_rot = root_rot
        else:
            ppre_dof = pre_dof
            pre_dof = dofs[f, 6:]
            ppre_root_rot = pre_root_rot
            pre_root_rot = root_rot

        # oneEuroFilter
        if use_filter:
            # do not filter the root rotation, to prevent Unity read/computation issues from producing wrong orientations in individual frames
            for i, j in enumerate([6, 10, 14, 17, 20, 14]):
                dof_j = dofs[f, j:j + 3]
                theta = np.linalg.norm(dof_j)
                axis = np.divide(dof_j, theta, where=theta != 0)
                axis[0] = filter_dof3[i][0](axis[0])
                axis[1] = filter_dof3[i][1](axis[1])
                axis[2] = filter_dof3[i][2](axis[2])
                theta = filter_dof3[i][3](theta)
                dofs[f, j:j + 3] = axis * theta
            for i, j in enumerate([9, 13, 12, 15]):
                dofs[f, j] = filter_dof1[i](dofs[f, j])

        j3d_pre, j2d_pre = compute_joints_from_dofs(dofs[f, 6:], cam)
        if use_gt:
            mpjpe = np.mean(
                np.linalg.norm(j3d * 1000 - j3d_pre * 1000, axis=-1))
        else:
            mpjpe = np.mean(
                np.linalg.norm(gt_j3ds[f] * 1000 - j3d_pre * 1000, axis=-1))
        if mpjpe > threshold:
            num += 1
        mpjpe_all.append(mpjpe)
        print(('%04d' % f) + '-MPJPE: ' + ('%.2f' % mpjpe) + ' mm')
        fd = open(save_dir + '/logs.txt', 'a+')
        fd.write(('%04d' % f) + '-MPJPE: ' + ('%.2f' % mpjpe) + ' mm\n')
        fd.close()
        plot_2skeleton(j3d * 100, j3d_pre * 100, f, mpjpe, save_dir)

    end_t = time.time()
    fd = open(save_dir + '/logs.txt', 'a+')
    fd.write('MPJPE-mean: ' + ('%.2f' % np.mean(mpjpe_all)) + ' mm\n')
    fd.close()
    np.savetxt(save_dir + '/' + prefix + subject_name + '_dofs.txt',
               dofs,
               fmt='%1.6f')
    dofs_new = np.zeros((dofs.shape[0], 18, 3))
    dofs_new[:, 0] = dofs[:, 0:3]
    dofs_new[:, 1] = dofs[:, 3:6]
    dofs_new[:, 2] = dofs[:, 6:9]
    dofs_new[:, 3, 0] = dofs[:, 9]
    dofs_new[:, 5] = dofs[:, 10:13]
    dofs_new[:, 6, 0] = dofs[:, 13]
    dofs_new[:, 8] = dofs[:, 14:17]
    dofs_new[:, 10] = dofs[:, 17:20]
    dofs_new[:, 12] = dofs[:, 20:23]
    dofs_new[:, 13, 1] = dofs[:, 23]
    dofs_new[:, 15] = dofs[:, 24:27]
    dofs_new[:, 16, 1] = dofs[:, 27]
    np.savetxt(save_dir + '/' + prefix + subject_name + '_dofs_new.txt',
               dofs_new.reshape((frame_num, -1)),
               fmt='%1.6f')
    dofs_world = dofs_local2world(dofs)  # (f, 18, 3)
    np.savetxt(save_dir + '/' + prefix + subject_name + '_dofs_world.txt',
               dofs_world.reshape((frame_num, -1)),
               fmt='%1.6f')
    time_per = (end_t - start_t) / frame_num
    print('time every frame: ' + str(time_per))
    print(np.mean(mpjpe_all))
    frame_to_video(save_dir + '/3d_skeleton')
    os.rename(
        save_dir, save_dir + ('_%.2fmm_' % np.mean(mpjpe_all)) + str(num) +
        ('_%.2fs' % time_per))
Code example #13
File: benchmark.py  Project: PuChan-HCI/VNect
    def __init__(self, video=None, T=False):
        print('Initializing...')

        # the input camera serial number of the PC (int), or PATH to input video (str)
        self.video = 0 if not video else video
        # whether to apply the transposed matrix
        self.T = T

        ## hyper-parameters ##
        # the side length of the bounding box
        self.box_size = 368
        # this factor indicates that the input box size is 8 times the side length of the output heatmaps
        self.hm_factor = 8
        # number of the joints to be detected
        self.joints_num = 21
        # the ratio factors to scale the input image crops, no more than 1.0
        self.scales = [1]  # or [1, 0.7] to be consistent with the author
        # parent joint indexes of each joint (for plotting the skeleton lines)
        self.joint_parents = [
            16, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7,
            10, 13
        ]

        ## one euro filter ##
        config_2d = {'freq': 12, 'mincutoff': 1.7, 'beta': 0.3, 'dcutoff': 1.0}
        config_3d = {'freq': 12, 'mincutoff': 0.8, 'beta': 0.4, 'dcutoff': 1.0}
        self.filter_2d = [(OneEuroFilter(**config_2d),
                           OneEuroFilter(**config_2d))
                          for i in range(self.joints_num)]
        self.filter_3d = [(OneEuroFilter(**config_3d),
                           OneEuroFilter(**config_3d),
                           OneEuroFilter(**config_3d))
                          for i in range(self.joints_num)]

        ## flags ##
        # flag for determining whether the left mouse button is clicked
        self._clicked = False

        ## place holders ##
        self.rect = None
        self.frame_square = None
        self.input_batch = None

        # VNect model
        self.sess = tf.Session()
        saver = tf.train.import_meta_graph('./models/tf_model/vnect_tf.meta')
        saver.restore(self.sess,
                      tf.train.latest_checkpoint('./models/tf_model/'))
        graph = tf.get_default_graph()
        self.input_crops = graph.get_tensor_by_name('Placeholder:0')
        self.heatmap = graph.get_tensor_by_name('split_2:0')
        self.x_heatmap = graph.get_tensor_by_name('split_2:1')
        self.y_heatmap = graph.get_tensor_by_name('split_2:2')
        self.z_heatmap = graph.get_tensor_by_name('split_2:3')

        # init the joint coord placeholders
        self.joints_2d = np.zeros((self.joints_num, 2), dtype=np.int32)
        self.joints_3d = np.zeros((self.joints_num, 3), dtype=np.float32)

        # catch the video stream
        self.cameraCapture = cv2.VideoCapture(self.video)
        if not self.cameraCapture.isOpened():
            raise Exception('video stream not opened: %s' % self.video)

        # frame width and height
        self.W = int(self.cameraCapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.H = int(self.cameraCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        if self.T:
            self.W, self.H = self.H, self.W

        # 3D joints visualization
        self.fig = plt.figure()
        self.ax_3d = plt.axes(projection='3d')
        plt.ion()
        self.ax_3d.clear()

        print('Initializing done.')