Example #1
    def __init__(self, config, train=True):

        self.config = config
        self.train = train
        self.formatdata = FormatData(config)
        if train:
            subjects = os.listdir('{0}/{1}/{2}'.format(config.data_root,
                                                       'train',
                                                       config.filename))
        else:
            subjects = os.listdir('{0}/{1}/{2}'.format(config.data_root,
                                                       'test',
                                                       config.filename))

        set = []
        complete_train = []
        for sub in subjects:
            if train:
                folderdir = '{0}/{1}/{2}/{3}'.format(config.data_root, 'train',
                                                     config.filename, sub)
            else:
                folderdir = '{0}/{1}/{2}/{3}'.format(config.data_root, 'test',
                                                     config.filename, sub)
            for file in os.listdir(folderdir):
                filedir = '{0}/{1}'.format(folderdir, file)
                rawdata = np.load(filedir)['poses'][:, :66]
                rawdata = self.frame_filter(rawdata)
                # drop sequences with too few frames
                if rawdata.shape[0] > 150:
                    set.append(rawdata)
            if len(complete_train) == 0:
                complete_train = copy.deepcopy(
                    set[-1])  # use the last sequence of each subject to compute mean/std
            else:
                complete_train = np.append(complete_train, set[-1], axis=0)
        if train:
            print('video num for training:', len(set))
        else:
            print('video num for test:', len(set))
        if not train and config.data_mean is None:
            print('Load train dataset first!')
        if train:
            data_mean, data_std, dim_to_ignore, dim_to_use = utils.normalization_stats(
                complete_train)
            config.data_mean = data_mean
            config.data_std = data_std
            config.dim_to_ignore = dim_to_ignore
            config.dim_to_use = dim_to_use

        set = utils.normalize_data(set, config.data_mean, config.data_std,
                                   config.dim_to_use)
        # [S_num, frame_for_S, 60]
        self.data = set
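
The constructor above delegates the per-dimension statistics to two helpers from utils whose implementation is not shown here. A minimal sketch of what they plausibly do, assuming the common human-motion preprocessing (z-scoring per dimension and ignoring near-constant dimensions with a 1e-4 cut-off, which is an assumption):

import numpy as np

def normalization_stats(complete_data):
    # Assumed behaviour: per-dimension mean/std over all concatenated frames,
    # flagging near-constant dimensions so they can be dropped downstream.
    data_mean = complete_data.mean(axis=0)
    data_std = complete_data.std(axis=0)
    dim_to_ignore = np.where(data_std < 1e-4)[0]
    dim_to_use = np.where(data_std >= 1e-4)[0]
    data_std[dim_to_ignore] = 1.0  # avoid division by zero later on
    return data_mean, data_std, dim_to_ignore, dim_to_use

def normalize_data(sequences, data_mean, data_std, dim_to_use):
    # Assumed behaviour: z-score every [frames, dims] sequence and keep
    # only the informative dimensions.
    return [(seq - data_mean)[:, dim_to_use] / data_std[dim_to_use]
            for seq in sequences]
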
    def __init__(self, config, train=True):

        self.config = config
        self.train = train
        self.formatdata = FormatData(config)
        if config.datatype == 'smpl':
            train_path = config.data_root
        else:
            print('CMUDataset only supports the smpl datatype')
            sys.exit(1)
        if config.filename != 'all':
            if train:
                subjects = config.subjects_train
            else:
                subjects = config.subjects_test
        else:
            print('Only the walking and dance actions are supported')
            sys.exit(1)

        set = []
        complete_train = []
        for sub in subjects:
            folderdir = '{0}/{1}'.format(train_path, sub)
            for file in os.listdir(folderdir):
                filedir = '{0}/{1}'.format(folderdir, file)
                rawdata = np.load(filedir)['poses'][:, :66]
                rawdata = self.frame_filter(rawdata)
                if rawdata.shape[0] > 70:
                    set.append(rawdata)
            if len(complete_train) == 0:
                complete_train = copy.deepcopy(
                    set[-1])  # use the last sequence of each subject to compute mean/std
            else:
                complete_train = np.append(complete_train, set[-1], axis=0)
        print('video num:', len(set))
        if not train and config.data_mean is None:
            print('Load train dataset first!')
        if train and config.datatype == 'smpl':
            data_mean, data_std, dim_to_ignore, dim_to_use = utils.normalization_stats(
                complete_train)
            config.data_mean = data_mean
            config.data_std = data_std
            config.dim_to_ignore = dim_to_ignore
            config.dim_to_use = dim_to_use

        set = utils.normalize_data(set, config.data_mean, config.data_std,
                                   config.dim_to_use)
        # [S_num, frame_for_S, 66]
        self.data = set
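
frame_filter is called in both constructors above but is not included in the snippets. A plausible stand-in, assuming it simply down-samples the raw sequence to a lower frame rate before the length check (the step size of 2 is an assumption), would be:

def frame_filter(self, rawdata, step=2):
    # Hypothetical down-sampling: keep every `step`-th frame of the sequence.
    return rawdata[::step]
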
Example #3
    def __init__(self, opt, training=True):
        self.dataset_path = os.path.join(opt.dataset_root, opt.dataset)
        train_data = []
        self.seq_num = 0
        self.set_start_nums = []
        if opt.dataset == 'SBU':
            # x and y are normalized to [0, 1] while z is normalized to [0, 7.8125]
            if training:
                sets = range(1, 19)  # train
                print(
                    "************start loading SBU training set!!!*************"
                )
            else:
                sets = range(19, 22)  # test
                print(
                    "************start loading SBU testing set!!!*************"
                )
            self.train_data = {}
            complete_train = None

            for i in tqdm(sets):  # all (1,22)
                self.set_start_nums.append(self.seq_num)
                for cat in range(1, 9):
                    tmp_path = os.path.join(self.dataset_path, '%02d' % i,
                                            '%02d' % cat)
                    if not os.path.exists(tmp_path):
                        continue
                    for txt_file in os.listdir(tmp_path):
                        self.seq_num += 1
                        txt_path = os.path.join(tmp_path, txt_file)
                        one_seq_data = utils.read_SBU_txt(
                            txt_path)  # np.array float32
                        if complete_train is None:
                            complete_train = one_seq_data
                        else:
                            complete_train = np.concatenate(
                                (complete_train, one_seq_data), axis=0)
                        train_data.append(one_seq_data)
            print(complete_train.shape)
            # calculate the mean and std
            data_mean, data_std = utils.normalization_stats(complete_train)
            opt.train_mean = data_mean
            opt.train_std = data_std
            self.train_data = utils.normalize_SBU_data(train_data, data_mean,
                                                       data_std)
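
In this snippet utils.normalization_stats returns only a mean and a std (no dimension masking), and utils.normalize_SBU_data applies them to each sequence. A minimal sketch of both helpers, assuming plain per-dimension z-scoring:

import numpy as np

def normalization_stats(complete_data):
    # Assumed: per-dimension statistics over all concatenated frames.
    return complete_data.mean(axis=0), complete_data.std(axis=0)

def normalize_SBU_data(sequences, data_mean, data_std):
    # Assumed: z-score each [frames, dims] sequence with the shared statistics;
    # the epsilon guards against constant dimensions.
    eps = 1e-8
    return [(seq - data_mean) / (data_std + eps) for seq in sequences]
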
    def __init__(self, config, train=True):

        self.config = config
        self.train = train
        self.lie_tsfm = LieTsfm(config)
        self.formatdata = FormatData(config)
        if config.datatype == 'lie':
            if train:
                train_path = './data/h3.6m/Train/train_lie'
            else:
                train_path = './data/h3.6m/Test/test_lie'
        elif config.datatype == 'xyz':
            # only the training path is wired up for xyz data here
            train_path = './data/h3.6m/Train/train_xyz'
        if train:
            subjects = ['S1', 'S6', 'S7', 'S8', 'S9', 'S11']
        else:
            subjects = ['S5']

        if config.filename == 'all':
            actions = [
                'directions', 'discussion', 'eating', 'greeting', 'phoning',
                'posing', 'purchases', 'sitting', 'sittingdown', 'smoking',
                'takingphoto', 'waiting', 'walking', 'walkingdog',
                'walkingtogether'
            ]
        else:
            actions = [config.filename]

        set = []
        complete_train = []
        for id in subjects:
            for action in actions:
                for i in range(2):
                    if config.datatype == 'lie':
                        filename = '{0}/{1}_{2}_{3}_lie.mat'.format(
                            train_path, id, action, i + 1)
                        rawdata = sio.loadmat(filename)['lie_parameters']
                        set.append(rawdata)
                    elif config.datatype == 'xyz':
                        filename = '{0}/{1}_{2}_{3}_xyz.mat'.format(
                            train_path, id, action, i + 1)
                        rawdata = sio.loadmat(filename)['joint_xyz']
                        set.append(rawdata.reshape(rawdata.shape[0], -1))

                if len(complete_train) == 0:
                    complete_train = copy.deepcopy(set[-1])
                else:
                    complete_train = np.append(complete_train, set[-1], axis=0)

        if not train and config.data_mean is None:
            print('Load train dataset first!')

        if train and config.datatype == 'lie':
            data_mean, data_std, dim_to_ignore, dim_to_use = utils.normalization_stats(
                complete_train)
            config.data_mean = data_mean
            config.data_std = data_std
            config.dim_to_ignore = dim_to_ignore
            config.dim_to_use = dim_to_use

        set = utils.normalize_data(set, config.data_mean, config.data_std,
                                   config.dim_to_use)
        # [S_num, frame_for_S, 54]
        self.data = set
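
The 'Load train dataset first!' check above relies on the training constructor having already written the statistics onto the shared config. A hypothetical usage sketch (the class name H36MDataset and the exact config fields are assumptions; FormatData and LieTsfm may require further fields):

from types import SimpleNamespace

config = SimpleNamespace(datatype='lie', filename='walking',
                         data_mean=None, data_std=None,
                         dim_to_ignore=None, dim_to_use=None)

train_set = H36MDataset(config, train=True)   # fills config.data_mean / config.data_std
test_set = H36MDataset(config, train=False)   # reuses the training statistics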