Beispiel #1
0
    def getOriginalTrajectory(self, frame, origin_offset=0):
        """Replay `frame` steps of the recorded motion data and return the
        resulting global trajectories and target points (both float32,
        positions scaled back down by 100)."""
        cfg = RNNConfig.instance()
        raw_x = loadData("../motions/{}/data/0_xData.dat".format(self.motion))
        raw_y = loadData("../motions/{}/data/0_yData.dat".format(self.motion))

        # Skip the first sample plus any extra offset; keep `frame` entries.
        lo, hi = 1 + origin_offset, frame + 1 + origin_offset
        raw_x, raw_y = raw_x[lo:hi], raw_y[lo:hi]

        denorm_x = np.array(
            [cfg.xNormal.denormalize_and_fill_zero(row) for row in raw_x])
        denorm_y = np.array(
            [cfg.yNormal.denormalize_and_fill_zero(row) for row in raw_y])

        self.resetProperties()

        trajectories, targets = [], []
        for ctrl, pose in zip(denorm_x, denorm_y):
            # Control carries the local target; scale by 100 for the 2D pose
            # math, then divide back out after mapping to global coordinates.
            local = Pose2d([ctrl[0] * 100, ctrl[1] * 100])
            targets.append(
                np.array(self.rootPose[0].localToGlobal(local).p) / 100)
            # NOTE(review): getGlobalPositions presumably advances internal
            # root state, so the target above must be computed first each
            # iteration — keep this ordering.
            trajectories.append(self.getGlobalPositions(pose, 0))

        return (np.asarray(trajectories, dtype=np.float32),
                np.asarray(targets, dtype=np.float32))
Beispiel #2
0
def train(motion_name):
    """Train the RNN motion model on the named motion dataset.

    Runs an effectively endless loop of training rounds; every 100th outer
    iteration it records/plots the loss components, prints a summary line,
    and checkpoints the network under ../motions/<motion_name>/train/.

    Args:
        motion_name: folder name of the motion whose data/config to load.
    """
    RNNConfig.instance().loadData(motion_name)
    data = MotionData()
    model = RNNModel()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)

    # One row per loss component; columns are appended as training proceeds.
    loss_name = ["total", "root", "pose", "foot", "pred"]
    loss_list = np.array([[]] * 5)
    # Only used by the smoothed-curve line commented out further below.
    loss_list_smoothed = np.array([[]] * 5)
    st = time.time()
    for c in range(100000000):  # effectively "train forever"
        data.resetBatch()
        model.resetState(data.batchSize)
        for n in range(data.numEpoch):
            batch_x, batch_y = data.getNextBatch()
            with tf.GradientTape() as tape:
                # Drop the last control step and go time-major so that
                # controls[i] is the whole batch at step i.
                controls = tf.transpose(batch_x[:, :-1], perm=[1, 0, 2])
                generated = []
                pose = batch_y[:, 0]  # seed with the ground-truth first pose
                for i in range(data.stepSize):
                    control = controls[i]

                    output = model.forwardOneStep(control, pose, True)
                    generated.append(output)

                    # Feed the model's own output back as the next pose;
                    # strip the appended control prediction if present.
                    if data.useControlPrediction:
                        pose = output[:, :data.yDimension]
                    else:
                        pose = output

                generated = tf.convert_to_tensor(generated)
                # Back to batch-major for the loss computation.
                generated = tf.transpose(generated, perm=[1, 0, 2])

                loss, loss_detail = data.loss(batch_y, batch_x[:, 1:],
                                              generated)
                reg_loss = model.losses
                total_loss = loss + reg_loss

            gradients = tape.gradient(total_loss, model.trainable_variables)
            # Clip by global norm to keep recurrent gradients stable.
            gradients, _grad_norm = tf.clip_by_global_norm(gradients, 0.5)
            optimizer.apply_gradients(zip(gradients,
                                          model.trainable_variables))

        if c % 100 == 0:
            # NOTE(review): loss_detail here is from the *last* batch of the
            # round only, not an average — confirm this is intended.
            loss_detail = tf.convert_to_tensor(loss_detail).numpy()
            loss_list = np.insert(loss_list,
                                  loss_list.shape[1],
                                  loss_detail,
                                  axis=1)
            # loss_list_smoothed = np.insert(loss_list_smoothed, loss_list_smoothed.shape[1], np.array([np.mean(loss_list[-10:], axis=1)]), axis=1)
            Plot([*zip(loss_list, loss_name)], "loss_s", 1)
            print(
                "\rElapsed : {:8.2f}s, Total : {:.6f}, [ root : {:.6f}, pose : {:.6f}, foot : {:.6f}, pred : {:.6f} ]"
                .format(time.time() - st, *loss_detail))

            model.save("../motions/{}/train/network".format(motion_name))
        else:
            # In-place progress line between checkpoints.
            print("\r{}/100 : {:.6f}, {:.6f}".format(c % 100, np.array(loss),
                                                     np.array(reg_loss)),
                  end="")
Beispiel #3
0
    def __init__(self, num_slaves, motion="walk"):
        """Set up kinematic state, fall/stand-up bookkeeping, and random
        targets for `num_slaves` parallel characters driven by `motion`."""
        np.random.seed(int(time.time()))
        self.num_slaves = num_slaves
        self.motion = motion

        # Must be loaded before the x/y dimensions below are queried.
        RNNConfig.instance().loadData(motion)

        # Initial (all-zero, normalized) pose / control-prediction vectors.
        # Every slave starts out sharing these same arrays.
        self.initialCharacterPose = np.zeros(RNNConfig.instance().yDimension,
                                             dtype=np.float32)
        self.initialControlPrediction = np.zeros(
            RNNConfig.instance().xDimension, dtype=np.float32)

        # Kinematic root poses (current and previous) plus character poses.
        self.rootPose = [Pose2d() for _ in range(num_slaves)]
        self.rootPosePrev = [Pose2d() for _ in range(num_slaves)]
        self.characterPose = [
            self.initialCharacterPose for _ in range(num_slaves)
        ]
        self.controlPrediction = [
            self.initialControlPrediction for _ in range(num_slaves)
        ]

        self.model = None
        self.isModelLoaded = False

        # parameter for root height
        self.target_height = 88.

        # Fall-over / stand-up state machines, one entry per slave.
        self.fallOverState = [0] * num_slaves
        self.fallOverCount = [0] * num_slaves
        self.standUpCount = [40] * num_slaves

        self.isWalk = True

        # Bounds used when sampling a random target.
        self.target_dist_lower = 300.0
        self.target_dist_upper = 350.0
        self.target_angle_upper = math.pi * 0.9
        self.target_angle_lower = math.pi * (-0.9)

        # Trajectory logging buffers.
        self.log_trajectory = []
        self.log_target_trajectory = []

        # One random target per slave, generated after all state is ready.
        self.targets = [self.randomTarget(i) for i in range(num_slaves)]
Beispiel #4
0
    def getReferences(self, targets=None):
        """Advance the RNN one step and return the global joint positions of
        every slave as a float32 array.

        If `targets` is given it overrides the current goals; otherwise any
        slave whose root is within 100 units of its goal gets a fresh random
        one.
        """
        if not self.isModelLoaded:
            self.loadNetworks()

        if targets is None:
            # Re-sample a goal for each slave that got close enough.
            for i in range(self.num_slaves):
                px, py = self.rootPose[i].p[0], self.rootPose[i].p[1]
                gx, gy = self.targets[i][0] * 100, self.targets[i][1] * 100
                if (px - gx) * (px - gx) + (py - gy) * (py - gy) < 100 * 100:
                    self.targets[i] = self.randomTarget(i)
        else:
            self.targets = np.array(targets, dtype=np.float32)

        convertedTargets = self.convertAndClipTarget(self.targets)
        # One forward step of the RNN in inference mode.
        output = self.model.forwardOneStep(
            tf.convert_to_tensor(convertedTargets),
            tf.convert_to_tensor(self.characterPose),
            training=False)

        cfg = RNNConfig.instance()
        if cfg.useControlPrediction:
            # Output packs [pose | predicted control]; split the two parts.
            self.characterPose = tf.slice(output, [0, 0],
                                          [-1, cfg.yDimension])
            self.controlPrediction = tf.slice(output, [0, cfg.yDimension],
                                              [-1, -1])
        else:
            self.characterPose = output

        # Convert outputs to plain float32 arrays for the per-slave loop.
        self.characterPose = np.array(self.characterPose, dtype=np.float32)
        self.controlPrediction = np.array(self.controlPrediction,
                                          dtype=np.float32)

        # Denormalize each slave's pose and map it to global coordinates.
        pose_list = [
            self.getGlobalPositions(
                cfg.yNormal.denormalize_and_fill_zero(self.characterPose[j]),
                j) for j in range(self.num_slaves)
        ]

        # Keep a history for later trajectory export.
        self.log_trajectory.append(pose_list)
        self.log_target_trajectory.append(self.getTargets())

        return np.array(pose_list, dtype=np.float32)
Beispiel #5
0
    def setDynamicPose(self, dpose, index=0):
        """Overwrite slave `index`'s kinematic state from the dynamic pose
        vector `dpose`, re-normalizing it into the RNN's pose encoding."""
        yNormal = RNNConfig.instance().yNormal

        # New 2D root transform taken from the dynamic pose components.
        root = Pose2d().transform([dpose[3], dpose[0], dpose[2]])
        delta = self.rootPosePrev[index].relativePose(root)

        pose = deepcopy(
            yNormal.denormalize_and_fill_zero(self.characterPose[index]))

        # Layout of the (denormalized) character pose vector:
        pose[0:2] = dpose[52:54]        # foot contact
        pose[2] = delta.rotatedAngle()  # root rotate
        pose[3:5] = delta.p             # root translate
        pose[5] = dpose[1]              # root height
        pose[6:63] = dpose[54:111]      # positions
        pose[63:111] = dpose[4:52]      # orientations

        self.rootPose[index] = root
        self.rootPosePrev[index] = root
        self.characterPose[index] = yNormal.normalize_and_remove_zero(pose)
Beispiel #6
0
    def __init__(self):
        """Build the stacked-LSTM + dense output network described by the
        RNNConfig singleton."""
        cfg = RNNConfig.instance()

        # Input is control + pose; the output optionally appends a predicted
        # next control vector after the pose.
        input_size = cfg.xDimension + cfg.yDimension
        if cfg.useControlPrediction:
            output_size = cfg.yDimension + cfg.xDimension
        else:
            output_size = cfg.yDimension

        self.regularizer = tf.keras.regularizers.l2(0.00001)

        def make_cell():
            # Forget-gate bias is seeded at 0.8 via a custom initializer
            # (unit_forget_bias is disabled in favor of it).
            return tf.keras.layers.LSTMCell(
                units=cfg.lstmLayerSize,
                unit_forget_bias=False,
                bias_initializer=ForgetBiasInitializer(0.8),
                dropout=0.0,
                kernel_regularizer=self.regularizer,
                bias_regularizer=self.regularizer,
                recurrent_regularizer=self.regularizer)

        cells = [make_cell() for _ in range(cfg.lstmLayerNumber)]

        self.stacked_cells = tf.keras.layers.StackedRNNCells(
            cells, input_shape=(None, input_size), dtype=tf.float32)
        self.dense = tf.keras.layers.Dense(output_size,
                                           kernel_regularizer=self.regularizer,
                                           bias_regularizer=self.regularizer,
                                           dtype=tf.float32)

        # Build both layers eagerly so their variables exist immediately.
        self.stacked_cells.build(input_shape=(None, input_size))
        self.dropout = tf.keras.layers.Dropout(0.0)
        self.dense.build(input_shape=(None, cfg.lstmLayerSize))

        self._trainable_variables = (self.stacked_cells.trainable_variables +
                                     self.dense.trainable_variables)
        # Total regularization loss over all layers, as a single scalar.
        self._losses = tf.reduce_sum(
            tf.convert_to_tensor(self.stacked_cells.losses +
                                 self.dense.losses))

        self._ckpt = tf.train.Checkpoint(stacked_cells=self.stacked_cells,
                                         dense=self.dense)
Beispiel #7
0
    def updateCharacterFallState(self, index):
        """Drive the fall-over state machine for slave `index` from its
        current denormalized root height.

        States: 0 = normal, 1 = falling over, 2 = standing up.
        """
        pose = RNNConfig.instance().yNormal.denormalize_and_fill_zero(
            self.characterPose[index])
        root_height = pose[5]  # slot 5 of the pose vector is root height

        if root_height < 30:
            # On the ground: a falling character switches to standing up.
            if self.fallOverState[index] == 1:
                self.fallOverState[index] = 2
                print("standing up")
            self.fallOverCount[index] %= 200  # keep the counter bounded
        elif root_height < 70:
            # Dropping low: a normal character starts falling over.
            if self.fallOverState[index] == 0:
                self.fallOverState[index] = 1
                self.fallOverCount[index] = 0
                print("falling over")
        elif root_height > 83:
            # Fully upright again: finish the stand-up phase.
            if self.fallOverState[index] == 2:
                self.fallOverState[index] = 0
                self.standUpCount[index] = 0
                print("normal")

        # Tick whichever counter matches the current mode.
        if self.fallOverState[index] == 0:
            self.standUpCount[index] += 1
        else:
            self.fallOverCount[index] += 1
Beispiel #8
0
    def __init__(self):
        """Load the recorded motion data and cache the training
        hyper-parameters from the global RNNConfig singleton."""
        cfg = RNNConfig.instance()
        motion = cfg.motion

        # Recorded control (x) and pose (y) sequences for this motion.
        self.x_data = loadData("../motions/%s/data/0_xData.dat" % (motion))
        self.y_data = loadData("../motions/%s/data/0_yData.dat" % (motion))

        # Config values cached as attributes for direct access in training.
        self.xDimension = cfg.xDimension
        self.yDimension = cfg.yDimension
        self.footSlideWeight = cfg.footSlideWeight
        self.useControlPrediction = cfg.useControlPrediction
        self.yNormal = cfg.yNormal
        self.rootDimension = cfg.rootDimension
        self.rootAngleIdx = cfg.rootAngleIdx
        self.stepSize = cfg.stepSize
        self.batchSize = cfg.batchSize
        self.numEpoch = cfg.numEpoch
Beispiel #9
0
    def convertAndClipTarget(self, targets):
        """Convert world-space targets into the clipped, normalized local
        control vectors the RNN expects.

        Per slave, the logic is motion-specific ("walkrunfall" vs "chicken");
        other motions pass through untouched. Returns a float32 array.
        """
        # clip target and change to local coordinate
        targets = deepcopy(targets)
        for j in range(self.num_slaves):
            if self.motion == "walkrunfall":
                self.updateCharacterFallState(j)

                # Express the target in the character's local root frame.
                t = targets[j][:2]
                t = Pose2d(t)
                t = self.rootPose[j].relativePose(t)
                t = t.p
                t_len = math.sqrt(t[0] * t[0] + t[1] * t[1])

                # Clamp heading to +-0.4*pi and distance to the gait's limit.
                t_angle = math.atan2(t[1], t[0])
                t_angle = np.clip(t_angle, -0.4 * np.pi, 0.4 * np.pi)

                if (self.isWalk):
                    clip_len = 60
                else:
                    clip_len = 250

                t_len = np.clip(t_len, 0.0, clip_len)

                t[0] = np.cos(t_angle) * t_len
                t[1] = np.sin(t_angle) * t_len

                # targets[j][0] = t[0]
                # targets[j][1] = t[1]
                # targets[j][2] = self.target_height
                if (self.fallOverState[j] == 0):
                    if (self.standUpCount[j] < 40):
                        # Just stood up: fixed forward target for a while.
                        targets[j][0] = 60
                        targets[j][1] = 0
                        targets[j][2] = self.target_height
                    else:
                        # Normal locomotion: use the clipped local target.
                        targets[j][0] = t[0]
                        targets[j][1] = t[1]
                        targets[j][2] = self.target_height
                else:
                    if (self.fallOverCount[j] < 70):
                        # Early fall phase: steer with the dataset mean.
                        # NOTE(review): `pred` is computed but unused here —
                        # likely leftover; confirm before removing.
                        pred = RNNConfig.instance(
                        ).xNormal.denormalize_and_fill_zero(
                            self.controlPrediction[j])
                        targets[j][0] = RNNConfig.instance().xNormal.mean[0]
                        targets[j][1] = RNNConfig.instance().xNormal.mean[1]
                        targets[j][2] = 0  #0#self.target_height
                    else:
                        if (RNNConfig.instance().useControlPrediction):
                            # Late fall phase: follow the model's own
                            # predicted control vector.
                            pred = RNNConfig.instance(
                            ).xNormal.denormalize_and_fill_zero(
                                self.controlPrediction[j])
                            targets[j][0] = pred[
                                0]  # self.config.x_normal.mean[0]
                            targets[j][1] = pred[
                                1]  # self.config.x_normal.mean[1]
                            targets[j][2] = pred[2]  # min(88, pred[2]+20)
                        else:
                            targets[j][0] = 0  # self.config.x_normal.mean[0]
                            targets[j][1] = 0  # self.config.x_normal.mean[1]
                            targets[j][
                                2] = self.target_height  #min(88, self.config.y_normal.de_normalize_l(self.current_y[j])[5]+20)

                targets[j] = RNNConfig.instance(
                ).xNormal.normalize_and_remove_zero(targets[j])
            elif self.motion == "chicken":
                self.updateCharacterFallState(j)

                # Same local-frame conversion, but the target is scaled by
                # 100 for the pose math and scaled back down afterwards.
                t = targets[j][:2]
                t = Pose2d([t[0] * 100, t[1] * 100])
                t = self.rootPose[j].relativePose(t)
                t = t.p
                t[0] /= 100
                t[1] /= 100
                t_len = math.sqrt(t[0] * t[0] + t[1] * t[1])

                # Wider heading clamp (+-0.6*pi) and short fixed distance.
                t_angle = math.atan2(t[1], t[0])
                t_angle = np.clip(t_angle, -0.6 * np.pi, 0.6 * np.pi)

                clip_len = 0.6

                t_len = np.clip(t_len, 0.0, clip_len)

                t[0] = np.cos(t_angle) * t_len
                t[1] = np.sin(t_angle) * t_len

                targets[j][0] = t[0]
                targets[j][1] = t[1]
                targets[j] = RNNConfig.instance(
                ).xNormal.normalize_and_remove_zero(targets[j])

        return np.array(targets, dtype=np.float32)