# These snippets assume the following imports plus module-level constants
# (Hip_index, In_frame_size, Condition_num, Groundtruth_num) defined
# elsewhere in the project.
import os
from os import listdir

import numpy as np
import torch

import read_bvh


def generate_seq(initial_seq_np, generate_frames_number, model,
                 save_dance_folder):
    # dance_batch_np dim = (5, 15, 171)
    # Re-encode hip_x and hip_z as the difference from the next frame to the
    # current frame: for every batch element, subtract frames 0:end-1 from
    # frames 1:end along the time axis, so the result has one frame fewer.
    dif = initial_seq_np[:, 1:] - initial_seq_np[:, :-1]
    print('dif: ', dif.shape)
    # Copy the original values for frames 0:end-1, then overwrite the hip x
    # and z channels with the frame-to-frame differences computed above.
    initial_seq_dif_hip_x_z_np = initial_seq_np[:, :-1].copy()
    initial_seq_dif_hip_x_z_np[:, :, Hip_index * 3] = dif[:, :, Hip_index * 3]
    initial_seq_dif_hip_x_z_np[:, :, Hip_index * 3 + 2] = dif[:, :, Hip_index * 3 + 2]

    initial_seq = torch.autograd.Variable(
        torch.FloatTensor(initial_seq_dif_hip_x_z_np.tolist()).cuda())
    print('initial_seq: ', initial_seq.size())
    # initial_seq dim = (5, 14, 171)
    predict_seq = model.forward(initial_seq, generate_frames_number)

    batch = initial_seq_np.shape[0]
    print('batch: ', batch)
    # batch = 5

    for b in range(batch):
        out_seq = np.array(predict_seq[b].data.tolist()).reshape(
            -1, In_frame_size)
        # Undo the delta encoding: accumulate the predicted hip x/z
        # velocities back into absolute positions, starting from the origin.
        last_x = 0.0
        last_z = 0.0
        for frame in range(out_seq.shape[0]):
            out_seq[frame, Hip_index * 3] += last_x
            last_x = out_seq[frame, Hip_index * 3]

            out_seq[frame, Hip_index * 3 + 2] += last_z
            last_z = out_seq[frame, Hip_index * 3 + 2]

        read_bvh.write_traindata_to_bvh(
            save_dance_folder + "out" + "%02d" % b + ".bvh", out_seq)
    return np.array(predict_seq.data.tolist()).reshape(batch, -1,
                                                       In_frame_size)
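
The per-frame accumulation above is a cumulative sum over the predicted hip
deltas. A minimal sketch of the round trip, assuming a (frames, In_frame_size)
array with the same Hip_index channel layout; both helper names are
illustrative, not from the original project:

def hip_xz_to_deltas(seq, hip_index):
    # Replace absolute hip x/z with frame-to-frame differences; drops the
    # last frame, matching the encoding at the top of generate_seq.
    out = seq[:-1].copy()
    dif = seq[1:] - seq[:-1]
    out[:, hip_index * 3] = dif[:, hip_index * 3]
    out[:, hip_index * 3 + 2] = dif[:, hip_index * 3 + 2]
    return out

def deltas_to_hip_xz(seq, hip_index):
    # Inverse: integrate the hip x/z velocities back to absolute positions,
    # starting from the origin like the loop above (last_x = last_z = 0).
    out = seq.copy()
    out[:, hip_index * 3] = np.cumsum(seq[:, hip_index * 3])
    out[:, hip_index * 3 + 2] = np.cumsum(seq[:, hip_index * 3 + 2])
    return out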
Code example #2
def generate_bvh_from_traindata(src_train_folder, tar_bvh_folder):

    print("Generating bvh data for " + src_train_folder)
    if not os.path.exists(tar_bvh_folder):
        os.makedirs(tar_bvh_folder)
    dances_names = listdir(src_train_folder)
    for dance_name in dances_names:
        if dance_name.endswith(".npy"):
            print("Processing " + dance_name)
            dance = np.load(src_train_folder + dance_name)
            # Keep every 8th frame; integer division keeps the range
            # argument an int under Python 3.
            dance2 = []
            for i in range(dance.shape[0] // 8):
                dance2 = dance2 + [dance[i * 8]]
            print(len(dance2))
            read_bvh.write_traindata_to_bvh(
                tar_bvh_folder + dance_name + ".bvh", np.array(dance2))
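
The frame-skipping loop above can also be written as one NumPy slice; a
minimal sketch with a hypothetical placeholder clip:

# Hypothetical vectorized form of the every-8th-frame selection.
dance = np.zeros((100, 171), dtype=np.float32)  # placeholder motion clip
num_kept = dance.shape[0] // 8                  # floor, as in the loop above
dance2 = dance[:num_kept * 8:8]                 # frames 0, 8, 16, ...
assert dance2.shape[0] == num_kept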
def generate_seq(initial_seq_np, generate_frames_number, model,
                 save_dance_folder):

    # Re-encode hip_x and hip_z as the difference from the next frame to the
    # current frame.
    dif = initial_seq_np[:, 1:] - initial_seq_np[:, :-1]
    initial_seq_dif_hip_x_z_np = initial_seq_np[:, :-1].copy()
    initial_seq_dif_hip_x_z_np[:, :, Hip_index * 3] = dif[:, :, Hip_index * 3]
    initial_seq_dif_hip_x_z_np[:, :, Hip_index * 3 + 2] = dif[:, :, Hip_index * 3 + 2]

    initial_seq = torch.autograd.Variable(
        torch.FloatTensor(initial_seq_dif_hip_x_z_np.tolist()).cuda())

    predict_seq = model.forward(initial_seq, generate_frames_number)

    batch = initial_seq_np.shape[0]

    for b in range(batch):
        out_seq = np.array(predict_seq[b].data.tolist()).reshape(
            -1, In_frame_size)
        # Integrate predicted hip x/z velocities back to absolute positions.
        last_x = 0.0
        last_z = 0.0
        for frame in range(out_seq.shape[0]):
            out_seq[frame, Hip_index * 3] += last_x
            last_x = out_seq[frame, Hip_index * 3]

            out_seq[frame, Hip_index * 3 + 2] += last_z
            last_z = out_seq[frame, Hip_index * 3 + 2]

        read_bvh.write_traindata_to_bvh(
            save_dance_folder + "out" + "%02d" % b + ".bvh", out_seq)
    return np.array(predict_seq.data.tolist()).reshape(batch, -1,
                                                       In_frame_size)
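
A usage sketch for generate_seq; the seed path, frame counts and the model
constructor below are hypothetical placeholders rather than names confirmed
by the original project:

# Seed the generator with a short clip, then synthesize new frames.
seed = np.load("seed_dance.npy")          # hypothetical (frames, 171) clip
initial_seq_np = seed[np.newaxis, 0:15]   # one batch element, 15 seed frames
model = acLSTM()                          # hypothetical trained model
model.cuda()
generated = generate_seq(initial_seq_np, 400, model, "out_dances/")
print(generated.shape)                    # (1, frames, In_frame_size)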
Code example #4
def train_one_iteraton(real_seq_np,
                       model,
                       optimizer,
                       iteration,
                       save_dance_folder,
                       print_loss=False,
                       save_bvh_motion=True):
    # Called as: train_one_iteraton(dance_batch_np, model, optimizer,
    # iteration, write_bvh_motion_folder, print_loss, save_bvh_motion);
    # when save_bvh_motion is True, bvh files are written to save_dance_folder.
    # dance_batch_np dim = (32, 102, 171)
    print('data input dim: ', real_seq_np.shape)

    # Re-encode hip_x and hip_z as the difference from the next frame to the
    # current frame: for every batch element, subtract frames 0:end-1 from
    # frames 1:end along the time axis, so the result has one frame fewer.
    dif = real_seq_np[:, 1:] - real_seq_np[:, :-1]
    print('dif: ', dif.shape)
    # Copy the original values for frames 0:end-1, then overwrite the hip x
    # and z channels with the differences computed above.
    real_seq_dif_hip_x_z_np = real_seq_np[:, :-1].copy()
    real_seq_dif_hip_x_z_np[:, :, Hip_index * 3] = dif[:, :, Hip_index * 3]
    real_seq_dif_hip_x_z_np[:, :, Hip_index * 3 + 2] = dif[:, :, Hip_index * 3 + 2]

    real_seq = torch.autograd.Variable(
        torch.FloatTensor(real_seq_dif_hip_x_z_np.tolist()).cuda())
    # real_seq dim = (32, 101, 171)
    print('real_seq: ', real_seq.size())
    seq_len = real_seq.size()[1] - 1
    # seq_len = 101 - 1 = 100
    print('seq_len in train one iteration: ', seq_len)
    # The network input drops the last frame; each input frame is paired with
    # the next frame as its prediction target below.
    in_real_seq = real_seq[:, 0:seq_len]
    # in_real_seq dim = (32, 100, 171)
    print('in_real_seq: ', in_real_seq.size())

    # The ground truth to be predicted is the next frame, i.e. frames
    # 1:seq_len+1 of the sequence; .cuda() moves the tensor to the GPU and
    # .view flattens each batch element.
    predict_groundtruth_seq = torch.autograd.Variable(
        torch.FloatTensor(
            real_seq_dif_hip_x_z_np[:, 1:seq_len + 1].tolist())).cuda().view(
                real_seq_np.shape[0], -1)
    # predict_groundtruth_seq dim = (32, 17100)
    print('predict_groundtruth_seq: ', predict_groundtruth_seq.shape)

    # Condition_num = 5, Groundtruth_num = 5: these control how many
    # self-generated vs. ground-truth frames the model is fed in turn.
    predict_seq = model.forward(in_real_seq, Condition_num, Groundtruth_num)

    optimizer.zero_grad()
    # Clears the gradients of all optimized parameters.

    loss = model.calculate_loss(predict_seq, predict_groundtruth_seq)

    loss.backward()

    optimizer.step()

    if print_loss:
        print("###########" + "iter %07d" % iteration +
              "######################")
        print("loss: " + str(loss.data.tolist()))

    if save_bvh_motion:
        # Save the first motion sequence in the batch, converting the hip
        # x/z velocities back to absolute positions by accumulation.
        gt_seq = np.array(predict_groundtruth_seq[0].data.tolist()).reshape(
            -1, In_frame_size)
        last_x = 0.0
        last_z = 0.0
        for frame in range(gt_seq.shape[0]):
            gt_seq[frame, Hip_index * 3] += last_x
            last_x = gt_seq[frame, Hip_index * 3]

            gt_seq[frame, Hip_index * 3 + 2] += last_z
            last_z = gt_seq[frame, Hip_index * 3 + 2]

        out_seq = np.array(predict_seq[0].data.tolist()).reshape(
            -1, In_frame_size)
        last_x = 0.0
        last_z = 0.0
        for frame in range(out_seq.shape[0]):
            out_seq[frame, Hip_index * 3] += last_x
            last_x = out_seq[frame, Hip_index * 3]

            out_seq[frame, Hip_index * 3 + 2] += last_z
            last_z = out_seq[frame, Hip_index * 3 + 2]

        read_bvh.write_traindata_to_bvh(
            save_dance_folder + "%07d" % iteration + "_gt.bvh", gt_seq)
        read_bvh.write_traindata_to_bvh(
            save_dance_folder + "%07d" % iteration + "_out.bvh", out_seq)
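
The input/target construction above is a one-frame shift: frame t of the
input sequence is trained to predict frame t + 1. A minimal standalone sketch
of that alignment, independent of the model:

# Toy illustration of the shift used for in_real_seq and the ground truth.
seq = np.arange(5 * 3, dtype=np.float32).reshape(5, 3)  # 5 frames, 3 channels
inputs = seq[:-1]   # frames 0..3, analogous to in_real_seq
targets = seq[1:]   # frames 1..4, analogous to predict_groundtruth_seq
assert inputs.shape == targets.shape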
Code example #5
def train_one_iteraton(real_seq_np,
                       model,
                       optimizer,
                       iteration,
                       save_dance_folder,
                       print_loss=False,
                       save_bvh_motion=True):

    # Re-encode hip_x and hip_z as the difference from the next frame to the
    # current frame.
    dif = real_seq_np[:, 1:] - real_seq_np[:, :-1]
    real_seq_dif_hip_x_z_np = real_seq_np[:, :-1].copy()
    real_seq_dif_hip_x_z_np[:, :, Hip_index * 3] = dif[:, :, Hip_index * 3]
    real_seq_dif_hip_x_z_np[:, :, Hip_index * 3 + 2] = dif[:, :, Hip_index * 3 + 2]

    real_seq = torch.autograd.Variable(
        torch.FloatTensor(real_seq_dif_hip_x_z_np.tolist()).cuda())

    seq_len = real_seq.size()[1] - 1
    in_real_seq = real_seq[:, 0:seq_len]

    predict_groundtruth_seq = torch.autograd.Variable(
        torch.FloatTensor(
            real_seq_dif_hip_x_z_np[:, 1:seq_len + 1].tolist())).cuda().view(
                real_seq_np.shape[0], -1)

    predict_seq = model.forward(in_real_seq, Condition_num, Groundtruth_num)

    optimizer.zero_grad()

    loss = model.calculate_loss(predict_seq, predict_groundtruth_seq)

    loss.backward()

    optimizer.step()

    if print_loss:
        print("###########" + "iter %07d" % iteration +
              "######################")
        # loss is a scalar; don't index its .tolist() result, which is a
        # plain float in modern PyTorch.
        print("loss: " + str(loss.data.tolist()))

    if save_bvh_motion:
        # Save the first motion sequence in the batch.
        gt_seq = np.array(predict_groundtruth_seq[0].data.tolist()).reshape(
            -1, In_frame_size)
        last_x = 0.0
        last_z = 0.0
        for frame in range(gt_seq.shape[0]):
            gt_seq[frame, Hip_index * 3] += last_x
            last_x = gt_seq[frame, Hip_index * 3]

            gt_seq[frame, Hip_index * 3 + 2] += last_z
            last_z = gt_seq[frame, Hip_index * 3 + 2]

        out_seq = np.array(predict_seq[0].data.tolist()).reshape(
            -1, In_frame_size)
        last_x = 0.0
        last_z = 0.0
        for frame in range(out_seq.shape[0]):
            out_seq[frame, Hip_index * 3] += last_x
            last_x = out_seq[frame, Hip_index * 3]

            out_seq[frame, Hip_index * 3 + 2] += last_z
            last_z = out_seq[frame, Hip_index * 3 + 2]

        read_bvh.write_traindata_to_bvh(
            save_dance_folder + "%07d" % iteration + "_gt.bvh", gt_seq)
        read_bvh.write_traindata_to_bvh(
            save_dance_folder + "%07d" % iteration + "_out.bvh", out_seq)
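
A sketch of how this function might be driven from an outer training loop;
the model class, batch sampler and folder below are hypothetical
placeholders:

# Hypothetical driver loop around train_one_iteraton.
model = acLSTM().cuda()                    # hypothetical model class
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
for iteration in range(100000):
    dance_batch_np = sample_dance_batch()  # hypothetical: (32, 102, 171)
    train_one_iteraton(dance_batch_np, model, optimizer, iteration,
                       "train_bvh/", print_loss=(iteration % 20 == 0),
                       save_bvh_motion=(iteration % 1000 == 0))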
Code example #6
# Snippet from a foot-skate fixing routine; it assumes train_data, num_frames,
# hip_idx, lfn_idx and rfn_idx (hip and left/right foot channel offsets) are
# already defined. hip_pos_v holds the hip displacement between frames.
hip_pos_v = train_data[1:num_frames, hip_idx:hip_idx + 3] \
            - train_data[0:num_frames - 1, hip_idx:hip_idx + 3]
is_last_frame_feet_on_ground = [0, 0]  # left, right
for i in range(1, num_frames - 1):
    hip_pos = train_data[i, hip_idx:hip_idx + 3]
    feet_pos = (train_data[i, lfn_idx:lfn_idx + 3],
                train_data[i, rfn_idx:rfn_idx + 3])
    # Pick the foot with the smaller y coordinate (1 if the right foot is
    # lower, else 0); foot positions are stored relative to the hip.
    lower_foot_idx = int(feet_pos[0][1] > feet_pos[1][1])
    global_lower_foot = feet_pos[lower_foot_idx] + hip_pos

    if global_lower_foot[1] < 0.01:  ## the foot touches the ground
        if is_last_frame_feet_on_ground[lower_foot_idx] == 1:
            # The same foot was grounded last frame: pin it by replacing the
            # hip x/z displacement with the foot's apparent drift.
            offset = last_feet_pos[lower_foot_idx] - feet_pos[lower_foot_idx]
            hip_pos_v[i - 1, 0] = offset[0]
            hip_pos_v[i - 1, 2] = offset[2]
        is_last_frame_feet_on_ground = [0, 0]
        is_last_frame_feet_on_ground[lower_foot_idx] = 1
        print("fix frame " + str(i))
    else:
        is_last_frame_feet_on_ground = [0, 0]

    last_feet_pos = feet_pos

# Re-integrate the (possibly corrected) hip displacements into absolute hip
# positions.
for i in range(1, num_frames):
    train_data[i, hip_idx:hip_idx +
               3] = train_data[i - 1, hip_idx:hip_idx + 3] + hip_pos_v[i - 1]

print("write bvh")

read_bvh.write_traindata_to_bvh('xx_fixed.bvh', train_data)
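
A hedged check that could follow the fix: measure how much a grounded foot
still drifts between consecutive frames. The 0.01 ground threshold is the one
used above; the metric itself is an illustrative sketch, not part of the
original project:

# Hypothetical skate metric: mean global x/z drift of a foot that stays on
# the ground across two consecutive frames.
def foot_skate(train_data, hip_idx, foot_idx, num_frames, eps=0.01):
    drift = 0.0
    count = 0
    for i in range(1, num_frames):
        prev_foot = (train_data[i - 1, foot_idx:foot_idx + 3]
                     + train_data[i - 1, hip_idx:hip_idx + 3])
        cur_foot = (train_data[i, foot_idx:foot_idx + 3]
                    + train_data[i, hip_idx:hip_idx + 3])
        if prev_foot[1] < eps and cur_foot[1] < eps:  # grounded both frames
            drift += float(np.linalg.norm(cur_foot[[0, 2]] - prev_foot[[0, 2]]))
            count += 1
    return drift / max(count, 1)

print("left-foot skate:", foot_skate(train_data, hip_idx, lfn_idx, num_frames))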