Code example #1

# Assumed imports for this snippet; bvh_reader, bvh_writer and the
# encoder/decoder helpers are project-local modules (the module name
# seq2seq_cells below is a guess):
import os
import time
from collections import OrderedDict

import numpy as np
import mxnet as mx

import bvh_reader as br
import bvh_writer as bw
from seq2seq_cells import encoder, decoder  # hypothetical module name

def MotionNet(order,
              epoch,
              batch_size,
              save_period,
              optimizer='sgd',
              learning_rate=0.1,
              lr_step=1000,
              lr_factor=0.9,
              stop_factor_lr=1e-08,
              use_gpu=True,
              use_cudnn=True,
              test=False,
              predict_size=5,
              time_step=150,
              seed_timestep=30,
              batch_Frame=5,
              frame_time=30):

    print("-------------------Motion Net-------------------")
    '''1. Data_Loading - bvh_reader'''
    Normalization_factor, train_motion, train_label_motion, seed_timestep, pre_timestep, column, file_directory = br.Motion_Data_Preprocessing(
        time_step, seed_timestep, batch_Frame)
    if test == True:

        data = OrderedDict()
        data['seed_motion'] = train_motion
        label = {'label_motion': train_label_motion}

        test_iter = mx.io.NDArrayIter(data=data, label=label)
        train_iter = mx.io.NDArrayIter(
            data=data, label=label, shuffle=False, last_batch_handle='pad'
        )  # Motion data is complex, so samples are fed in their original (easy-to-hard) order: shuffle=False

    else:

        train_motion = train_motion[:predict_size]
        train_label_motion = train_label_motion[:predict_size]

        data = OrderedDict()
        data['seed_motion'] = train_motion
        label = {'label_motion': train_label_motion}

        train_iter = mx.io.NDArrayIter(
            data=data,
            label=label,
            batch_size=batch_size,
            shuffle=False,
            last_batch_handle='pad'
        )  # Motion data is complex, so samples are fed in their original (easy-to-hard) order: shuffle=False
    '''2. hyperparameter'''

    rnn_layer_number = 1
    rnn_hidden_number = 200
    fc_number = 200
    Dropout_rate = 0.0

    # mx.rnn.FusedRNNCell only works when use_cudnn=True and use_gpu=True;
    # the context itself depends only on use_gpu.
    if use_gpu:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    '''3. Network'''
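    # Seq2seq layout: the encoder consumes the first seed_timestep frames and
    # the decoder, initialized with the encoder's final state, is unrolled for
    # pre_timestep steps to predict the remaining motion.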
    all_motion = mx.sym.Variable('seed_motion')
    seed_motion = mx.sym.transpose(mx.sym.slice_axis(data=all_motion,
                                                     axis=1,
                                                     begin=0,
                                                     end=seed_timestep),
                                   axes=(1, 0, 2))  # (time,batch,column)
    label_motion = mx.sym.Variable('label_motion')

    e_cell = encoder(use_cudnn=use_cudnn,
                     layer_number=rnn_layer_number,
                     hidden_number=rnn_hidden_number,
                     Dropout_rate=Dropout_rate)
    d_cell = decoder(use_cudnn=use_cudnn,
                     layer_number=rnn_layer_number,
                     hidden_number=rnn_hidden_number,
                     Dropout_rate=Dropout_rate)
    e_output, e_state = e_cell.unroll(length=seed_timestep,
                                      inputs=seed_motion,
                                      merge_outputs=False,
                                      layout='TNC')  # e_output: per-step outputs, e_state: final state
    print(e_state)
    #seq2seq in test
    if test == True:

        e_output_end = e_output[-1]  # Shape: (1, N, C)
        e_output_end = mx.sym.reshape(data=e_output_end, shape=(1, 1, -1))
        d_input = mx.sym.broadcast_to(e_output_end, shape=(pre_timestep, 0, 0))
        d_output, d_state = d_cell.unroll(length=pre_timestep,
                                          begin_state=e_state,
                                          inputs=d_input,
                                          merge_outputs=False,
                                          layout='TNC')

    #seq2seq in training

    else:
        e_output_end = e_output[-1]  # Shape: (1, N, C)
        e_output_end = mx.sym.reshape(data=e_output_end,
                                      shape=(1, batch_size, -1))
        d_input = mx.sym.broadcast_to(e_output_end, shape=(pre_timestep, 0, 0))
        d_output, d_state = d_cell.unroll(length=pre_timestep,
                                          begin_state=e_state,
                                          inputs=d_input,
                                          merge_outputs=False,
                                          layout='TNC')

        # Note: when use_cudnn=False, passing begin_state=e_state does not work.

    if use_cudnn:
        rnn_output = mx.sym.Reshape(d_output[-1],
                                    shape=(-1, 2 * rnn_hidden_number))
    else:
        rnn_output = d_output[-1]

    # if you use dropout
    # rnn_output = mx.sym.Dropout(data=rnn_output,p=0.3)
    affine1 = mx.sym.FullyConnected(
        data=rnn_output, num_hidden=fc_number,
        name='affine1')  # if use_cudnn=False, data=state[-1]
    act1 = mx.sym.Activation(data=affine1, act_type='tanh', name='tanh1')
    drop1 = mx.sym.Dropout(act1, p=Dropout_rate)

    affine2 = mx.sym.FullyConnected(data=drop1,
                                    num_hidden=fc_number,
                                    name='affine2')
    act2 = mx.sym.Activation(data=affine2, act_type='tanh', name='tanh2')
    drop2 = mx.sym.Dropout(act2, p=Dropout_rate)

    output = mx.sym.FullyConnected(data=drop2,
                                   num_hidden=pre_timestep * column,
                                   name='affine3')
    output = mx.sym.LinearRegressionOutput(data=output, label=label_motion)

    # Visualize the network structure with output sizes (the batch_size is ignored).
    # Rendering does not work in PyCharm or VS Code; the diagram appears in a Jupyter notebook.
    if use_cudnn:
        # shape = {'seed_motion': (seed_timestep, batch_size, column)}
        mx.viz.plot_network(symbol=output)
    else:
        # shape = {'seed_motion': (batch_size, seed_timestep, column)}
        mx.viz.plot_network(symbol=output)

    print(output.list_arguments())

    # training mod
    mod = mx.module.Module(symbol=output,
                           data_names=['seed_motion'],
                           label_names=['label_motion'],
                           context=ctx)

    # Network information print
    print(mod.data_names)
    print(mod.label_names)

    if test == False:
        print(train_iter.provide_data)
        print(train_iter.provide_label)
    else:
        print(test_iter.provide_data)
        print(test_iter.provide_label)
    '''
    grad_req (str, list of str, dict of str to str) – 
    Requirement for gradient accumulation. Can be ‘write’, ‘add’, or ‘null’ 
    (default to ‘write’). Can be specified globally (str) or for each argument (list, dict).
    '''
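    # grad_req can also be given per argument, e.g. to freeze affine1's bias
    # while still updating its weight (a hypothetical setting; plain 'write'
    # is used below):
    #   grad_req={'affine1_weight': 'write', 'affine1_bias': 'null'}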

    mod.bind(data_shapes=train_iter.provide_data,
             label_shapes=train_iter.provide_label,
             for_training=True,
             shared_module=None,
             inputs_need_grad=False,
             force_rebind=False,
             grad_req='write')

    # weights load
    cudnn_weights_path = 'weights/Cudnn_MotionNet-{}th-{}.params'.format(
        order, save_period)
    weights_path = 'weights/MotionNet-{}th-{}.params'.format(
        order, save_period)

    if os.path.exists(cudnn_weights_path) and use_cudnn and use_gpu:
        mod.load_params(cudnn_weights_path)
    elif os.path.exists(weights_path) and not (use_cudnn and use_gpu):
        # any of the three non-FusedRNN configurations loads the plain weights
        mod.load_params(weights_path)
    else:
        mod.init_params(initializer=mx.initializer.Xavier(
            rnd_type='gaussian', factor_type='avg', magnitude=1))

    if optimizer == 'sgd':
        lr_sch = mx.lr_scheduler.FactorScheduler(step=lr_step,
                                                 factor=lr_factor,
                                                 stop_factor_lr=stop_factor_lr)
        mod.init_optimizer(optimizer=optimizer,
                           optimizer_params={
                               'learning_rate': learning_rate,
                               'lr_scheduler': lr_sch
                           })
    else:
        mod.init_optimizer(optimizer=optimizer,
                           optimizer_params={'learning_rate': learning_rate})

    metric = mx.metric.create(['mse'])

    start_time = time.time()

    if test == False:

        for epoch in range(1, epoch + 1, 1):
            train_iter.reset()
            for batch in train_iter:
                mod.forward(batch, is_train=True)
                mod.update_metric(metric, batch.label)
                mod.backward()
                mod.update()
            print('Epoch : {} , MSE : {}'.format(epoch, metric.get()))

            if epoch % 100 == 0:
                end_time = time.time()
                print(
                    "-------------------------------------------------------")
                print("{}_learning time : {}".format(epoch,
                                                     end_time - start_time))
                print(
                    "-------------------------------------------------------")

            cal = mod.predict(eval_data=train_iter,
                              merge_batches=True,
                              reset=True,
                              always_output_list=False).asnumpy()
            cost = cal - train_label_motion
            cost = (cost**2) / 2
            cost = np.mean(cost)

            print("joint angle Square Error : {}".format(cost))

            if cost < 0.01:

                if not os.path.exists("weights"):
                    os.makedirs("weights")

                if use_cudnn:
                    print('Saving weights')
                    mod.save_params(
                        "weights/Cudnn_MotionNet-{}th-{}.params".format(
                            order, epoch))

                else:
                    print('Saving weights')
                    mod.save_params("weights/MotionNet-{}th-{}.params".format(
                        order, epoch))

                print(
                    "############################################################################################"
                )
                print("End the learning.")
                print(
                    "############################################################################################"
                )

                # training-data test
                seed = train_iter.data[0][1].asnumpy()
                prediction_motion = mod.predict(
                    eval_data=train_iter,
                    merge_batches=True,
                    reset=True,
                    always_output_list=False).asnumpy() / Normalization_factor
                '''Creating a bvh file with predicted values -bvh_writer'''
                bw.Motion_Data_Making(seed[:, :seed_timestep] /
                                      Normalization_factor,
                                      prediction_motion,
                                      seed_timestep,
                                      pre_timestep,
                                      batch_Frame,
                                      frame_time,
                                      file_directory,
                                      test=False)

                return "completed", epoch

        # Network information print
        print(mod.data_shapes)
        print(mod.label_shapes)
        print(mod.output_shapes)
        print(mod.get_params())
        print(mod.get_outputs())
        print("Optimization complete")

    # all-data test
    if test == True:
        # test mod
        test_mod = mx.mod.Module(symbol=output,
                                 data_names=['seed_motion'],
                                 label_names=['label_motion'],
                                 context=ctx)
        test_mod.bind(data_shapes=test_iter.provide_data,
                      label_shapes=test_iter.provide_label,
                      for_training=False,
                      shared_module=mod)

        if os.path.exists(cudnn_weights_path) or os.path.exists(
                weights_path):  #FusedRNN
            seed = test_iter.data[0][1].asnumpy()
            prediction_motion = test_mod.predict(
                eval_data=test_iter,
                merge_batches=True,
                always_output_list=False).asnumpy() / Normalization_factor
            '''Creating a bvh file with predicted values -bvh_writer'''
            bw.Motion_Data_Making(seed[:, :seed_timestep],
                                  prediction_motion,
                                  seed_timestep,
                                  pre_timestep,
                                  batch_Frame,
                                  frame_time,
                                  file_directory,
                                  test=True)

        else:
            print("Can not test")

    print("finished")
Code example #2

# Assumed imports, as above; matplotlib is used for the error graph, and the
# custom 'LinearRegression' op passed to mx.sym.Custom is assumed to be
# registered by a project-local module:
import os
import time
from collections import OrderedDict

import numpy as np
import mxnet as mx
from matplotlib import pyplot as plt
from matplotlib import style

import bvh_reader as br
import bvh_writer as bw
from seq2seq_cells import encoder, decoder  # hypothetical module name

def MotionNet(epoch=None, batch_size=None, save_period=None, cost_limit=None,
              optimizer=None, learning_rate=None, lr_step=None, lr_factor=None,
              stop_factor_lr=None, use_gpu=True, TEST=None, num_layer=None,
              cell=None, hidden_unit=None, time_step=None, seed_timestep=None,
              batch_Frame=None, frame_time=None, graphviz=None,
              parameter_shared=True, Model=None):

    print("-------------------Motion Net-------------------")

    '''1. Data_Loading - bvh_reader'''
    Normalization_factor, train_motion, train_label_motion , seed_timestep, pre_timestep, column, file_directory = br.Motion_Data_Preprocessing(time_step , seed_timestep , batch_Frame , TEST , Model)

    if TEST==True:
        print("<TEST>")
        data = OrderedDict()
        data['seed_motion'] = train_motion
        label = {'label_motion': train_label_motion}

        test_iter = mx.io.NDArrayIter(data=data, label=label)

    else:
        print("<Training>")
        data = OrderedDict()
        data['seed_motion'] = train_motion
        label = {'label_motion': train_label_motion}

        train_iter = mx.io.NDArrayIter(data=data, label=label, batch_size=batch_size, shuffle=False, last_batch_handle='pad')  # shuffle=False keeps the temporal order; unlike the earlier version, no easy-to-hard curriculum is used here

    if use_gpu:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)

    '''2. Network'''
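    # Seq2seq layout: the encoder reads the seed frames; the decoder starts
    # from the last seed frame and feeds each predicted frame back in as its
    # next input (feed_previous unrolling).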
    all_motion = mx.sym.Variable('seed_motion')
    label_motion = mx.sym.Variable('label_motion')

    seed_motion = mx.sym.slice_axis(data=all_motion , axis=1 , begin = 0 , end = seed_timestep)  # (batch , time , column)

    if TEST == True:
        pre_motion = mx.sym.reshape(mx.sym.slice_axis(data=all_motion, axis=1, begin=seed_timestep-1, end=seed_timestep),
                                shape=(1, -1))  # (batch=1,column) - first frame
    else:
        pre_motion = mx.sym.reshape(mx.sym.slice_axis(data=all_motion, axis=1, begin=seed_timestep-1, end=seed_timestep),
                                shape=(batch_size, -1))  # (batch,column) - first frame

    print("-------------------Network Shape--------------------")
    '''
    Encoder and decoder parameters are shared only when the encoder's parameter_shared=True.
    '''
    e_cell, encoder_parameter = encoder(layer_number=num_layer, hidden_number=hidden_unit, Dropout_rate=0.0, Zoneout_rate=0.0, cell=cell, parameter_shared=parameter_shared)  # if parameter_shared=True, encoder_parameter holds the encoder's weights; otherwise it is []

    if num_layer == 1 and parameter_shared == True:  # only when num_layer=1: both Residual=True and Residual=False are possible
        d_cell = decoder(layer_number=num_layer, hidden_number=hidden_unit, output_number=column, Dropout_rate=0.0, Zoneout_rate=0.0, Residual=True, cell=cell, param=encoder_parameter)

    elif num_layer > 1 and parameter_shared == True:  # with more than one layer, residual connections cannot share parameters with the encoder, so Residual=False
        d_cell = decoder(layer_number=num_layer, hidden_number=hidden_unit, output_number=column, Dropout_rate=0.0, Zoneout_rate=0.0, Residual=False, cell=cell, param=encoder_parameter)

    else:  # parameter_shared=False: both Residual=True and Residual=False are possible
        d_cell = decoder(layer_number=num_layer, hidden_number=hidden_unit, output_number=column, Dropout_rate=0.0, Zoneout_rate=0.0, Residual=True, cell=cell, param=encoder_parameter)
    print("\n")

    _ , e_state = e_cell.unroll(length=seed_timestep , inputs=seed_motion , merge_outputs=True , layout='NTC')

    # customized by JG
    if num_layer == 1:
        d_output, _ = d_cell.SingleLayer_feed_previous_unroll(length=pre_timestep, begin_state=e_state, inputs=pre_motion, merge_outputs=True, layout='NTC')  # MultiLayer_feed_previous_unroll is also possible
    else:
        d_output, _ = d_cell.MultiLayer_feed_previous_unroll(length=pre_timestep, begin_state=e_state, inputs=pre_motion, merge_outputs=True, layout='NTC')


    #output = mx.sym.LinearRegressionOutput(data = d_output , label=label_motion , grad_scale = 1)
    output = mx.sym.Custom(data=d_output, label=label_motion, grad_scale=1, name="LinearRegression", op_type='LinearRegression')

    digraph=mx.viz.plot_network(symbol=output,hide_weights=True)

    # why batch_Frame >= 10? For smaller values the rendered graph is too small for its labels.
    if graphviz == True and TEST == True and batch_Frame >= 10:
        digraph.view("{}_batch_Frame_TEST_Seq2Seq".format(batch_Frame))  # show graph
    elif graphviz == True and TEST == False and batch_Frame >= 10:
        digraph.view("{}_batch_Frame_Training_Seq2Seq".format(batch_Frame))  # show graph

    print("-------------------Network Learning Parameter--------------------")
    print(output.list_arguments())
    print("\n")

    if TEST == False:
        mod = mx.module.Module(symbol=output, data_names=['seed_motion'], label_names=['label_motion'], context=ctx)
        print("-------------------Network Data Name--------------------")
        print(mod.data_names)
        print(mod.label_names)
        print("\n")
    else:
        # test mod
        test_mod = mx.mod.Module(symbol=output , data_names=['seed_motion'] , label_names=['label_motion'] , context=ctx)
        print("-------------------Network Data Name--------------------")
        print(test_mod.data_names)
        print(test_mod.label_names)
        print("\n")


    print("-------------------Network Data Shape--------------------")
    if TEST==False:
        print(train_iter.provide_data)
        print(train_iter.provide_label)
    else:
        print(test_iter.provide_data)
        print(test_iter.provide_label)
    print("\n")
    '''
    grad_req (str, list of str, dict of str to str) – 
    Requirement for gradient accumulation. Can be ‘write’, ‘add’, or ‘null’ 
    (default to ‘write’). Can be specified globally (str) or for each argument (list, dict).
    '''
    if TEST == False:

        mod.bind(data_shapes=train_iter.provide_data , label_shapes=train_iter.provide_label , for_training=True , shared_module=None , inputs_need_grad=False , force_rebind=False , grad_req='write')
        # weights load
        weights_path = 'weights/MotionNet-{}.params'.format(save_period)

        if os.path.exists(weights_path):
            print("load weights")
            mod.load_params(weights_path)
        else:
            print("init weights")
            mod.init_params(initializer=mx.initializer.Normal(sigma=0.01)) # very important

        start_time=time.time()
        print("-------------------Learning Start--------------------")

        if optimizer=='sgd':
            lr_sch = mx.lr_scheduler.FactorScheduler(step = lr_step, factor = lr_factor , stop_factor_lr = stop_factor_lr)
            mod.init_optimizer(optimizer=optimizer, optimizer_params={'learning_rate': learning_rate , 'lr_scheduler': lr_sch})
        else:
            mod.init_optimizer(optimizer=optimizer, optimizer_params={'learning_rate': learning_rate})

        metric = mx.metric.create(['mse'])

        for epoch in range(1, epoch + 1, 1):
            train_iter.reset()
            metric.reset()
            for batch in train_iter:

                if epoch % 2 == 0:  # noise is added only on even-numbered epochs
                    '''1. Add noise to input - Data Augmentation'''
                    # Gaussian noise
                    noise = mx.nd.random_normal(loc=0, scale=5, shape=(batch_size, seed_timestep + pre_timestep, column), ctx=ctx)
                    # uniform noise (alternative):
                    # noise = mx.nd.random_uniform(low=-1, high=1, shape=(batch_size, seed_timestep + pre_timestep, column), ctx=ctx)
                    mod.forward(data_batch=mx.io.DataBatch(data=[mx.nd.add(batch.data[0].as_in_context(ctx), noise)], label=list(batch.label)), is_train=True)  # data order: (N, T, C)
                else:
                    mod.forward(data_batch=batch, is_train=True)

                mod.update_metric(metric,batch.label)
                mod.backward()
                mod.update()

            #print('epoch : {} , MSE : {}'.format(epoch,metric.get()))
            if epoch % 100 == 0:
                end_time=time.time()
                print("-------------------------------------------------------")
                print("{}_learning time : {}".format(epoch,end_time-start_time))
                print("-------------------------------------------------------")

            if epoch % 10000 == 0:
                if not os.path.exists("weights"):
                    os.makedirs("weights")

                print('Saving weights')
                mod.save_params("weights/MotionNet-{}.params".format(epoch))

            cal = mod.predict(eval_data=train_iter ,  merge_batches=True , reset=True, always_output_list=False).asnumpy() / Normalization_factor
            cost = cal - train_label_motion
            cost=(cost**2)/2
            cost=np.mean(cost)
            print('{} epoch '.format(epoch), end='')
            print("Joint Angle Square Error : {}".format(cost))

            if cost < cost_limit:

                if not os.path.exists("weights"):
                    os.makedirs("weights")

                print('Saving weights')
                mod.save_params("weights/MotionNet-{}.params".format(epoch))

                print("############################################################################################")
                print("End the learning.")
                print("############################################################################################")

                return "optimization completed"

        print("\n")

        print("-------------------Network Information--------------------")
        print(mod.data_shapes)
        print(mod.label_shapes)
        print(mod.output_shapes)
        print(mod.get_params())
        print(mod.get_outputs())
        print("{} learning optimization completed".format(epoch))
        print("\n")

    if TEST==True:

        test_mod.bind(data_shapes=test_iter.provide_data , label_shapes=test_iter.provide_label , for_training=False)

        # weights load
        weights_path = 'weights/MotionNet-{}.params'.format(save_period)
        if os.path.exists(weights_path):
            test_mod.load_params(weights_path)

            #order : (N , T(all time) , C)
            seed = test_iter.data[0][1].asnumpy()

            #order : (N , T(predict time) , C)
            prediction_motion = test_mod.predict(eval_data=test_iter , merge_batches=True , always_output_list=False).asnumpy()/Normalization_factor

            print("Test Prediction motion shape : {}".format(np.shape(prediction_motion)))

            #test cost
            cost = prediction_motion - train_label_motion
            cost=(cost**2)/2

            print(style.available)
            TimeStepError_Array=np.mean(cost,axis=(0,2)) # y-axis
            print(TimeStepError_Array)
            TimeStep = np.arange(1,pre_timestep+1,1) # x-axis

            # Draw error graph
            style.use('seaborn')
            plt.figure(figsize=(9,4))
            bbox = dict(boxstyle = 'round' , fc = 'w' , ec = 'b' , lw = 2)
            #plt.plot(TimeStep , TimeStepError_Array , "r." , lw=3 ,label = "Error")
            plt.bar(TimeStep , TimeStepError_Array , width=0.7 ,label ='error', color = 'r')
            plt.annotate("Error Prevention" ,fontsize=14, xy = (60,1000) , xytext=(0,1000), textcoords='data' ,arrowprops={'color' : 'blue' , 'alpha' : 0.3 , 'arrowstyle' : "simple"}, bbox = bbox)
            plt.grid()
            plt.xlabel("Time", fontsize=14)
            plt.ylabel("Joint Angle Error" , fontsize=14)
            plt.ylim(0,4400)
            plt.legend(fontsize=15,loc='upper left')
            plt.title("Prediction Error Graph", fontdict={'fontsize': 15 , 'fontweight' : 5})
            print("cost graph saved")
            plt.savefig("Cost Graph.jpg")

            cost=np.mean(cost)
            print("prediction error : {}".format(cost))

            '''Creating a bvh file with predicted values -bvh_writer'''
            bw.Motion_Data_Making(seed[:,:seed_timestep] / Normalization_factor , prediction_motion , seed_timestep , pre_timestep , batch_Frame , frame_time , file_directory , Model)

            plt.show()
            return "Test completed"
        else:
            print("Can not test")
Code example #3

# Assumed imports, as in the previous examples:
import os
import time
from collections import OrderedDict

import numpy as np
import mxnet as mx

import bvh_reader as br
import bvh_writer as bw
from seq2seq_cells import encoder, decoder  # hypothetical module name

def MotionNet(order=None,
              epoch=None,
              batch_size=None,
              save_period=None,
              cost_limit=None,
              optimizer=None,
              learning_rate=None,
              lr_step=None,
              lr_factor=None,
              stop_factor_lr=None,
              use_gpu=True,
              TEST=None,
              num_layer=None,
              cell=None,
              hidden_unit=None,
              time_step=None,
              seed_timestep=None,
              batch_Frame=None,
              frame_time=None,
              graphviz=None):

    print("-------------------Motion Net-------------------")
    '''1. Data_Loading - bvh_reader'''
    Normalization_factor, train_motion, train_label_motion, seed_timestep, pre_timestep, column, file_directory = br.Motion_Data_Preprocessing(
        time_step, seed_timestep, batch_Frame, TEST)

    if TEST == True:
        print("<TEST>")
        data = OrderedDict()
        data['seed_motion'] = train_motion
        label = {'label_motion': train_label_motion}

        test_iter = mx.io.NDArrayIter(data=data, label=label)

    else:
        print("<Training>")
        train_motion = train_motion[:batch_size]
        train_label_motion = train_label_motion[:batch_size]

        data = OrderedDict()
        data['seed_motion'] = train_motion
        label = {'label_motion': train_label_motion}

        train_iter = mx.io.NDArrayIter(
            data=data,
            label=label,
            batch_size=batch_size,
            shuffle=False,
            last_batch_handle='pad'
        )  # Motion data is complex, so samples are fed in their original (easy-to-hard) order: shuffle=False

    if use_gpu:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    '''2. Network'''
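    # Seq2seq layout: the encoder reads the seed frames. During training the
    # decoder is unrolled on ground-truth frames (teacher forcing); at test
    # time it feeds its own predictions back in (feed_previous unrolling).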
    all_motion = mx.sym.Variable('seed_motion')
    label_motion = mx.sym.Variable('label_motion')

    seed_motion = mx.sym.slice_axis(data=all_motion,
                                    axis=1,
                                    begin=0,
                                    end=seed_timestep)  # (batch,time,column)

    if TEST == True:
        pre_motion = mx.sym.reshape(
            mx.sym.slice_axis(data=all_motion,
                              axis=1,
                              begin=seed_timestep,
                              end=seed_timestep + 1),
            shape=(1, -1))  # (batch=1,column) - first frame
    else:
        pre_motion = mx.sym.slice_axis(data=all_motion,
                                       axis=1,
                                       begin=seed_timestep - 1,
                                       end=-1)

    print("-------------------Network Shape--------------------")
    e_cell = encoder(layer_number=num_layer,
                     hidden_number=hidden_unit,
                     Dropout_rate=0.2,
                     cell=cell)
    d_cell = decoder(layer_number=num_layer,
                     hidden_number=hidden_unit,
                     output_number=column,
                     Dropout_rate=0.2,
                     cell=cell)
    print("\n")

    _, e_state = e_cell.unroll(length=seed_timestep,
                               inputs=seed_motion,
                               merge_outputs=True,
                               layout='NTC')  # e_state: final encoder state

    #seq2seq in test
    if TEST == True:
        # customized by JG
        if num_layer == 1:
            d_output, _ = d_cell.SingleLayer_feed_previous_unroll(
                length=pre_timestep,
                begin_state=e_state,
                inputs=pre_motion,
                merge_outputs=True,
                layout='NTC'
            )  # MultiLayer_feed_previous_unroll is also possible.
        else:
            d_output, _ = d_cell.MultiLayer_feed_previous_unroll(
                length=pre_timestep,
                begin_state=e_state,
                inputs=pre_motion,
                merge_outputs=True,
                layout='NTC')

    #seq2seq in training
    else:
        d_output, _ = d_cell.unroll(length=pre_timestep,
                                    begin_state=e_state,
                                    inputs=pre_motion,
                                    merge_outputs=True,
                                    layout='NTC')

    output = mx.sym.LinearRegressionOutput(data=d_output, label=label_motion)

    digraph = mx.viz.plot_network(symbol=output, hide_weights=True)

    # why batch_Frame >= 10? For smaller values the rendered graph is too small for its labels.
    if graphviz == True and TEST == True and batch_Frame >= 10:
        digraph.view(
            "{}_batch_Frame_TEST_Seq2Seq".format(batch_Frame))  #show graph
    elif graphviz == True and TEST == False and order == 1 and batch_Frame >= 10:
        digraph.view(
            "{}_batch_Frame_Training_Seq2Seq".format(batch_Frame))  #show graph

    print("-------------------Network Learning Parameter--------------------")
    print(output.list_arguments())
    print("\n")

    if TEST == False:
        mod = mx.module.Module(symbol=output,
                               data_names=['seed_motion'],
                               label_names=['label_motion'],
                               context=ctx)
        print("-------------------Network Data Name--------------------")
        print(mod.data_names)
        print(mod.label_names)
        print("\n")
    else:
        # test mod
        test_mod = mx.mod.Module(symbol=output,
                                 data_names=['seed_motion'],
                                 label_names=['label_motion'],
                                 context=ctx)
        print("-------------------Network Data Name--------------------")
        print(test_mod.data_names)
        print(test_mod.label_names)
        print("\n")

    print("-------------------Network Data Shape--------------------")
    if TEST == False:
        print(train_iter.provide_data)
        print(train_iter.provide_label)
    else:
        print(test_iter.provide_data)
        print(test_iter.provide_label)
    print("\n")
    '''
    grad_req (str, list of str, dict of str to str) – 
    Requirement for gradient accumulation. Can be ‘write’, ‘add’, or ‘null’ 
    (default to ‘write’). Can be specified globally (str) or for each argument (list, dict).
    '''
    if TEST == False:

        mod.bind(data_shapes=train_iter.provide_data,
                 label_shapes=train_iter.provide_label,
                 for_training=True,
                 shared_module=None,
                 inputs_need_grad=False,
                 force_rebind=False,
                 grad_req='write')

        # weights load
        weights_path = 'weights/MotionNet-{}th-{}.params'.format(
            order, save_period)

        if os.path.exists(weights_path):
            mod.load_params(weights_path)
        else:
            mod.init_params(initializer=mx.initializer.Xavier(
                rnd_type='gaussian', factor_type='avg', magnitude=1))

        start_time = time.time()
        print("-------------------Learning Start--------------------")

        if optimizer == 'sgd':
            lr_sch = mx.lr_scheduler.FactorScheduler(
                step=lr_step, factor=lr_factor, stop_factor_lr=stop_factor_lr)
            mod.init_optimizer(optimizer=optimizer,
                               optimizer_params={
                                   'learning_rate': learning_rate,
                                   'lr_scheduler': lr_sch
                               })
        else:
            mod.init_optimizer(
                optimizer=optimizer,
                optimizer_params={'learning_rate': learning_rate})

        metric = mx.metric.create(['mse'])
        for epoch in range(1, epoch + 1, 1):
            train_iter.reset()
            metric.reset()
            for batch in train_iter:

                mod.forward(batch, is_train=True)

                #Data Order Transform (N,T,C)
                mod.update_metric(metric, batch.label)

                mod.backward()
                mod.update()

            #print('Epoch : {} , MSE : {}'.format(epoch,metric.get()))

            if epoch % 100 == 0:
                end_time = time.time()
                print(
                    "-------------------------------------------------------")
                print("{}_learning time : {}".format(epoch,
                                                     end_time - start_time))
                print(
                    "-------------------------------------------------------")

            if epoch % 10000 == 0:
                if not os.path.exists("weights"):
                    os.makedirs("weights")

                print('Saving weights')
                mod.save_params("weights/MotionNet-{}.params".format(epoch))

            cal = mod.predict(eval_data=train_iter,
                              merge_batches=True,
                              reset=True,
                              always_output_list=False).asnumpy()
            cost = cal - train_label_motion
            cost = (cost**2) / 2
            cost = np.mean(cost)

            print('{} epoch '.format(epoch), end='')
            print("Joint Angle Square Error : {}".format(cost))

            if cost < cost_limit:

                if not os.path.exists("weights"):
                    os.makedirs("weights")

                print('Saving weights')
                mod.save_params("weights/MotionNet-{}th-{}.params".format(
                    order, epoch))

                print(
                    "############################################################################################"
                )
                print("End the learning.")
                print(
                    "############################################################################################"
                )

                #order : (N , T(all_time) , C)
                seed = train_iter.data[0][1].asnumpy()

                #order : (N , T(predict time) , C)
                prediction_motion = mod.predict(
                    eval_data=train_iter,
                    merge_batches=True,
                    reset=True,
                    always_output_list=False).asnumpy() / Normalization_factor
                '''Creating a bvh file with predicted values -bvh_writer'''
                bw.Motion_Data_Making(
                    seed[:, :seed_timestep] / Normalization_factor,
                    prediction_motion, seed_timestep, pre_timestep,
                    batch_Frame, frame_time, file_directory, TEST)

                return "completed", epoch
        print("\n")

        print("-------------------Network Information--------------------")
        print(mod.data_shapes)
        print(mod.label_shapes)
        print(mod.output_shapes)
        print(mod.get_params())
        print(mod.get_outputs())
        print("Optimization complete")
        print("\n")

    if TEST == True:

        test_mod.bind(data_shapes=test_iter.provide_data,
                      label_shapes=test_iter.provide_label,
                      for_training=False)
        # weights load
        weights_path = 'weights/MotionNet-{}th-{}.params'.format(
            order, save_period)
        if os.path.exists(weights_path):
            test_mod.load_params(weights_path)
            #order : (N , T(all time) , C)
            seed = test_iter.data[0][1].asnumpy()
            #order : (N , T(predict time) , C)
            prediction_motion = test_mod.predict(
                eval_data=test_iter,
                merge_batches=True,
                always_output_list=False).asnumpy() / Normalization_factor

            print("Test Prediction motion shape : {}".format(
                np.shape(prediction_motion)))

            #test cost
            cost = prediction_motion - train_label_motion
            cost = (cost**2) / 2
            cost = np.mean(cost)
            print("prediction error : {}".format(cost))
            '''Creating a bvh file with predicted values -bvh_writer'''
            bw.Motion_Data_Making(
                seed[:, :seed_timestep] / Normalization_factor,
                prediction_motion, seed_timestep, pre_timestep, batch_Frame,
                frame_time, file_directory, TEST)

        else:
            print("Can not test")
Code example #4

import bvh_reader as br
import bvh_writer as bw

# Read BVH motion data with bvh_reader, then write it straight back out with
# bvh_writer (a reader/writer round-trip check).
motion, time_step, batch_Frame, frame_Time, file_directory = br.Motion_Data_Preprocessing(
    time_step=90, batch_Frame=1)
bw.Motion_Data_Making(motion, time_step, batch_Frame, frame_Time,
                      file_directory)