Example #1
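Trains a skeleton-based action recognition model on the UWA 3D Multiview Activity II dataset and evaluates it on two held-out test sets. Three identical configs are built up front (one for training, two for evaluation); `get_config`, `Action_input`, `MP_runner`, `run_epoch`, the feature helpers, and globals such as `myHiddenSize`, `BatchDivider`, `train_set`, and `test_set1`/`test_set2` are assumed to be defined elsewhere in the module, and `main` is presumably invoked via `tf.app.run()`.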
def main(_):
    config = get_config()
    config.class_size = 30
    config.feature_size = 45
    config.input_size = 45
    config.hidden_size = myHiddenSize
    config.keep_prob = myKeepProb

    eval_config = get_config()
    eval_config.class_size = 30
    eval_config.feature_size = 45
    eval_config.input_size = 45
    eval_config.hidden_size = myHiddenSize
    eval_config.keep_prob = myKeepProb

    eval_config2 = get_config()
    eval_config2.class_size = 30
    eval_config2.feature_size = 45
    eval_config2.input_size = 45
    eval_config2.hidden_size = myHiddenSize
    eval_config2.keep_prob = myKeepProb

    ####################################################################################################################
    DATA_PATH = os.path.join('Databases',
                             'UWA 3D Multiview Activity II Database')

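    # Each split is read as a list of variable-length skeleton sequences plus labels.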
    train_sklt0, train_label0 = Action_input.read(DATA_PATH, train_set, config)
    test_sklt0, test_label0 = Action_input.read(DATA_PATH, test_set1,
                                                eval_config)
    test_sklt02, test_label02 = Action_input.read(DATA_PATH, test_set2,
                                                  eval_config)

    # for i in range(len(train_sklt0)):
    #     np.save("npy_saver/train_sklt0/%03d_train_sklt0.npy" % i, np.asarray(train_sklt0[i]))
    # print("Save Complete")

    # The longest sequence across all three splits fixes the padded length.
    MAX_LENGTH = max(len(seq)
                     for split in (train_sklt0, test_sklt0, test_sklt02)
                     for seq in split)

    print(MAX_LENGTH)
    config.num_steps = MAX_LENGTH
    eval_config.num_steps = MAX_LENGTH
    eval_config2.num_steps = MAX_LENGTH

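    # feature_only_diff_0 is assumed to pad each sequence to MAX_LENGTH and extract per-frame features.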
    feature_train = feature_only_diff_0(train_sklt0, MAX_LENGTH, config)
    feature_test = feature_only_diff_0(test_sklt0, MAX_LENGTH, eval_config)
    feature_test2 = feature_only_diff_0(test_sklt02, MAX_LENGTH, eval_config2)

    # for i in range(len(train_sklt1)):
    #     np.save("temp_Data/%03d_train_sklt1.npy"%i,np.asarray(train_sklt1[i]) )

    del train_sklt0, test_sklt0, test_sklt02

    AS_train_label = one_hot_labeling(train_label0, config)
    AS_test_label = one_hot_labeling(test_label0, eval_config)
    AS_test_label2 = one_hot_labeling(test_label02, eval_config2)

    del train_label0, test_label0, test_label02

    print(feature_train.shape)
    print(feature_test.shape)
    print(feature_test2.shape)

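    # Training batches hold len(feature_train) / BatchDivider samples; each test split is evaluated as a single batch.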
    config.batch_size = np.int32(len(feature_train) /
                                 BatchDivider)  ### batch_modifier
    eval_config.batch_size = np.int32(len(feature_test))
    eval_config2.batch_size = np.int32(len(feature_test2))
    config.num_steps = np.int32(len(feature_train[0]))
    eval_config.num_steps = np.int32(len(feature_test[0]))
    eval_config2.num_steps = np.int32(len(feature_test2[0]))

    print(config.batch_size, eval_config.batch_size)
    print(
        "Total Training Set Length : %d, Training Batch Size : %d, Eval Batch Size : %d"
        % (len(feature_train), config.batch_size, eval_config.batch_size))

    # TODO=========================================================================================== SAVED FILE PATH CONFIG

    csv_suffix = strftime("_%Y%m%d_%H%M.csv", localtime())
    folder_path = os.path.join(myFolderPath)  # folder_modifier

    checkpoint_path = os.path.join(folder_path,
                                   "NTU_{0}.ckpt".format(view_subject))
    timecsv_path = os.path.join(folder_path, "Auto" + csv_suffix)

    f = open(timecsv_path, 'w')
    csvWriter = csv.writer(f)

    # TODO=========================================================================================== LOAD BALANCING

    # TODO=========================================================================================== SESSION CONFIG

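    # Grow GPU memory on demand instead of pre-allocating the whole device.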
    sessConfig = tf.ConfigProto(log_device_placement=False)
    sessConfig.gpu_options.allow_growth = True

    writeConfig_tocsv = True

    if writeConfig_tocsv:
        csvWriter.writerow(
            ['DateTime:',
             strftime("%Y%m%d_%H:%M:%S", localtime())])
        csvWriter.writerow([])
        csvWriter.writerow([
            'Total Dataset Length',
            'Train Batch Divider',
            'Train Batch Size',
            'Eval Batch Size',
            'Eval Batch Size2',
        ])
        csvWriter.writerow([
            len(feature_train),
            len(feature_train) / config.batch_size, config.batch_size,
            eval_config.batch_size, eval_config2.batch_size
        ])

        csvWriter.writerow(['Control', 'Long 0'])
        csvWriter.writerow(['win_size', win_size[0]])
        csvWriter.writerow(['stride', stride[0]])
        csvWriter.writerow(['start_time', start_time[0]])
        csvWriter.writerow([])

    # TODO=========================================================================================== BUILD GRAPH
    with tf.Graph().as_default(), tf.Session(config=sessConfig) as session:
        with tf.device('/cpu:0'):
            initializer = tf.random_uniform_initializer(
                -config.init_scale, config.init_scale)

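            # One training model and two evaluation models share the same "model"
            # variable scope; reuse=True lets the eval models read the trained weights.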
            with tf.variable_scope("model",
                                   reuse=None,
                                   initializer=initializer):
                m = MP_runner(is_training=True,
                              config=config,
                              labels=AS_train_label)
            print("\nTraining Model Established!\n")

            with tf.variable_scope("model",
                                   reuse=True,
                                   initializer=initializer):
                mtest = MP_runner(is_training=False,
                                  config=eval_config,
                                  labels=AS_test_label)

            with tf.variable_scope("model",
                                   reuse=True,
                                   initializer=initializer):
                mtest2 = MP_runner(is_training=False,
                                   config=eval_config2,
                                   labels=AS_test_label2)

            print("\nTesting Model Established!!\n")

            # summary_writer = tf.train.SummaryWriter('/home/inwoong/MSR_logs', graph=session.graph)

            init = tf.global_variables_initializer()  # tf.initialize_all_variables() before TF 0.12
            session.run(init)

            saver = tf.train.Saver(tf.global_variables())

            # saver.restore(session, "./result_apply_170116/rNTU_view.ckpt-6000")
            # print("Model restored.")  # only meaningful when the restore above is enabled

            stt_loop = time.time()
            print(strftime("%Y%m%d_%H:%M:%S", localtime()))

            csvWriter.writerow([
                'Time', 'Epoch #', 'Epoch Time', 'Train Accuracy', 'Train Cost'
            ])
            for i in range(config.max_max_epoch):

                stt_lr = time.time()

                if i == 0:
                    print("First Learning Rate is assigned!!")
                    m.assign_lr(session, config.learning_rate)
                elif i == config.max_epoch1:
                    m.assign_lr(session, config.learning_rate2)
                elif i == config.max_epoch2:
                    m.assign_lr(session, config.learning_rate3)
                elif i == config.max_epoch3:
                    m.assign_lr(session, config.learning_rate4)
                elif i == config.max_epoch4:  # 6000
                    print("6000 Learning Rate is assigned!!")
                    m.assign_lr(session, config.learning_rate5)
                elif i == config.max_epoch5:  # 10,000
                    m.assign_lr(session, config.learning_rate6)
                elif i == config.max_epoch6:  # 10,000
                    m.assign_lr(session, config.learning_rate7)

                stt_epoch = time.time()

                if i == 0:
                    print("I'm Ready for First Epoch")
                train_cost, train_accuracy, tr_p_l, tr_g_l = run_epoch(
                    session,
                    m,
                    feature_train,
                    AS_train_label,
                    m.train_op,
                    verbose=True)

                end_epoch = time.time()
                assert not np.isnan(
                    train_cost), 'Model diverged with loss = NaN'
                # if (i % 10) == 0:
                #     feed_tr = {m.input_data: feature_train[0 * config.batch_size:(0 + 1) * config.batch_size, :, :],
                #                m.targets: AS_train_label[0 * config.batch_size:(0 + 1) * config.batch_size, :]}
                #     logits = session.run(m.logits, feed_dict=feed_tr)
                #     print(logits)
                #     summary_str_tr = session.run(m.summary_op, feed_dict=feed_tr)
                #     summary_writer.add_summary(summary_str_tr, i)

                # Save the model checkpoint periodically.
                if i % 100 == 0 or (i + 1) == config.max_max_epoch:
                    # checkpoint_path = os.path.join("./view_model1+b989+h70", "NTU_view_TS-LSTM.ckpt")
                    saver.save(session, checkpoint_path, global_step=i)

                if i % 10 == 0:
                    end_loop = time.time()
                    strtime = strftime("%Y%m%d_%H:%M:%S", localtime())
                    print(strtime)
                    print(
                        "----------Epoch Time: %.3f, per Assign: %.3f, per Epoch: %.3f"
                        % ((end_loop - stt_loop), (stt_epoch - stt_lr),
                           (end_epoch - stt_epoch)))
                    print(
                        "Epoch: %d Learning rate: %.6f Train Accuracy: %.4f" %
                        (i, session.run(m.lr), train_accuracy))
                    # train_cost = train_cost * config.batch_size / len(feature_train)
                    print("Train Cost: %.6f" % (train_cost))
                    stt_loop = time.time()
                    print("\n")

                    csvWriter.writerow([
                        strtime, i, (end_epoch - stt_epoch), train_accuracy,
                        train_cost
                    ])

                if i % 100 == 0:
                    test_cost, test_accuracy, te_p_l, te_g_l = run_epoch(
                        session,
                        mtest,
                        feature_test,
                        AS_test_label,
                        tf.no_op(),
                        is_training=False)
                    test_cost2, test_accuracy2, te_p_l2, te_g_l2 = run_epoch(
                        session,
                        mtest2,
                        feature_test2,
                        AS_test_label2,
                        tf.no_op(),
                        is_training=False)
                    print("Test Accuracy: %.5f %.5f\n" %
                          (test_accuracy, test_accuracy2))
                    csvWriter.writerow(
                        ["Test Accuracy :", test_accuracy, test_accuracy2])

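                    # Build a confusion matrix for each test split; the extra last
                    # column stores the per-class accuracy (diagonal / row sum).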
                    confusion_matrix = np.zeros(
                        [config.class_size, config.class_size + 1])
                    class_prob = np.zeros([config.class_size])
                    for j in range(len(te_g_l)):
                        confusion_matrix[te_g_l[j]][te_p_l[j]] += 1
                    for j in range(config.class_size):
                        class_prob[j] = confusion_matrix[j][j] / np.sum(
                            confusion_matrix[j][0:config.class_size])
                    for j in range(config.class_size):
                        confusion_matrix[j][config.class_size] = class_prob[j]

                    confusion_matrix2 = np.zeros(
                        [config.class_size, config.class_size + 1])
                    class_prob2 = np.zeros([config.class_size])
                    for j in range(len(te_g_l2)):
                        confusion_matrix2[te_g_l2[j]][te_p_l2[j]] += 1
                    for j in range(config.class_size):
                        class_prob2[j] = confusion_matrix2[j][j] / np.sum(
                            confusion_matrix2[j][0:config.class_size])
                    for j in range(config.class_size):
                        confusion_matrix2[j][
                            config.class_size] = class_prob2[j]

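                    # Write both confusion matrices to per-epoch CSV files.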
                    with open(folder_path + "/view-test-" + str(i) + ".csv",
                              "w") as csvfile:
                        csvwriter2 = csv.writer(csvfile)
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix[j])

                    with open(folder_path + "/view-test2-" + str(i) + ".csv",
                              "w") as csvfile:
                        csvwriter2 = csv.writer(csvfile)
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix2[j])

    f.close()
Example #2
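Same training skeleton as Example #1, but for the NWUCLA data (`../NWUCLA_csv`): ten models (`model1` … `model10`) are trained in parallel variable scopes, one per temporal offset in `start_time`, and each is evaluated on the single test split. The same helper functions and globals are assumed to be in scope.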
def main(_):
    config = get_config()
    config.class_size = 10
    config.feature_size = 60
    config.input_size = 60
    config.hidden_size = myHiddenSize
    config.keep_prob = myKeepProb

    eval_config = get_config()
    eval_config.class_size = 10
    eval_config.feature_size = 60
    eval_config.input_size = 60
    eval_config.hidden_size = myHiddenSize
    eval_config.keep_prob = myKeepProb

    DATA_PATH = os.path.join('../NWUCLA_csv')

    train_set = [1]
    test_set = [2]

    train_sklt0, train_label0 = Action_input.read(DATA_PATH, train_set, config)
    test_sklt0, test_label0 = Action_input.read(DATA_PATH, test_set, eval_config)

    # for i in range(len(train_sklt0)):
    #     np.save("npy_saver/train_sklt0/%03d_train_sklt0.npy" % i, np.asarray(train_sklt0[i]))
    # print("Save Complete")

    # The longest sequence across both splits fixes the padded length.
    MAX_LENGTH = max(len(seq)
                     for split in (train_sklt0, test_sklt0)
                     for seq in split)

    print(MAX_LENGTH)
    config.num_steps = MAX_LENGTH
    eval_config.num_steps = MAX_LENGTH

    train_sklt1 = feature_only_diff_0(train_sklt0, MAX_LENGTH, config)
    test_sklt1 = feature_only_diff_0(test_sklt0, MAX_LENGTH, eval_config)

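    # body_rotation is assumed to normalize skeleton orientation; Pose_Motion then
    # builds the pose/motion features fed to the networks.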
    train_sklt2 = body_rotation(train_sklt1)
    test_sklt2 = body_rotation(test_sklt1)

    # for i in range(len(train_sklt1)):
    #     np.save("temp_Data/%03d_train_sklt1.npy"%i,np.asarray(train_sklt1[i]) )

    del train_sklt0, test_sklt0

    feature_train = Pose_Motion(train_sklt2)
    feature_test = Pose_Motion(test_sklt2)
    AS_train_label = one_hot_labeling(train_label0, config)
    AS_test_label = one_hot_labeling(test_label0, eval_config)

    del train_sklt1, test_sklt1, train_label0, test_label0

    print(feature_train.shape)
    print(feature_test.shape)

    config.batch_size = np.int32(len(feature_train) / BatchDivider)  ### batch_modifier
    eval_config.batch_size = np.int32(len(feature_test))
    config.num_steps = np.int32(len(feature_train[0]))
    eval_config.num_steps = np.int32(len(feature_test[0]))

    print(config.batch_size, eval_config.batch_size)
    print("Total Training Set Length : %d, Traning Batch Size : %d, Eval Batch Size : %d"
          % (len(feature_train), config.batch_size, eval_config.batch_size))

    # TODO=========================================================================================== SAVED FILE PATH CONFIG

    csv_suffix = strftime("_%Y%m%d_%H%M.csv", localtime())
    folder_path = os.path.join(myFolderPath)  # folder_modifier

    checkpoint_path = os.path.join(folder_path, "NTU_{0}.ckpt".format(view_subject))
    timecsv_path = os.path.join(folder_path, "Auto" + csv_suffix)

    f = open(timecsv_path, 'w')
    csvWriter = csv.writer(f)

    # TODO=========================================================================================== LOAD BALANCING


    # TODO=========================================================================================== SESSION CONFIG

    sessConfig = tf.ConfigProto(log_device_placement=False)
    sessConfig.gpu_options.allow_growth = True

    writeConfig_tocsv = True

    if writeConfig_tocsv:
        csvWriter.writerow(['DateTime:', strftime("%Y%m%d_%H:%M:%S", localtime())])
        csvWriter.writerow([])
        csvWriter.writerow(['Total Dataset Length', 'Train Batch Divider', 'Train Batch Size', 'Eval Batch Size', ])
        csvWriter.writerow(
            [len(feature_train), len(feature_train) / config.batch_size, config.batch_size, eval_config.batch_size])

        csvWriter.writerow(['Control', 'Long 0'])
        csvWriter.writerow(['win_size', win_size[0]])
        csvWriter.writerow(['stride', stride[0]])
        csvWriter.writerow(
            ['start_time', start_time[0], start_time[1], start_time[2], start_time[3], start_time[4], start_time[5],
             start_time[6], start_time[7], start_time[8], start_time[9]])
        csvWriter.writerow([])

    # TODO=========================================================================================== BUILD GRAPH
    with tf.Graph().as_default(), tf.Session(config=sessConfig) as session:
        with tf.device('/cpu:0'):
            initializer = tf.random_uniform_initializer(-config.init_scale,
                                                        config.init_scale)

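            # Ten independent models are built in separate variable scopes, one per
            # temporal offset in start_time; the mtest* models below reuse each
            # scope's weights for evaluation.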
            with tf.variable_scope("model1", reuse=None, initializer=initializer):
                m = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[0])

            with tf.variable_scope("model2", reuse=None, initializer=initializer):
                m2 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[1])

            with tf.variable_scope("model3", reuse=None, initializer=initializer):
                m3 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[2])

            with tf.variable_scope("model4", reuse=None, initializer=initializer):
                m4 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[3])

            with tf.variable_scope("model5", reuse=None, initializer=initializer):
                m5 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[4])

            with tf.variable_scope("model6", reuse=None, initializer=initializer):
                m6 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[5])

            with tf.variable_scope("model7", reuse=None, initializer=initializer):
                m7 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[6])

            with tf.variable_scope("model8", reuse=None, initializer=initializer):
                m8 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[7])

            with tf.variable_scope("model9", reuse=None, initializer=initializer):
                m9 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[8])

            with tf.variable_scope("model10", reuse=None, initializer=initializer):
                m10 = MP_runner(is_training=True, config=config, labels=AS_train_label, motion_diff=start_time[9])

            print("\nTraining Model Established!\n")

            with tf.variable_scope("model1", reuse=True, initializer=initializer):
                mtest = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[0])

            with tf.variable_scope("model2", reuse=True, initializer=initializer):
                mtest2 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[1])

            with tf.variable_scope("model3", reuse=True, initializer=initializer):
                mtest3 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[2])

            with tf.variable_scope("model4", reuse=True, initializer=initializer):
                mtest4 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[3])

            with tf.variable_scope("model5", reuse=True, initializer=initializer):
                mtest5 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[4])

            with tf.variable_scope("model6", reuse=True, initializer=initializer):
                mtest6 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[5])

            with tf.variable_scope("model7", reuse=True, initializer=initializer):
                mtest7 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[6])

            with tf.variable_scope("model8", reuse=True, initializer=initializer):
                mtest8 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[7])

            with tf.variable_scope("model9", reuse=True, initializer=initializer):
                mtest9 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[8])

            with tf.variable_scope("model10", reuse=True, initializer=initializer):
                mtest10 = MP_runner(is_training=False, config=eval_config, labels=AS_test_label, motion_diff=start_time[9])

            print("\nTesting Model Established!!\n")

            # summary_writer = tf.train.SummaryWriter('/home/inwoong/MSR_logs', graph=session.graph)

            init = tf.global_variables_initializer()  # tf.initialize_all_variables() before TF 0.12
            session.run(init)

            saver = tf.train.Saver(tf.global_variables())

            # saver.restore(session, "./result_apply_170116/rNTU_view.ckpt-6000")
            # print("Model restored.")  # only meaningful when the restore above is enabled

            stt_loop = time.time()
            print(strftime("%Y%m%d_%H:%M:%S", localtime()))

            csvWriter.writerow(
                ['Time', 'Epoch #', 'Epoch Time', 'Train Accuracy1', 'Train Cost1', 'Train Accuracy2', 'Train Cost2',
                 'Train Accuracy3', 'Train Cost3', 'Train Accuracy4', 'Train Cost4', 'Train Accuracy5', 'Train Cost5',
                 'Train Accuracy6', 'Train Cost6', 'Train Accuracy7', 'Train Cost7', 'Train Accuracy8', 'Train Cost8',
                 'Train Accuracy9', 'Train Cost9', 'Train Accuracy10', 'Train Cost10'])
            for i in range(config.max_max_epoch):

                stt_lr = time.time()

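                # Apply the shared learning-rate schedule to all ten models.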
                if i == 0:
                    print("First Learning Rate is assigned!!")
                    m.assign_lr(session, config.learning_rate)
                    m2.assign_lr(session, config.learning_rate)
                    m3.assign_lr(session, config.learning_rate)
                    m4.assign_lr(session, config.learning_rate)
                    m5.assign_lr(session, config.learning_rate)
                    m6.assign_lr(session, config.learning_rate)
                    m7.assign_lr(session, config.learning_rate)
                    m8.assign_lr(session, config.learning_rate)
                    m9.assign_lr(session, config.learning_rate)
                    m10.assign_lr(session, config.learning_rate)
                elif i == config.max_epoch1:
                    m.assign_lr(session, config.learning_rate2)
                    m2.assign_lr(session, config.learning_rate2)
                    m3.assign_lr(session, config.learning_rate2)
                    m4.assign_lr(session, config.learning_rate2)
                    m5.assign_lr(session, config.learning_rate2)
                    m6.assign_lr(session, config.learning_rate2)
                    m7.assign_lr(session, config.learning_rate2)
                    m8.assign_lr(session, config.learning_rate2)
                    m9.assign_lr(session, config.learning_rate2)
                    m10.assign_lr(session, config.learning_rate2)

                stt_epoch = time.time()

                if i == 0:
                    print("I'm Ready for First Epoch")
                train_cost, train_accuracy, tr_p_l, tr_g_l = run_epoch(
                    session, m, feature_train, AS_train_label,
                    m.train_op,
                    verbose=True)
                train_cost2, train_accuracy2, tr_p_l2, tr_g_l2 = run_epoch(
                    session, m2, feature_train, AS_train_label,
                    m2.train_op,
                    verbose=True)
                train_cost3, train_accuracy3, tr_p_l3, tr_g_l3 = run_epoch(
                    session, m3, feature_train, AS_train_label,
                    m3.train_op,
                    verbose=True)
                train_cost4, train_accuracy4, tr_p_l4, tr_g_l4 = run_epoch(
                    session, m4, feature_train, AS_train_label,
                    m4.train_op,
                    verbose=True)
                train_cost5, train_accuracy5, tr_p_l5, tr_g_l5 = run_epoch(
                    session, m5, feature_train, AS_train_label,
                    m5.train_op,
                    verbose=True)
                train_cost6, train_accuracy6, tr_p_l6, tr_g_l6 = run_epoch(
                    session, m6, feature_train, AS_train_label,
                    m6.train_op,
                    verbose=True)
                train_cost7, train_accuracy7, tr_p_l7, tr_g_l7 = run_epoch(
                    session, m7, feature_train, AS_train_label,
                    m7.train_op,
                    verbose=True)
                train_cost8, train_accuracy8, tr_p_l8, tr_g_l8 = run_epoch(
                    session, m8, feature_train, AS_train_label,
                    m8.train_op,
                    verbose=True)
                train_cost9, train_accuracy9, tr_p_l9, tr_g_l9 = run_epoch(
                    session, m9, feature_train, AS_train_label,
                    m9.train_op,
                    verbose=True)
                train_cost10, train_accuracy10, tr_p_l10, tr_g_l10 = run_epoch(
                    session, m10, feature_train, AS_train_label,
                    m10.train_op,
                    verbose=True)

                end_epoch = time.time()
                assert not np.isnan(train_cost), 'Model1 diverged with loss = NaN'
                assert not np.isnan(train_cost2), 'Model2 diverged with loss = NaN'
                assert not np.isnan(train_cost3), 'Model3 diverged with loss = NaN'
                assert not np.isnan(train_cost4), 'Model4 diverged with loss = NaN'
                assert not np.isnan(train_cost5), 'Model5 diverged with loss = NaN'
                assert not np.isnan(train_cost6), 'Model6 diverged with loss = NaN'
                assert not np.isnan(train_cost7), 'Model7 diverged with loss = NaN'
                assert not np.isnan(train_cost8), 'Model8 diverged with loss = NaN'
                assert not np.isnan(train_cost9), 'Model9 diverged with loss = NaN'
                assert not np.isnan(train_cost10), 'Model10 diverged with loss = NaN'
                # if (i % 10) == 0:
                #     feed_tr = {m.input_data: feature_train[0 * config.batch_size:(0 + 1) * config.batch_size, :, :],
                #                m.targets: AS_train_label[0 * config.batch_size:(0 + 1) * config.batch_size, :]}
                #     logits = session.run(m.logits, feed_dict=feed_tr)
                #     print(logits)
                #     summary_str_tr = session.run(m.summary_op, feed_dict=feed_tr)
                #     summary_writer.add_summary(summary_str_tr, i)

                # Save the model checkpoint periodically.
                if i % 100 == 0 or (i + 1) == config.max_max_epoch:
                    # checkpoint_path = os.path.join("./view_model1+b989+h70", "NTU_view_TS-LSTM.ckpt")
                    saver.save(session, checkpoint_path, global_step=i)

                if i % 10 == 0:
                    end_loop = time.time()
                    strtime = strftime("%Y%m%d_%H:%M:%S", localtime())
                    print(strtime)
                    print("----------Epoch Time: %.3f, per Assign: %.3f, per Epoch: %.3f" % (
                        (end_loop - stt_loop), (stt_epoch - stt_lr), (end_epoch - stt_epoch)))
                    print("Epoch: %d Learning rate: %.6f Train Accuracy1: %.4f"
                          % (i, session.run(m.lr), train_accuracy))
                    print("Train Cost1: %.6f" % train_cost)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy2: %.4f"
                          % (i, session.run(m2.lr), train_accuracy2))
                    print("Train Cost2: %.6f" % train_cost2)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy3: %.4f"
                          % (i, session.run(m3.lr), train_accuracy3))
                    print("Train Cost3: %.6f" % train_cost3)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy4: %.4f"
                          % (i, session.run(m4.lr), train_accuracy4))
                    print("Train Cost4: %.6f" % train_cost4)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy5: %.4f"
                          % (i, session.run(m5.lr), train_accuracy5))
                    print("Train Cost5: %.6f" % train_cost5)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy6: %.4f"
                          % (i, session.run(m6.lr), train_accuracy6))
                    print("Train Cost6: %.6f" % train_cost6)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy7: %.4f"
                          % (i, session.run(m7.lr), train_accuracy7))
                    print("Train Cost7: %.6f" % train_cost7)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy8: %.4f"
                          % (i, session.run(m8.lr), train_accuracy8))
                    print("Train Cost8: %.6f" % train_cost8)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy9: %.4f"
                          % (i, session.run(m9.lr), train_accuracy9))
                    print("Train Cost9: %.6f" % train_cost9)
                    print("Epoch: %d Learning rate: %.6f Train Accuracy10: %.4f"
                          % (i, session.run(m10.lr), train_accuracy10))
                    print("Train Cost10: %.6f" % train_cost10)
                    stt_loop = time.time()
                    print("\n")

                    csvWriter.writerow([strtime, i, (end_epoch - stt_epoch), train_accuracy, train_cost,
                                        train_accuracy2, train_cost2, train_accuracy3, train_cost3, train_accuracy4,
                                        train_cost4,
                                        train_accuracy5, train_cost5, train_accuracy6, train_cost6, train_accuracy7,
                                        train_cost7,
                                        train_accuracy8, train_cost8, train_accuracy9, train_cost9, train_accuracy10,
                                        train_cost10])

                if i % 100 == 0:
                    test_cost, test_accuracy, te_p_l, te_g_l = run_epoch(session,
                                                                         mtest,
                                                                         feature_test,
                                                                         AS_test_label,
                                                                         tf.no_op(), is_training=False)
                    test_cost2, test_accuracy2, te_p_l2, te_g_l2 = run_epoch(session,
                                                                             mtest2,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost3, test_accuracy3, te_p_l3, te_g_l3 = run_epoch(session,
                                                                             mtest3,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost4, test_accuracy4, te_p_l4, te_g_l4 = run_epoch(session,
                                                                             mtest4,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost5, test_accuracy5, te_p_l5, te_g_l5 = run_epoch(session,
                                                                             mtest5,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost6, test_accuracy6, te_p_l6, te_g_l6 = run_epoch(session,
                                                                             mtest6,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost7, test_accuracy7, te_p_l7, te_g_l7 = run_epoch(session,
                                                                             mtest7,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost8, test_accuracy8, te_p_l8, te_g_l8 = run_epoch(session,
                                                                             mtest8,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost9, test_accuracy9, te_p_l9, te_g_l9 = run_epoch(session,
                                                                             mtest9,
                                                                             feature_test,
                                                                             AS_test_label,
                                                                             tf.no_op(), is_training=False)
                    test_cost10, test_accuracy10, te_p_l10, te_g_l10 = run_epoch(session,
                                                                                 mtest10,
                                                                                 feature_test,
                                                                                 AS_test_label,
                                                                                 tf.no_op(), is_training=False)
                    print("Test Accuracy1: %.5f" % (test_accuracy))
                    print("Test Accuracy2: %.5f" % (test_accuracy2))
                    print("Test Accuracy3: %.5f" % (test_accuracy3))
                    print("Test Accuracy4: %.5f" % (test_accuracy4))
                    print("Test Accuracy5: %.5f" % (test_accuracy5))
                    print("Test Accuracy6: %.5f" % (test_accuracy6))
                    print("Test Accuracy7: %.5f" % (test_accuracy7))
                    print("Test Accuracy8: %.5f" % (test_accuracy8))
                    print("Test Accuracy9: %.5f" % (test_accuracy9))
                    print("Test Accuracy10: %.5f\n" % (test_accuracy10))
                    csvWriter.writerow(
                        ["Test Accuracy :", test_accuracy, test_accuracy2, test_accuracy3, test_accuracy4,
                         test_accuracy5, test_accuracy6, test_accuracy7, test_accuracy8, test_accuracy9,
                         test_accuracy10])

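                    # One confusion matrix per model; class_prob* holds each model's per-class accuracy.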
                    confusion_matrix = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix2 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix3 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix4 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix5 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix6 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix7 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix8 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix9 = np.zeros([config.class_size, config.class_size + 1])
                    confusion_matrix10 = np.zeros([config.class_size, config.class_size + 1])
                    class_prob = np.zeros([config.class_size])
                    class_prob2 = np.zeros([config.class_size])
                    class_prob3 = np.zeros([config.class_size])
                    class_prob4 = np.zeros([config.class_size])
                    class_prob5 = np.zeros([config.class_size])
                    class_prob6 = np.zeros([config.class_size])
                    class_prob7 = np.zeros([config.class_size])
                    class_prob8 = np.zeros([config.class_size])
                    class_prob9 = np.zeros([config.class_size])
                    class_prob10 = np.zeros([config.class_size])
                    for j in range(len(te_g_l)):
                        confusion_matrix[te_g_l[j]][te_p_l[j]] += 1
                        confusion_matrix2[te_g_l2[j]][te_p_l2[j]] += 1
                        confusion_matrix3[te_g_l3[j]][te_p_l3[j]] += 1
                        confusion_matrix4[te_g_l4[j]][te_p_l4[j]] += 1
                        confusion_matrix5[te_g_l5[j]][te_p_l5[j]] += 1
                        confusion_matrix6[te_g_l6[j]][te_p_l6[j]] += 1
                        confusion_matrix7[te_g_l7[j]][te_p_l7[j]] += 1
                        confusion_matrix8[te_g_l8[j]][te_p_l8[j]] += 1
                        confusion_matrix9[te_g_l9[j]][te_p_l9[j]] += 1
                        confusion_matrix10[te_g_l10[j]][te_p_l10[j]] += 1
                    for j in range(config.class_size):
                        class_prob[j] = confusion_matrix[j][j] / np.sum(confusion_matrix[j][0:config.class_size])
                        class_prob2[j] = confusion_matrix2[j][j] / np.sum(confusion_matrix2[j][0:config.class_size])
                        class_prob3[j] = confusion_matrix3[j][j] / np.sum(confusion_matrix3[j][0:config.class_size])
                        class_prob4[j] = confusion_matrix4[j][j] / np.sum(confusion_matrix4[j][0:config.class_size])
                        class_prob5[j] = confusion_matrix5[j][j] / np.sum(confusion_matrix5[j][0:config.class_size])
                        class_prob6[j] = confusion_matrix6[j][j] / np.sum(confusion_matrix6[j][0:config.class_size])
                        class_prob7[j] = confusion_matrix7[j][j] / np.sum(confusion_matrix7[j][0:config.class_size])
                        class_prob8[j] = confusion_matrix8[j][j] / np.sum(confusion_matrix8[j][0:config.class_size])
                        class_prob9[j] = confusion_matrix9[j][j] / np.sum(confusion_matrix9[j][0:config.class_size])
                        class_prob10[j] = confusion_matrix10[j][j] / np.sum(confusion_matrix10[j][0:config.class_size])

                    # for j in range(config.class_size):
                    #     confusion_matrix[j][config.class_size] = class_prob[j]
                    #     print class_prob[j]*100

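                    # Append all ten confusion matrices to one per-epoch CSV file.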
                    with open(folder_path + "/view-test-" + str(i) + ".csv", "w") as csvfile:
                        csvwriter2 = csv.writer(csvfile)
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix2[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix3[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix4[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix5[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix6[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix7[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix8[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix9[j])
                        for j in range(config.class_size):
                            csvwriter2.writerow(confusion_matrix10[j])

    f.close()