示例#1
0
 def split_data(self, proportions):
     """Randomly split self.X / self.y into train, validation and test sets.

     proportions: sequence where proportions[0] is the train fraction and
     proportions[1] is the test fraction; the remainder of the data becomes
     the validation set. Results are stored on self (X_train, X_valid,
     X_test, y_train, y_valid, y_test) and self.set_sizes() is refreshed.
     """
     data_size = self.X.shape[0]
     num_train = int(proportions[0] * data_size)
     num_test = int(proportions[1] * data_size)
     # Everything left after train/test goes to validation.
     # (Original referenced an undefined name `num_train_num_test`.)
     num_valid = data_size - num_train - num_test
     # np.arange, not range(): range objects cannot be shuffled in place.
     indices = np.arange(data_size)
     # np.random.shuffle — `np.random_shuffle` does not exist.
     np.random.shuffle(indices)
     indices_train = indices[0:num_train]
     indices_valid = indices[num_train:num_train + num_valid]
     indices_test = indices[num_train + num_valid:data_size]
     self.X_train = self.X[indices_train]
     self.X_valid = self.X[indices_valid]
     self.X_test = self.X[indices_test]
     self.y_train = self.y[indices_train]
     self.y_valid = self.y[indices_valid]
     self.y_test = self.y[indices_test]
     self.set_sizes()
示例#2
0
 def split_data(self, proportions):
     """Randomly partition self.X / self.y into train, validation and test.

     proportions[0] is the fraction used for training, proportions[1] the
     fraction for testing; the remaining samples form the validation split.
     Stores the six arrays on self and calls self.set_sizes().
     """
     data_size = self.X.shape[0]
     num_train = int(proportions[0] * data_size)
     num_test = int(proportions[1] * data_size)
     # Validation gets the remainder (the original line used the undefined
     # identifier `num_train_num_test`).
     num_valid = data_size - num_train - num_test
     # Use an ndarray: a range object cannot be shuffled in place.
     indices = np.arange(data_size)
     # Correct API is np.random.shuffle (np.random_shuffle raises).
     np.random.shuffle(indices)
     indices_train = indices[0:num_train]
     indices_valid = indices[num_train:num_train + num_valid]
     indices_test = indices[num_train + num_valid:data_size]
     self.X_train = self.X[indices_train]
     self.X_valid = self.X[indices_valid]
     self.X_test = self.X[indices_test]
     self.y_train = self.y[indices_train]
     self.y_valid = self.y[indices_valid]
     self.y_test = self.y[indices_test]
     self.set_sizes()
示例#3
0
    def load_labels(self, mode):
        """Load the labels for every image listed in the train/test index file.

        mode: 'train' or 'test' — selects which index file under
        self.data_path is read. Returns a shuffled list of dicts, each with
        keys 'imagename' (full path to the .jpg) and 'label'. Images whose
        load_data() reports zero objects are skipped.
        """
        if mode == 'train':
            txtname = os.path.join(self.data_path, 'train.txt')
        elif mode == 'test':
            # Original joined '.txt', which names no real file; 'test.txt'
            # mirrors the train branch — TODO confirm against the dataset.
            txtname = os.path.join(self.data_path, 'test.txt')
        else:
            # Previously txtname stayed unbound and open() raised NameError.
            raise ValueError("mode must be 'train' or 'test'")

        with open(txtname, 'r') as f:
            image_ind = [x.strip() for x in f.readlines()]

        labels = []
        for img in image_ind:
            # Loop variable is `img`; the original passed an undefined `ind`.
            label, num = self.load_data(img)
            if num == 0:
                # No annotated objects for this image — skip it.
                continue
            # '.jpg' with the dot (original concatenated bare 'jpg').
            imagename = os.path.join(self.data_path, 'Images', img + '.jpg')
            # dict literal requires ':' — the original `'label' = label`
            # was a SyntaxError.
            labels.append({'imagename': imagename, 'label': label})
        np.random.shuffle(labels)
        return labels
示例#4
0
def run_LAmbDA(gamma, delta, tau, prc_cut, bs_prc, do_prc, hidden_feats,
               lambda1, lambda2, lambda3):
    """Train the single-network (RNN) LAmbDA model and return the test loss.

    Phase 1 (first 1000 iters) fits unambiguously-labeled samples only;
    phase 2 optimizes the LAmbDA objective with the relabeling matrices.
    Relies on module-level globals X, Y, Gnp, Dnp, train, test, prt (and cv
    when prt is set). Returns the final loss1 evaluated on the test tensors.
    """
    print(
        "gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%.4f, bs_prc=%.4f, do_prc=%.4f, hidden_feats=%.4f, lambda1= %.4f, lambda2= %.4f, lambda3= %.4f"
        % (gamma, delta, tau, prc_cut, bs_prc, do_prc, hidden_feats, lambda1,
           lambda2, lambda3))
    global X, Y, Gnp, Dnp, train, test, prt
    D = tf.cast(Dnp, tf.float32)
    G = tf.cast(Gnp, tf.float32)
    hidden_feats = math.floor(hidden_feats)
    input_feats = X.shape[1]
    num_labls = G.shape.as_list()
    output_feats = num_labls[1]
    num_labls = num_labls[0]
    rowsums = np.sum(Gnp, axis=1)
    train2 = resample(prc_cut, Y, Gnp, train, gamma)
    bs = int(np.ceil(bs_prc * train2.size))
    # 3-D input placeholder (samples x time x features) — RNN layer below.
    xs = tf.placeholder(tf.float32, [None, X.shape[1], X.shape[2]])
    ys = tf.placeholder(tf.float32, [None, num_labls])
    layer1 = add_layer(xs,
                       input_feats,
                       hidden_feats,
                       activation_function=tf.sigmoid,
                       dropout_function=True,
                       rnn_layer=True,
                       lambda1=lambda1,
                       keep_prob1=do_prc)
    predict = add_layer(layer1,
                        hidden_feats,
                        output_feats,
                        activation_function=tf.nn.softmax,
                        dropout_function=False,
                        lambda1=lambda1)
    # Cm/mCm/yw/ye/yt/ya/yn: LAmbDA relabeling — build the adaptive target yn.
    Cm = tf.matmul(tf.transpose(tf.matmul(ys, D)), predict + 0.1) / tf.reshape(
        tf.reduce_sum(tf.transpose(tf.matmul(ys, D)), 1), (-1, 1))
    mCm = tf.reshape(
        tf.reduce_mean(
            tf.cast(tf.matmul(tf.transpose(D), G) > 0, tf.float32) * Cm, 1),
        (-1, 1))
    yw = tf.multiply(predict + 0.1,
                     tf.matmul(tf.matmul(ys, D), tf.pow(mCm / Cm, tau)))
    ye = tf.multiply(tf.matmul(ys, G), yw)
    yt = tf.matmul(ys, tf.matmul(tf.transpose(ys), ye))
    ya = (delta * yt) + ((1 - delta) * ye)
    yn = tf.one_hot(tf.argmax(ya, axis=1), output_feats)
    # Pairwise squared-distance matrix between per-label hidden centroids.
    Ct = tf.transpose(tf.matmul(tf.transpose(layer1), ys)) / tf.reshape(
        tf.reduce_sum(tf.transpose(ys + 0.01), 1), (-1, 1))
    E = tf.multiply(
        tf.matmul(tf.reshape(tf.reduce_sum(tf.square(Ct), 1),
                             (-1, 1)), tf.ones([1, num_labls])) +
        tf.matmul(tf.ones([num_labls, 1]),
                  tf.reshape(tf.reduce_sum(tf.square(Ct), 1), (1, -1))) -
        tf.multiply(tf.cast(2, tf.float32), tf.matmul(Ct, tf.transpose(Ct))),
        tf.ones([num_labls, num_labls]) - tf.eye(tf.cast(num_labls, tf.int32)))
    # M1/M2 masks: label pairs to pull together / push apart in the loss.
    M1 = (tf.cast(tf.matmul(G, tf.transpose(G)) > 0, tf.float32) *
          (tf.ones([num_labls, num_labls]) -
           tf.cast(tf.matmul(D, tf.transpose(D)) > 0, tf.float32))) / (
               tf.matrix_band_part(
                   tf.matmul(tf.reshape(tf.reduce_sum(G, 1), (-1, 1)),
                             tf.reshape(tf.ones([1, num_labls]),
                                        (1, -1))), -1, 0) +
               tf.transpose(
                   tf.matrix_band_part(
                       tf.matmul(tf.reshape(tf.reduce_sum(G, 1), (-1, 1)),
                                 tf.reshape(tf.ones([1, num_labls]),
                                            (1, -1))), -1, 0)))
    M2 = tf.cast(tf.matmul(D, tf.transpose(D)) > 0, tf.float32) / (
        tf.matrix_band_part(
            tf.matmul(
                tf.reshape(
                    tf.reduce_sum(
                        tf.cast(tf.matmul(D, tf.transpose(D)) > 0, tf.float32),
                        1),
                    (-1, 1)), tf.reshape(tf.ones([1, num_labls]),
                                         (1, -1))), -1, 0) +
        tf.transpose(
            tf.matrix_band_part(
                tf.matmul(
                    tf.reshape(
                        tf.reduce_sum(
                            tf.cast(
                                tf.matmul(D, tf.transpose(D)) > 0, tf.float32),
                            1),
                        (-1, 1)), tf.reshape(tf.ones([1, num_labls]),
                                             (1, -1))), -1, 0)))
    #***********************************************************************
    # Cost function with new label matrix
    iter = 2000
    # Initialize to unambiguous labels
    G2 = np.copy(Gnp)
    G2[rowsums > 1, :] = 0
    YI = np.matmul(Y, G2)
    YIrs = np.sum(YI, axis=1)
    G2 = tf.cast(G2, tf.float32)
    yi = tf.matmul(ys, G2)
    trainI = train2[np.in1d(train2, np.where(YIrs == 1))]
    testI = test[np.in1d(test, np.where(YIrs == 1))]
    lossI = tf.reduce_mean(
        tf.reduce_sum(tf.square(yi - predict), reduction_indices=[1]))
    train_stepI = tf.train.AdamOptimizer(learning_rate=0.01,
                                         epsilon=1e-8).minimize(lossI)
    # LAmbDA optimization
    loss1 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yn - predict), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(
        learning_rate=0.01,
        epsilon=1e-8).minimize(loss1 + lambda2 * tf.reduce_mean(E * M1) -
                               lambda3 * tf.reduce_mean(E * M2))
    init = tf.global_variables_initializer()
    # astype() returns a new array; the original discarded the result.
    trainI = trainI.astype(int)
    tensor_trainI = {xs: X[trainI, :, :], ys: Y[trainI, :]}
    tensor_testI = {xs: X[testI, :, :], ys: Y[testI, :]}
    tensor_train = {xs: X[train2[0:bs], :, :], ys: Y[train2[0:bs], :]}
    tensor_test = {xs: X[test, :, :], ys: Y[test, :]}
    # run training process
    sess = tf.Session()
    sess.run(init)
    for i in range(iter + 1):
        if i <= 1000:
            # Phase 1: pretrain on unambiguously-labeled samples only.
            sess.run(train_stepI, feed_dict=tensor_trainI)
            if i % 10 == 0:
                print(
                    str(sess.run(lossI, feed_dict=tensor_trainI)) + ' ' +
                    str(sess.run(lossI, feed_dict=tensor_testI)))
        else:
            sess.run(train_step, feed_dict=tensor_train)
            if i % 10 == 0:
                print(
                    str(sess.run(loss1, feed_dict=tensor_train)) + ' ' +
                    str(sess.run(loss1, feed_dict=tensor_test)) + ' ' +
                    str(lambda2 * sess.run(tf.reduce_mean(E * M1),
                                           feed_dict=tensor_train)) + ' ' +
                    str(lambda3 * sess.run(tf.reduce_mean(E * M2),
                                           feed_dict=tensor_train)))
            # Resample the minibatch every 50 iterations. The original used
            # `elif i % 50 == 0`, which never fires: any multiple of 50 is
            # also a multiple of 10, so the `if` branch always won.
            if i % 50 == 0:
                # np.random.shuffle — `np.random_shuffle` does not exist.
                np.random.shuffle(train2)
                tensor_train = {
                    xs: X[train2[0:bs], :, :],
                    ys: Y[train2[0:bs], :]
                }
    if prt:
        blah = sess.run(predict, feed_dict=tensor_test)
        blah2 = sess.run(layer1, feed_dict=tensor_test)
        sio.savemat('preds_cv' + str(cv) + '.mat', {'preds': blah})
        sio.savemat('truth_cv' + str(cv) + '.mat', {'labels': Y[test, :]})
        sio.savemat('hidden_cv' + str(cv) + '.mat', {'hidden': blah2})
    print(
        "loss1=%.4f, gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%.4f, bs_prc=%.4f, do_prc=%.4f, hidden_feats=%.4f, lambda1= %.4f, lambda2= %.4f, lambda3= %.4f"
        % (sess.run(loss1, feed_dict=tensor_test), gamma, delta, tau, prc_cut,
           bs_prc, do_prc, hidden_feats, lambda1, lambda2, lambda3))
    acc = sess.run(loss1, feed_dict=tensor_test)
    tf.reset_default_graph()
    return (acc)
def run_LAmbDA2(gamma, delta, tau, prc_cut, bs_prc, num_trees, max_nodes):
    """Train the random-forest variant of LAmbDA and return the test loss.

    First 50 iterations fit unambiguously-labeled samples; afterwards the
    forest trains on LAmbDA-relabeled targets from get_yn(). Relies on
    module-level globals X, Y, Gnp, Dnp, train, test, prt, cv. Returns the
    final accuracy_op (sum-of-squares loss) on the test tensors.
    """
    global X, Y, Gnp, Dnp, train, test, prt, cv
    D = tf.cast(Dnp, tf.float32)
    G = tf.cast(Gnp, tf.float32)
    num_trees = int(num_trees)
    max_nodes = int(max_nodes)
    prc_cut = int(np.ceil(prc_cut))
    print(
        "gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%i, bs_prc=%.4f, num_trees=%i, max_nodes=%i"
        % (gamma, delta, tau, prc_cut, bs_prc, num_trees, max_nodes))
    input_feats = X.shape[1]
    num_labls = G.shape.as_list()
    output_feats = num_labls[1]
    num_labls = num_labls[0]
    rowsums = np.sum(Gnp, axis=1)
    train2 = resample(prc_cut, Y, Gnp, train, gamma)
    bs = int(np.ceil(bs_prc * train2.size))
    xs = tf.placeholder(tf.float32, [None, input_feats])
    # yin carries integer class indices (not one-hot rows).
    yin = tf.placeholder(tf.int32, [None])
    print("Vars loaded xs and ys created")
    hparams = tensor_forest.ForestHParams(num_classes=output_feats,
                                          num_features=input_feats,
                                          num_trees=num_trees,
                                          max_nodes=max_nodes).fill()
    print("Tensor forest hparams created")
    forest_graph = tensor_forest.RandomForestGraphs(hparams)
    print("Tensor forest graph created")
    train_op = forest_graph.training_graph(xs, yin)
    loss_op = forest_graph.training_loss(xs, yin)
    print("Loss and train ops created")
    predict, _, _ = forest_graph.inference_graph(xs)
    print("Tensor forest variables created through predict")
    accuracy_op = tf.reduce_mean(
        tf.reduce_sum(tf.square(tf.one_hot(yin, output_feats) - predict),
                      reduction_indices=[1]))
    print(
        tf.reduce_sum(tf.square(tf.one_hot(yin, output_feats) - predict),
                      reduction_indices=[1]))
    print("Lambda specific variables created")
    # Creating training and testing steps
    G2 = np.copy(Gnp)
    G2[rowsums > 1, :] = 0
    YI = np.matmul(Y, G2)
    YIrs = np.sum(YI, axis=1)
    trainI = train2[np.in1d(train2, np.where(YIrs == 1))]
    print("data type trainI,", trainI.dtype)
    testI = test[np.in1d(test, np.where(YIrs == 1))]
    print("trainI testI created")
    # Forest variables live in resources, so both initializers are needed.
    init_vars = tf.group(
        tf.global_variables_initializer(),
        resources.initialize_resources(resources.shared_resources()))
    sess = tf.Session()
    sess.run(init_vars)
    print("Session started")
    tensor_trainI = {
        xs: X[trainI, :],
        yin: sess.run(tf.argmax(get_yi(rowsums, G2, Y[trainI, :]), axis=1))
    }
    print("tensor_trainI made")
    tensor_testI = {
        xs: X[testI, :],
        yin: sess.run(tf.argmax(get_yi(rowsums, G2, Y[testI, :]), axis=1))
    }
    print("tensor_testI made")
    tensor_train = {
        xs:
        X[train2[0:bs], :],
        yin:
        sess.run(
            tf.argmax(get_yn(
                sess.run(predict, feed_dict={xs: X[train2[0:bs], :]}),
                Y[train2[0:bs], :], delta, tau, output_feats),
                      axis=1))
    }
    print("tensor_train made")
    tensor_test = {
        xs:
        X[test, :],
        yin:
        sess.run(
            tf.argmax(get_yn(sess.run(predict, feed_dict={xs: X[test, :]}),
                             Y[test, :], delta, tau, output_feats),
                      axis=1))
    }
    print("tensor_test made")
    # Setting params and initializing
    print("Beginning iterations")
    # Starting training iterations
    print(X.shape)
    for i in range(1, 101):
        if i < 50:
            # Phase 1: unambiguous labels only.
            sess.run(train_op, feed_dict=tensor_trainI)
            if i % 10 == 0:
                print(
                    str(sess.run(accuracy_op, feed_dict=tensor_trainI)) + ' ' +
                    str(sess.run(accuracy_op, feed_dict=tensor_testI)))
        else:
            sess.run(train_op, feed_dict=tensor_train)
            if i % 10 == 0:
                print(
                    str(sess.run(accuracy_op, feed_dict=tensor_train)) + ' ' +
                    str(sess.run(accuracy_op, feed_dict=tensor_test)))
            # Periodically resample the minibatch. The original guard was
            # `elif i % 10 == 0` after `if i % 10 == 0` — dead code. Using
            # every 50th iteration, matching the sibling run_LAmbDA
            # functions — TODO confirm intended cadence.
            if i % 50 == 0:
                # np.random.shuffle — `np.random_shuffle` does not exist.
                np.random.shuffle(train2)
                tensor_train = {
                    xs:
                    X[train2[0:bs], :],
                    yin:
                    # tf.argmax added: yin expects class indices, and the
                    # initial tensor_train above argmaxes get_yn() output.
                    sess.run(
                        tf.argmax(get_yn(
                            sess.run(predict,
                                     feed_dict={xs: X[train2[0:bs], :]}),
                            Y[train2[0:bs], :], delta, tau, output_feats),
                                  axis=1))
                }
    if prt:
        blah = sess.run(predict, feed_dict=tensor_test)
        sio.savemat('preds_cv' + str(cv) + '.mat', {'preds': blah})
        sio.savemat('truth_cv' + str(cv) + '.mat', {'labels': Y[test, :]})
    acc = sess.run(accuracy_op, feed_dict=tensor_test)
    print(
        "loss1=%.4f, gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%i, bs_prc=%.4f, num_trees=%i, max_nodes=%i"
        % (acc, gamma, delta, tau, prc_cut, bs_prc, num_trees, max_nodes))
    tf.reset_default_graph()
    return (acc)
示例#6
0
def run_LAmbDA(gamma, delta, tau, prc_cut, bs_prc, do_prc, hidden_feats,
               lambda1, lambda2, lambda3):
    """Train a 5-network LAmbDA ensemble and return the averaged test loss.

    Five identical feed-forward sub-networks are pretrained on unambiguous
    labels (first 1000 iters) and then optimized on the LAmbDA objective;
    losses/predictions are averaged across the ensemble. Relies on module
    globals X, Y, Gnp, Dnp, train, test, prt (and cv when prt is set).
    Returns loss1_All evaluated on the test tensors.
    """
    print(
        "gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%.4f, bs_prc=%.4f, do_prc=%.4f, hidden_feats=%.4f, lambda1= %.4f, lambda2= %.4f, lambda3= %.4f"
        % (gamma, delta, tau, prc_cut, bs_prc, do_prc, hidden_feats, lambda1,
           lambda2, lambda3))
    global X, Y, Gnp, Dnp, train, test, prt
    D = tf.cast(Dnp, tf.float32)
    G = tf.cast(Gnp, tf.float32)
    hidden_feats = int(math.floor(hidden_feats))
    input_feats = X.shape[1]
    num_labls = G.shape.as_list()
    output_feats = num_labls[1]
    num_labls = num_labls[0]
    rowsums = np.sum(Gnp, axis=1)
    train2 = resample(prc_cut, Y, Gnp, train, gamma)
    bs = int(np.ceil(bs_prc * train2.size))
    xs = tf.placeholder(tf.float32, [None, input_feats])
    ys = tf.placeholder(tf.float32, [None, num_labls])
    # Five independent hidden/output pairs form the ensemble.
    layer1_1 = add_layer(xs,
                         input_feats,
                         hidden_feats,
                         activation_function=tf.sigmoid,
                         dropout_function=True,
                         lambda1=lambda1,
                         keep_prob1=do_prc)
    predict_1 = add_layer(layer1_1,
                          hidden_feats,
                          output_feats,
                          activation_function=tf.nn.softmax,
                          dropout_function=False,
                          lambda1=lambda1)
    layer1_2 = add_layer(xs,
                         input_feats,
                         hidden_feats,
                         activation_function=tf.sigmoid,
                         dropout_function=True,
                         lambda1=lambda1,
                         keep_prob1=do_prc)
    predict_2 = add_layer(layer1_2,
                          hidden_feats,
                          output_feats,
                          activation_function=tf.nn.softmax,
                          dropout_function=False,
                          lambda1=lambda1)
    layer1_3 = add_layer(xs,
                         input_feats,
                         hidden_feats,
                         activation_function=tf.sigmoid,
                         dropout_function=True,
                         lambda1=lambda1,
                         keep_prob1=do_prc)
    predict_3 = add_layer(layer1_3,
                          hidden_feats,
                          output_feats,
                          activation_function=tf.nn.softmax,
                          dropout_function=False,
                          lambda1=lambda1)
    layer1_4 = add_layer(xs,
                         input_feats,
                         hidden_feats,
                         activation_function=tf.sigmoid,
                         dropout_function=True,
                         lambda1=lambda1,
                         keep_prob1=do_prc)
    predict_4 = add_layer(layer1_4,
                          hidden_feats,
                          output_feats,
                          activation_function=tf.nn.softmax,
                          dropout_function=False,
                          lambda1=lambda1)
    layer1_5 = add_layer(xs,
                         input_feats,
                         hidden_feats,
                         activation_function=tf.sigmoid,
                         dropout_function=True,
                         lambda1=lambda1,
                         keep_prob1=do_prc)
    predict_5 = add_layer(layer1_5,
                          hidden_feats,
                          output_feats,
                          activation_function=tf.nn.softmax,
                          dropout_function=False,
                          lambda1=lambda1)
    # Adaptive LAmbDA targets, one per sub-network.
    yn_1 = get_yn(predict_1, ys, delta, tau, output_feats)
    yn_2 = get_yn(predict_2, ys, delta, tau, output_feats)
    yn_3 = get_yn(predict_3, ys, delta, tau, output_feats)
    yn_4 = get_yn(predict_4, ys, delta, tau, output_feats)
    yn_5 = get_yn(predict_5, ys, delta, tau, output_feats)
    # Per-network centroid distance matrices used by the M1/M2 penalties.
    E_1 = get_Eucl(layer1_1, ys, num_labls)
    E_2 = get_Eucl(layer1_2, ys, num_labls)
    E_3 = get_Eucl(layer1_3, ys, num_labls)
    E_4 = get_Eucl(layer1_4, ys, num_labls)
    E_5 = get_Eucl(layer1_5, ys, num_labls)
    M1 = (tf.cast(tf.matmul(G, tf.transpose(G)) > 0, tf.float32) *
          (tf.ones([num_labls, num_labls]) -
           tf.cast(tf.matmul(D, tf.transpose(D)) > 0, tf.float32))) / (
               tf.matrix_band_part(
                   tf.matmul(tf.reshape(tf.reduce_sum(G, 1), (-1, 1)),
                             tf.reshape(tf.ones([1, num_labls]),
                                        (1, -1))), -1, 0) +
               tf.transpose(
                   tf.matrix_band_part(
                       tf.matmul(tf.reshape(tf.reduce_sum(G, 1), (-1, 1)),
                                 tf.reshape(tf.ones([1, num_labls]),
                                            (1, -1))), -1, 0)))
    M2 = tf.cast(tf.matmul(D, tf.transpose(D)) > 0, tf.float32) / (
        tf.matrix_band_part(
            tf.matmul(
                tf.reshape(
                    tf.reduce_sum(
                        tf.cast(tf.matmul(D, tf.transpose(D)) > 0, tf.float32),
                        1),
                    (-1, 1)), tf.reshape(tf.ones([1, num_labls]),
                                         (1, -1))), -1, 0) +
        tf.transpose(
            tf.matrix_band_part(
                tf.matmul(
                    tf.reshape(
                        tf.reduce_sum(
                            tf.cast(
                                tf.matmul(D, tf.transpose(D)) > 0, tf.float32),
                            1),
                        (-1, 1)), tf.reshape(tf.ones([1, num_labls]),
                                             (1, -1))), -1, 0)))
    #***********************************************************************
    # Cost function with new label matrix
    iter = 2000
    # Initialize to unambiguous labels
    G2 = np.copy(Gnp)
    G2[rowsums > 1, :] = 0
    YI = np.matmul(Y, G2)
    YIrs = np.sum(YI, axis=1)
    print(YIrs.shape)
    G2 = tf.cast(G2, tf.float32)
    yi = tf.matmul(ys, G2)
    trainI = train2[np.in1d(train2, np.where(YIrs == 1))]
    testI = test[np.in1d(test, np.where(YIrs == 1))]
    lossI_1 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yi - predict_1), reduction_indices=[1]))
    lossI_2 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yi - predict_2), reduction_indices=[1]))
    lossI_3 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yi - predict_3), reduction_indices=[1]))
    lossI_4 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yi - predict_4), reduction_indices=[1]))
    lossI_5 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yi - predict_5), reduction_indices=[1]))
    lossI_All = tf.reduce_mean(
        tf.reduce_sum(tf.square(yi - (
            (predict_1 + predict_2 + predict_3 + predict_4 + predict_5) / 5)),
                      reduction_indices=[1]))
    train_stepI_1 = tf.train.AdamOptimizer(learning_rate=0.01,
                                           epsilon=1e-8).minimize(lossI_1)
    train_stepI_2 = tf.train.AdamOptimizer(learning_rate=0.01,
                                           epsilon=1e-8).minimize(lossI_2)
    train_stepI_3 = tf.train.AdamOptimizer(learning_rate=0.01,
                                           epsilon=1e-8).minimize(lossI_3)
    train_stepI_4 = tf.train.AdamOptimizer(learning_rate=0.01,
                                           epsilon=1e-8).minimize(lossI_4)
    train_stepI_5 = tf.train.AdamOptimizer(learning_rate=0.01,
                                           epsilon=1e-8).minimize(lossI_5)
    # LAmbDA optimization
    loss1_1 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yn_1 - predict_1), reduction_indices=[1]))
    loss1_2 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yn_2 - predict_2), reduction_indices=[1]))
    loss1_3 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yn_3 - predict_3), reduction_indices=[1]))
    loss1_4 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yn_4 - predict_4), reduction_indices=[1]))
    loss1_5 = tf.reduce_mean(
        tf.reduce_sum(tf.square(yn_5 - predict_5), reduction_indices=[1]))
    loss1_All = tf.reduce_mean(
        tf.reduce_sum(tf.square(((yn_1 + yn_2 + yn_3 + yn_4 + yn_5) / 5) - (
            (predict_1 + predict_2 + predict_3 + predict_4 + predict_5) / 5)),
                      reduction_indices=[1]))
    train_step_1 = tf.train.AdamOptimizer(
        learning_rate=0.01,
        epsilon=1e-8).minimize(loss1_1 + lambda2 * tf.reduce_mean(E_1 * M1) -
                               lambda3 * tf.reduce_mean(E_1 * M2))
    train_step_2 = tf.train.AdamOptimizer(
        learning_rate=0.01,
        epsilon=1e-8).minimize(loss1_2 + lambda2 * tf.reduce_mean(E_2 * M1) -
                               lambda3 * tf.reduce_mean(E_2 * M2))
    train_step_3 = tf.train.AdamOptimizer(
        learning_rate=0.01,
        epsilon=1e-8).minimize(loss1_3 + lambda2 * tf.reduce_mean(E_3 * M1) -
                               lambda3 * tf.reduce_mean(E_3 * M2))
    train_step_4 = tf.train.AdamOptimizer(
        learning_rate=0.01,
        epsilon=1e-8).minimize(loss1_4 + lambda2 * tf.reduce_mean(E_4 * M1) -
                               lambda3 * tf.reduce_mean(E_4 * M2))
    train_step_5 = tf.train.AdamOptimizer(
        learning_rate=0.01,
        epsilon=1e-8).minimize(loss1_5 + lambda2 * tf.reduce_mean(E_5 * M1) -
                               lambda3 * tf.reduce_mean(E_5 * M2))
    train_step_All = tf.train.AdamOptimizer(
        learning_rate=0.01, epsilon=1e-8
    ).minimize(loss1_All + lambda2 * tf.reduce_mean(
        ((E_1 + E_2 + E_3 + E_4 + E_5) / 5) * M1) - lambda3 * tf.reduce_mean(
            ((E_1 + E_2 + E_3 + E_4 + E_5) / 5) * M2))
    init = tf.global_variables_initializer()
    # astype() returns a new array; the original discarded the result.
    trainI = trainI.astype(int)
    tensor_trainI = {xs: X[trainI, :], ys: Y[trainI, :]}
    tensor_testI = {xs: X[testI, :], ys: Y[testI, :]}
    tensor_train = {xs: X[train2[0:bs], :], ys: Y[train2[0:bs], :]}
    tensor_test = {xs: X[test, :], ys: Y[test, :]}
    # run training process
    sess = tf.Session()
    sess.run(init)
    for i in range(iter + 1):
        if i <= 1000:
            # Phase 1: pretrain each sub-network on unambiguous labels.
            sess.run(train_stepI_1, feed_dict=tensor_trainI)
            sess.run(train_stepI_2, feed_dict=tensor_trainI)
            sess.run(train_stepI_3, feed_dict=tensor_trainI)
            sess.run(train_stepI_4, feed_dict=tensor_trainI)
            sess.run(train_stepI_5, feed_dict=tensor_trainI)
            if i % 10 == 0:
                print(
                    str(sess.run(lossI_All, feed_dict=tensor_trainI)) + ' ' +
                    str(sess.run(lossI_All, feed_dict=tensor_testI)))
        else:
            sess.run(train_step_1, feed_dict=tensor_train)
            sess.run(train_step_2, feed_dict=tensor_train)
            sess.run(train_step_3, feed_dict=tensor_train)
            sess.run(train_step_4, feed_dict=tensor_train)
            sess.run(train_step_5, feed_dict=tensor_train)
            if i % 10 == 0:
                print(
                    str(sess.run(loss1_All, feed_dict=tensor_train)) + ' ' +
                    str(sess.run(loss1_All, feed_dict=tensor_test)) + ' ' +
                    str(lambda2 * sess.run(tf.reduce_mean((
                        (E_1 + E_2 + E_3 + E_4 + E_5) / 5) * M1),
                                           feed_dict=tensor_train)) + ' ' +
                    str(lambda3 * sess.run(tf.reduce_mean((
                        (E_1 + E_2 + E_3 + E_4 + E_5) / 5) * M2),
                                           feed_dict=tensor_train)))
            # Resample the minibatch every 50 iterations. The original
            # `elif i % 50 == 0` never fired: multiples of 50 are also
            # multiples of 10, so the print branch always won.
            if i % 50 == 0:
                # np.random.shuffle — `np.random_shuffle` does not exist.
                np.random.shuffle(train2)
                tensor_train = {xs: X[train2[0:bs], :], ys: Y[train2[0:bs], :]}
    if prt:
        blah = sess.run(
            (predict_1 + predict_2 + predict_3 + predict_4 + predict_5) / 5,
            feed_dict=tensor_test)
        # Dump each sub-network's own hidden layer (the original ran
        # layer1_1 five times, saving five identical copies).
        blah2_1 = sess.run(layer1_1, feed_dict=tensor_test)
        blah2_2 = sess.run(layer1_2, feed_dict=tensor_test)
        blah2_3 = sess.run(layer1_3, feed_dict=tensor_test)
        blah2_4 = sess.run(layer1_4, feed_dict=tensor_test)
        blah2_5 = sess.run(layer1_5, feed_dict=tensor_test)
        sio.savemat('preds_cv' + str(cv) + '.mat', {'preds': blah})
        sio.savemat('truth_cv' + str(cv) + '.mat', {'labels': Y[test, :]})
        sio.savemat(
            'hidden_cv' + str(cv) + '.mat', {
                'hidden1': blah2_1,
                'hidden2': blah2_2,
                'hidden3': blah2_3,
                'hidden4': blah2_4,
                'hidden5': blah2_5
            })
    print(
        "loss1_All=%.4f, gamma=%.4f, delta=%.4f, tau=%.4f, prc_cut=%.4f, bs_prc=%.4f, do_prc=%.4f, hidden_feats=%.4f, lambda1= %.4f, lambda2= %.4f, lambda3= %.4f"
        % (sess.run(loss1_All, feed_dict=tensor_test), gamma, delta, tau,
           prc_cut, bs_prc, do_prc, hidden_feats, lambda1, lambda2, lambda3))
    acc = sess.run(loss1_All, feed_dict=tensor_test)
    tf.reset_default_graph()
    return (acc)