import tensorflow as tf

# The layer helpers are assumed to live in a local module; adjust this import
# to match the actual layout of the repository.
from layers import gcnLayer, graph_cluster_maxpooling, globalPooling, fullyConnected


def model_architecture(para):
    # Description: build the hierarchical model architecture (the TensorFlow data-flow graph)
    # Input: global parameter instance
    # Return: dictionary of placeholders and graph ops, plus the initialized session
    inputPC = tf.placeholder(tf.float32, [None, para.pointNumber, 3])
    inputGraph = tf.placeholder(tf.float32, [None, para.pointNumber * para.pointNumber])
    l2Graph = tf.placeholder(tf.float32, [None, para.clusterNumberL1 * para.clusterNumberL1])
    outputLabel = tf.placeholder(tf.float32, [None, para.outputClassN])
    batch_size = tf.placeholder(tf.int32)

    batch_index_l1 = tf.placeholder(tf.int32, [None, para.clusterNumberL1 * para.nearestNeighborL1])
    # batch_index_l2 = tf.placeholder(tf.int32, [None, para.clusterNumberL2 * para.nearestNeighborL2])

    scaledLaplacian = tf.reshape(inputGraph, [-1, para.pointNumber, para.pointNumber])
    l2_scaledLaplacian = tf.reshape(l2Graph, [-1, para.clusterNumberL1, para.clusterNumberL1])

    weights = tf.placeholder(tf.float32, [None])
    lr = tf.placeholder(tf.float32)
    keep_prob_1 = tf.placeholder(tf.float32)
    keep_prob_2 = tf.placeholder(tf.float32)

    # gcn layer 1: Chebyshev graph convolution over the scaled Laplacian,
    # followed by dropout and cluster-wise max pooling, which coarsens the
    # point set from pointNumber nodes down to clusterNumberL1 nodes.
    gcn_1 = gcnLayer(inputPC, scaledLaplacian, pointNumber=para.pointNumber, inputFeatureN=3,
                     outputFeatureN=para.gcn_1_filter_n,
                     chebyshev_order=para.chebyshev_1_Order)
    gcn_1_output = tf.nn.dropout(gcn_1, keep_prob=keep_prob_1)
    gcn_1_pooling = graph_cluster_maxpooling(batch_index_l1, gcn_1_output, batch_size=batch_size,
                                             M=para.clusterNumberL1, k=para.nearestNeighborL1, n=para.gcn_1_filter_n)
    
    # Kept for the (currently disabled) feature-concatenation path below.
    globalFeatures_1 = tf.reduce_max(gcn_1_pooling, axis=1)
    print(gcn_1_pooling)

    # gcn layer 2: operates on the coarsened graph (l2_scaledLaplacian).
    gcn_2 = gcnLayer(gcn_1_pooling, l2_scaledLaplacian, pointNumber=para.clusterNumberL1,
                     inputFeatureN=para.gcn_1_filter_n,
                     outputFeatureN=para.gcn_2_filter_n, chebyshev_order=para.chebyshev_1_Order)

    gcn_2_output = tf.nn.dropout(gcn_2, keep_prob=keep_prob_1)
    # gcn_2_pooling = graph_cluster_maxpooling(batch_index_l2, gcn_2_output, batch_size=batch_size,
    # M=para.clusterNumberL2, k=para.nearestNeighborL2, n=para.gcn_2_filter_n)
    gcn_2_pooling = gcn_2_output
    print(gcn_2_pooling)

    globalFeatures = tf.reduce_max(gcn_2_pooling, axis=1)
    print(globalFeatures)

    globalFeatures = tf.nn.dropout(globalFeatures, keep_prob=keep_prob_2)
    print("The global feature is {}".format(globalFeatures))

    # Alternative: concatenate the level-1 and level-2 global features.
    # final_concat_features = tf.concat([globalFeatures_1, globalFeatures], axis=1)
    globalFeatureN = para.gcn_2_filter_n

    # fully connected layer 1
    fc_layer_1 = fullyConnected(globalFeatures, inputFeatureN=globalFeatureN, outputFeatureN=para.fc_1_n)
    fc_layer_1 = tf.nn.relu(fc_layer_1)
    fc_layer_1 = tf.nn.dropout(fc_layer_1, keep_prob=keep_prob_2)
    print("The output of the first fc layer is {}".format(fc_layer_1))

    # fully connected layer 2
    fc_layer_2 = fullyConnected(fc_layer_1, inputFeatureN=para.fc_1_n, outputFeatureN=para.outputClassN)
    print("The output of the second fc layer is {}".format(fc_layer_2))

    # =================================Define loss===========================
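    # The per-sample `weights` placeholder scales each example's cross-entropy
    # before averaging, presumably to counter class imbalance in the batch.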
    predictSoftMax = tf.nn.softmax(fc_layer_2)
    predictLabels = tf.argmax(predictSoftMax, axis=1)
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=fc_layer_2, labels=outputLabel)
    loss = tf.multiply(loss, weights)
    loss = tf.reduce_mean(loss)

    train_vars = tf.trainable_variables()
    # L2 weight decay over all non-bias variables; the 8e-6 coefficient was the
    # best setting found (a coefficient of 10e-6 was used previously).
    loss_reg = tf.add_n([tf.nn.l2_loss(v) for v in train_vars if 'bias' not in v.name]) * 8e-6
    loss_total = loss + loss_reg

    correct_prediction = tf.equal(predictLabels, tf.argmax(outputLabel, axis=1))
    acc = tf.cast(correct_prediction, tf.float32)
    acc = tf.reduce_mean(acc)

    train = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total)

    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    print('Total number of parameters is {}'.format(total_parameters))

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    trainOperation = {'train': train, 'loss': loss, 'acc': acc, 'loss_total': loss_total, 'loss_reg': loss_reg,
                      'inputPC': inputPC, 'inputGraph': inputGraph, 'l2Graph': l2Graph, 'outputLabel': outputLabel,
                      'weights': weights, 'predictLabels': predictLabels, 'batch_index_l1': batch_index_l1,
                      'keep_prob_1': keep_prob_1, 'keep_prob_2': keep_prob_2, 'lr': lr, 'batch_size': batch_size}
    return trainOperation, sess
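

# --- Illustrative usage sketch (not part of the original repo) --------------
# Shows how the dictionary and session returned by model_architecture might be
# used for a single training step. The `batch` dict, its keys, and the keep
# probabilities are hypothetical stand-ins; the real pipeline builds these
# arrays from the point-cloud loader and the graph-coarsening step.
def run_train_step_example(trainOperation, sess, batch, learning_rate=1e-3):
    feed_dict = {
        trainOperation['inputPC']: batch['points'],            # (B, pointNumber, 3)
        trainOperation['inputGraph']: batch['laplacian'],      # (B, pointNumber ** 2), flattened
        trainOperation['l2Graph']: batch['l2_laplacian'],      # (B, clusterNumberL1 ** 2), flattened
        trainOperation['batch_index_l1']: batch['pool_index'], # (B, clusterNumberL1 * nearestNeighborL1)
        trainOperation['outputLabel']: batch['labels'],        # (B, outputClassN), one-hot
        trainOperation['weights']: batch['weights'],           # (B,), per-sample loss weights
        trainOperation['lr']: learning_rate,
        trainOperation['keep_prob_1']: 0.9,                    # illustrative dropout keep rates
        trainOperation['keep_prob_2']: 0.5,
        trainOperation['batch_size']: batch['points'].shape[0],
    }
    _, loss_val, acc_val = sess.run(
        [trainOperation['train'], trainOperation['loss'], trainOperation['acc']],
        feed_dict=feed_dict)
    return loss_val, acc_val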
# Variant without the hierarchical clustering/pooling stage: both GCN layers
# operate on the full point set and their outputs are combined by global
# pooling, so it is defined under a distinct name.
def model_architecture_global(para):
    # Description: build the global-pooling model architecture
    # Input: global parameter instance
    # Return: dictionary of placeholders and graph ops
    inputPC = tf.placeholder(tf.float32, [None, para.pointNumber, 3])
    inputGraph = tf.placeholder(tf.float32, [None, para.pointNumber * para.pointNumber])
    outputLabel = tf.placeholder(tf.float32, [None, para.outputClassN])

    scaledLaplacian = tf.reshape(inputGraph, [-1, para.pointNumber, para.pointNumber])

    weights = tf.placeholder(tf.float32, [None])
    lr = tf.placeholder(tf.float32)
    keep_prob_1 = tf.placeholder(tf.float32)
    keep_prob_2 = tf.placeholder(tf.float32)

    # gcn layer 1
    gcn_1 = gcnLayer(inputPC, scaledLaplacian, pointNumber=para.pointNumber, inputFeatureN=3,
                     outputFeatureN=para.gcn_1_filter_n,
                     chebyshev_order=para.chebyshev_1_Order)
    gcn_1_output = tf.nn.dropout(gcn_1, keep_prob=keep_prob_1)
    gcn_1_pooling = globalPooling(gcn_1_output, featureNumber=para.gcn_1_filter_n)
    print("The output of the first gcn layer is {}".format(gcn_1_pooling))
    print gcn_1_pooling

    # gcn layer 2
    gcn_2 = gcnLayer(gcn_1_output, scaledLaplacian, pointNumber=para.pointNumber, inputFeatureN=para.gcn_1_filter_n,
                     outputFeatureN=para.gcn_2_filter_n,
                     chebyshev_order=para.chebyshev_2_Order)
    gcn_2_output = tf.nn.dropout(gcn_2, keep_prob=keep_prob_1)
    gcn_2_pooling = globalPooling(gcn_2_output, featureNumber=para.gcn_2_filter_n)
    print("The output of the second gcn layer is {}".format(gcn_2_pooling))
    
    # gcn layer 3 (currently disabled)
    '''
    gcn_3 = gcnLayer(gcn_2_output, scaledLaplacian, pointNumber=para.pointNumber, inputFeatureN=para.gcn_2_filter_n,
                     outputFeatureN=para.gcn_3_filter_n,
                     chebyshev_order=para.chebyshev_2_Order)
    gcn_3_output = tf.nn.dropout(gcn_3, keep_prob=keep_prob_1)
    gcn_3_pooling = globalPooling(gcn_3_output, featureNumber=para.gcn_3_filter_n)
    print("The output of the third gcn layer is {}".format(gcn_3_pooling))
    '''

    # concatenate the global features from both gcn layers
    # globalFeatures = gcn_3_pooling
    globalFeatures = tf.concat([gcn_1_pooling, gcn_2_pooling], axis=1)
    globalFeatures = tf.nn.dropout(globalFeatures, keep_prob=keep_prob_2)
    print("The global feature is {}".format(globalFeatures))
    # The factor of 2 assumes globalPooling stacks two pooled statistics per filter.
    globalFeatureN = (para.gcn_1_filter_n + para.gcn_2_filter_n) * 2

    # fully connected layer 1
    fc_layer_1 = fullyConnected(globalFeatures, inputFeatureN=globalFeatureN, outputFeatureN=para.fc_1_n)
    fc_layer_1 = tf.nn.relu(fc_layer_1)
    fc_layer_1 = tf.nn.dropout(fc_layer_1, keep_prob=keep_prob_2)
    print("The output of the first fc layer is {}".format(fc_layer_1))

    # fully connected layer 2
    fc_layer_2 = fullyConnected(fc_layer_1, inputFeatureN=para.fc_1_n, outputFeatureN=para.outputClassN)
    print("The output of the second fc layer is {}".format(fc_layer_2))

    # =================================Define loss===========================
    predictSoftMax = tf.nn.softmax(fc_layer_2)
    predictLabels = tf.argmax(predictSoftMax, axis=1)
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=fc_layer_2, labels=outputLabel)
    loss = tf.multiply(loss, weights)
    loss = tf.reduce_mean(loss)

    train_vars = tf.trainable_variables()
    # L2 weight decay over all non-bias variables; the 8e-6 coefficient was the
    # best setting found (a coefficient of 10e-6 was used previously).
    loss_reg = tf.add_n([tf.nn.l2_loss(v) for v in train_vars if 'bias' not in v.name]) * 8e-6
    loss_total = loss + loss_reg

    correct_prediction = tf.equal(predictLabels, tf.argmax(outputLabel, axis=1))
    acc = tf.cast(correct_prediction, tf.float32)
    acc = tf.reduce_mean(acc)

    train = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total)

    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    print('Total number of parameters is {}'.format(total_parameters))
    
    trainOperation = {'train': train, 'loss_total': loss_total, 'loss': loss, 'acc': acc, 'loss_reg': loss_reg,
                      'inputPC': inputPC, 'inputGraph': inputGraph, 'outputLabel': outputLabel, 'weights': weights,
                      'predictLabels': predictLabels,
                      'keep_prob_1': keep_prob_1, 'keep_prob_2': keep_prob_2, 'lr': lr}

    return trainOperation
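

# --- Illustrative usage sketch (not part of the original repo) --------------
# Unlike the hierarchical variant, model_architecture_global does not create a
# session, so the caller initializes the variables itself. The batch arrays
# here are hypothetical stand-ins for the real data pipeline.
def run_inference_example(para, batch_points, batch_laplacian):
    trainOperation = model_architecture_global(para)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    feed_dict = {
        trainOperation['inputPC']: batch_points,        # (B, pointNumber, 3)
        trainOperation['inputGraph']: batch_laplacian,  # (B, pointNumber ** 2), flattened
        trainOperation['keep_prob_1']: 1.0,             # disable dropout at inference time
        trainOperation['keep_prob_2']: 1.0,
    }
    return sess.run(trainOperation['predictLabels'], feed_dict=feed_dict)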