Example #1
    trainOperation = model_architecture(para)
    #init = tf.global_variables_initializer()
    #sess = tf.Session()
    #sess.run(init)
    # ================================Load data===============================
    if para.dataset == 'ModelNet40':
        inputTrain, trainLabel, inputTest, testLabel = load_data(
            pointNumber, samplingType)
    elif para.dataset == 'ModelNet10':
        ModelNet10_dir = '/raid60/yingxue.zhang2/ICASSP_code/data/'
        with open(ModelNet10_dir + 'input_data', 'rb') as handle:
            inputTrain, trainLabel, inputTest, testLabel = pickle.load(handle)
    else:
        print "Please enter a valid dataset"
    scaledLaplacianTrain, scaledLaplacianTest = prepareData(
        inputTrain, inputTest, neighborNumber, pointNumber)

    # ===============================Train model ================================
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    saver = tf.train.Saver()
    learningRate = para.learningRate

    modelDir = para.modelDir
    save_model_path = modelDir + "model_" + para.fileName
    weight_dict = weight_dict_fc(trainLabel, para)
    testLabelWhole = []
    for labels in testLabel:
        testLabelWhole.extend(labels)
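    # The flattening loop above can also be done in one call; a minimal
    # sketch, assuming numpy is imported as np and testLabel is a list of
    # 1-D per-batch label arrays:
    #   testLabelWhole = np.concatenate(testLabel)
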
Example #2
# (tensorflow as tf, numpy as np, time, and the project modules read_data,
#  utils, and the RPGCN model are assumed to be imported earlier in the file)
placeholders = {
    # ... earlier entries of the placeholder dict are omitted in this excerpt ...
    'poolIndex_1':
    tf.placeholder(tf.int32, [None, para.vertexNumG2 * para.poolNumG1],
                   name='poolIndex1'),
    'poolIndex_2':
    tf.placeholder(tf.int32, [None, para.vertexNumG3 * para.poolNumG2],
                   name='poolIndex2'),
    'poolIndex_3':
    tf.placeholder(tf.int32, [None, para.vertexNumG4 * para.poolNumG3],
                   name='poolIndex3')
    # 'lr': tf.placeholder(tf.float32, name='lr'),
}
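# A minimal, self-contained sketch (illustration only) of how an int32
# pooling-index placeholder like the ones above is fed and consumed with
# tf.gather; the demo shape [None, 4], the constant, and the index values
# below are made up and are not part of this script.
demo_index = tf.placeholder(tf.int32, [None, 4], name='poolIndexDemo')
demo_gather = tf.gather(tf.constant([10., 20., 30., 40.]), demo_index)
with tf.Session() as demo_sess:
    demo_idx = np.array([[0, 1, 1, 3], [2, 2, 0, 1]], dtype=np.int32)
    print(demo_sess.run(demo_gather, feed_dict={demo_index: demo_idx}))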
# ================================Load data===============================
inputTrain, trainLabel, inputTest, testLabel = read_data.load_data(
    para.vertexNumG1, para.samplingType, para.dataDir)
# layer_1: (1)graph generate
scaledLaplacianTrain, scaledLaplacianTest = read_data.prepareData(
    inputTrain, inputTest, para.edgeNumG1, para.vertexNumG1, para.dataDir)
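# A minimal sketch of the kind of graph construction prepareData performs
# (the real read_data.prepareData may differ): a k-NN adjacency over the
# points, then the Laplacian rescaled for Chebyshev filtering,
# L_scaled = 2 L / lambda_max - I.
def scaled_laplacian_demo(points, k):
    dist = np.linalg.norm(points[:, None, :] - points[None, :, :], axis=-1)
    nn = np.argsort(dist, axis=1)[:, 1:k + 1]   # k nearest neighbors (skip self)
    n = points.shape[0]
    adj = np.zeros((n, n))
    for i in range(n):
        adj[i, nn[i]] = 1.0
    adj = np.maximum(adj, adj.T)                # symmetrize
    lap = np.diag(adj.sum(1)) - adj             # combinatorial Laplacian
    lam_max = np.linalg.eigvalsh(lap).max()
    return 2.0 * lap / lam_max - np.eye(n)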
train_weight_dict = utils.train_weight_dict(trainLabel, para)
eval_weight_dict = utils.eval_weight_dict(testLabel)
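# A minimal sketch of one common way such class-weight dicts are built
# (inverse label frequency, normalized); the real utils.train_weight_dict
# and utils.eval_weight_dict may use a different scheme.
def inverse_freq_weight_demo(labels):
    flat = np.concatenate([np.ravel(l) for l in labels]).astype(np.int64)
    inv = float(flat.size) / np.maximum(np.bincount(flat), 1)
    return {c: w / inv.mean() for c, w in enumerate(inv)}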
# ================================Create model===============================
model = RPGCN(para, placeholders, logging=True)
# =============================Initialize session=============================
sess = tf.Session()
# ==============================Init variables===============================
if para.restoreModel:
    model.load(para.ckptDir, sess)
else:
    sess.run(tf.global_variables_initializer())
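# For reference, model.load above is assumed to wrap a standard
# tf.train.Saver restore, roughly:
#   saver = tf.train.Saver()
#   ckpt = tf.train.latest_checkpoint(para.ckptDir)
#   if ckpt is not None:
#       saver.restore(sess, ckpt)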
# =============================Graph Visualizing=============================
TIMESTAMP = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
merged_summary = tf.summary.merge_all()
samplingType = 'farthest_sampling'
pointNumber = para.pointNumber
neighborNumber = para.neighborNumber
print('Hyper-parameters:')
print('The point number and the nearest neighbor number are {} and {}'.format(para.pointNumber, para.neighborNumber))
print('The first and second layer filter numbers are {} and {}'.format(para.gcn_1_filter_n, para.gcn_2_filter_n))
print('The resolution for the second layer is {} and the point number per cluster is {}'.format(para.clusterNumberL1, para.nearestNeighborL1))
print('The fc neuron number is {} and the output class number is {}'.format(para.fc_1_n, para.outputClassN))
print('The Chebyshev polynomial orders for the two layers are {} and {}'.format(para.chebyshev_1_Order, para.chebyshev_2_Order))
print('The weighting scheme is {} and the weighting scaler is {}'.format(para.weighting_scheme, para.weight_scaler))

# ===============================Build model=============================
trainOperation, sess = model_architecture(para)
# ================================Load data===============================
inputTrain, trainLabel, inputTest, testLabel = load_data(pointNumber, samplingType)
scaledLaplacianTrain, scaledLaplacianTest = prepareData(inputTrain, inputTest, neighborNumber, pointNumber)
# ===============================Train model ================================

saver = tf.train.Saver()
learningRate = para.learningRate

modelDir = para.modelDir
save_model_path = modelDir + "model_" + para.fileName
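# save_model_path presumably feeds the Saver later in the training loop; a
# typical TF 1.x save call (the epoch variable is an assumption) would be:
#   saver.save(sess, save_model_path, global_step=epoch)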
weight_dict = weight_dict_fc(trainLabel, para)

testLabelWhole = []
for labels in testLabel:
    testLabelWhole.extend(labels)
testLabelWhole = np.asarray(testLabelWhole)
para.info()
para.log()
# ============================Define placeholders==========================
placeholders = {
    'isTraining':
    tf.placeholder(tf.bool, name='is_training'),
    'coordinate':
    tf.placeholder(tf.float32, [None, para.pointNumber, para.input_data_dim],
                   name='coordinate'),
    'label':
    tf.placeholder(tf.float32, [None, para.outputClassN], name='label'),
}
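# A minimal sketch (illustration only) of a feed_dict matching these
# placeholders; the batch size of 2 and the random/one-hot values are
# made-up demo data, and numpy is assumed to be imported as np.
demo_feed = {
    placeholders['isTraining']: True,
    placeholders['coordinate']:
    np.random.rand(2, para.pointNumber, para.input_data_dim),
    placeholders['label']: np.eye(para.outputClassN)[[0, 1]],
}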
# ================================Load data===============================
inputTrain, trainLabel, inputTest, testLabel = read_data.load_data(
    para.pointNumber, para.samplingType, para.dataDir)
scaledLaplacianTrain, scaledLaplacianTest = read_data.prepareData(
    inputTrain, inputTest, para.neighborNumber, para.pointNumber, para.dataDir)
# ================================Create model===============================
model = models.PointNet(para, placeholders, logging=True)
# =============================Initialize session============================
sess = tf.Session()
# ==============================Init variables===============================
if para.restoreModel:
    model.load(para.ckptDir, sess)
else:
    sess.run(tf.global_variables_initializer())
# =============================Graph Visualizing=============================
TIMESTAMP = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
merged_summary = tf.summary.merge_all()
# train log
train_log_dir = "tensorboard/train/" + TIMESTAMP
train_writer = tf.summary.FileWriter(train_log_dir)
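# Typical use of the writer (merged_summary is defined above; the step
# counter and feed_dict are assumptions about the surrounding loop):
train_writer.add_graph(sess.graph)
# summary = sess.run(merged_summary, feed_dict=...)  # inside the training loop
# train_writer.add_summary(summary, global_step=step)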