def create_network(points, batchIds, features, numInputFeatures, batchSize, k,
                   isTraining, multiConv=True, useMC=True):

    ############################################ Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds, [],
                                     "MCNormS_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.2)

    # Convolution 1
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        inFeatures=features, inNumFeatures=numInputFeatures, outNumFeatures=k,
        convRadius=0.15, multiFeatureConv=True)

    # BatchNorm and ReLU
    convFeatures1 = batch_norm_RELU_drop_out(
        "BN_RELU", convFeatures1, isTraining, False, False)

    # Convolution 2
    normals = mConvBuilder.create_convolution(
        convName="Conv_2", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        inFeatures=convFeatures1, inNumFeatures=k, outNumFeatures=3,
        convRadius=0.15, multiFeatureConv=True)

    return normals
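# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wiring the single-scale
# normal-estimation network above into a TF1 graph. The placeholder names,
# batch size, and k are illustrative assumptions only.
# ---------------------------------------------------------------------------
import tensorflow as tf

exPts = tf.placeholder(tf.float32, [None, 3], name='example_points')
exBatchIds = tf.placeholder(tf.int32, [None, 1], name='example_batchIds')
exFeatures = tf.placeholder(tf.float32, [None, 3], name='example_features')
exIsTraining = tf.placeholder(tf.bool, shape=(), name='example_isTraining')

exNormals = create_network(exPts, exBatchIds, exFeatures,
                           numInputFeatures=3, batchSize=8, k=64,
                           isTraining=exIsTraining)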
def compute_initial_pts(points, batchIds, features, batchSize):
    mPointHierarchy = PointHierarchy(points, features, batchIds, [0.01],
                                     "MCGIInit_PH1", batchSize,
                                     relativeRadius=False)
    return mPointHierarchy.sampledIndexs_[0], mPointHierarchy.points_[1]
def create_point_hierarchy_output(points, batchIds, features, batchSize,
                                  relRad=False):

    ############################################ Create the point hierarchy
    mPointHierarchy = PointHierarchy(inPoints=points,
                                     inFeatures=features,
                                     inBatchIds=batchIds,
                                     radiusList=[],
                                     hierarchyName="MCPtDeNoise_MPH_2",
                                     batchSize=batchSize,
                                     relativeRadius=relRad)
    return mPointHierarchy
def create_point_hierarchy_input(points, batchIds, features, batchSize,
                                 radiusList=[0.025, 0.05], relRad=False,
                                 hierarchyName="MCPtDeNoise_MPH_1"):

    ############################################ Create the point hierarchy
    mPointHierarchy = PointHierarchy(inPoints=points,
                                     inFeatures=features,
                                     inBatchIds=batchIds,
                                     radiusList=radiusList,
                                     hierarchyName=hierarchyName,
                                     batchSize=batchSize,
                                     relativeRadius=relRad)
    return mPointHierarchy
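# ---------------------------------------------------------------------------
# Hedged usage sketch (an assumption, not from the original file): the two
# helpers above build the input hierarchy (with pooling radii) and the output
# hierarchy (no pooling) over the same placeholders for a denoising setup.
# ---------------------------------------------------------------------------
import tensorflow as tf

dnPts = tf.placeholder(tf.float32, [None, 3])
dnFeatures = tf.placeholder(tf.float32, [None, 3])
dnBatchIds = tf.placeholder(tf.int32, [None, 1])

inHierarchy = create_point_hierarchy_input(dnPts, dnBatchIds, dnFeatures,
                                           batchSize=4)
outHierarchy = create_point_hierarchy_output(dnPts, dnBatchIds, dnFeatures,
                                             batchSize=4)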
def ptH(radii, scope, batchSize, numFeatures=3):
    inPts = tf.placeholder(tf.float32, [None, 3], name='ptH_points')
    inFeatures = tf.placeholder(tf.float32, [None, numFeatures],
                                name='ptH_features')
    inBatchIds = tf.placeholder(tf.int32, [None, 1], name='ptH_batchIds')
    feed_ptH = [inPts, inFeatures, inBatchIds]

    outPointHierarchy = PointHierarchy(inPts, inFeatures, inBatchIds, radii,
                                       scope, batchSize)

    PtHierPoints = [[]] + outPointHierarchy.points_[1:5]
    PtHierSampledPts = outPointHierarchy.sampledIndexs_[0:4]
    PtHierFeatures = [[]] + [outPointHierarchy.features_[1]]
    PtHierBatchIds = [[]] + outPointHierarchy.batchIds_[1:5]
    PtHierAabbMin = outPointHierarchy.aabbMin_
    PtHierAabbMax = outPointHierarchy.aabbMax_

    outPtH = [
        PtHierPoints, PtHierSampledPts, PtHierFeatures, PtHierBatchIds,
        PtHierAabbMin, PtHierAabbMax
    ]

    return outPtH, feed_ptH
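# ---------------------------------------------------------------------------
# Hedged usage sketch: evaluating the hierarchy built by ptH in a TF1 session.
# `myPoints`, `myFeatures`, and `myBatchIds` are hypothetical numpy arrays
# with shapes matching the placeholders above.
# ---------------------------------------------------------------------------
import tensorflow as tf

outPtH, feed_ptH = ptH(radii=[0.1, 0.2, 0.4, 0.8], scope="Example_PH",
                       batchSize=2)
with tf.Session() as sess:
    # Levels 1..4 of the hierarchy points (index 0 is the empty placeholder).
    levelPoints = sess.run(outPtH[0][1:], feed_dict={feed_ptH[0]: myPoints,
                                                     feed_ptH[1]: myFeatures,
                                                     feed_ptH[2]: myBatchIds})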
def create_network(points, batchIds, features, numInputFeatures, batchSize, k,
                   isTraining, multiConv=True, useMC=True):

    ############################################ Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds, [0.1, 0.4],
                                     "MCNorm_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    ############################################ Encoder

    # First Convolution
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        inFeatures=features, inNumFeatures=numInputFeatures, outNumFeatures=k,
        convRadius=0.1, multiFeatureConv=True)

    # First Pooling
    bnConvFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_In_BN", convFeatures1, isTraining, False, False)
    bnConvFeatures1 = conv_1x1("Reduce_Pool_1", bnConvFeatures1, k, k * 2)
    bnConvFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_Out_BN", bnConvFeatures1, isTraining, False, False)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        outPointLevel=1, inFeatures=bnConvFeatures1, inNumFeatures=k * 2,
        convRadius=0.2, KDEWindow=0.2)

    # Second Convolution
    bnPoolFeatures1 = batch_norm_RELU_drop_out(
        "Conv_2_In_BN", poolFeatures1, isTraining, False, False)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=bnPoolFeatures1, inNumFeatures=k * 2, convRadius=0.4)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Second Pooling
    bnConvFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_In_BN", convFeatures2, isTraining, False, False)
    bnConvFeatures2 = conv_1x1("Reduce_Pool_2", bnConvFeatures2, k * 4, k * 4)
    bnConvFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_Out_BN", bnConvFeatures2, isTraining, False, False)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=bnConvFeatures2, inNumFeatures=k * 4,
        convRadius=0.8, KDEWindow=0.2)

    # Third Convolution
    bnPoolFeatures2 = batch_norm_RELU_drop_out(
        "Conv_3_In_BN", poolFeatures2, isTraining, False, False)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        inFeatures=bnPoolFeatures2, inNumFeatures=k * 4,
        convRadius=math.sqrt(3))
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    ############################################ Multi-hierarchy sampling

    # Second upsampling
    bnFeatures3 = batch_norm_RELU_drop_out(
        "Up_2_3_BN", convFeatures3, isTraining, False, False)
    upFeatures2_3 = mConvBuilder.create_convolution(
        convName="Up_2_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=1, inFeatures=bnFeatures3, inNumFeatures=k * 8,
        convRadius=0.8)
    deConvFeatures2 = tf.concat([upFeatures2_3, convFeatures2], 1)
    deConvFeatures2 = batch_norm_RELU_drop_out(
        "DeConv_2_Reduce_In_BN", deConvFeatures2, isTraining, False, False)
    deConvFeatures2 = conv_1x1("DeConv_2_Reduce", deConvFeatures2, k * 12,
                               k * 4)
    deConvFeatures2 = batch_norm_RELU_drop_out(
        "DeConv_2_Reduce_Out_BN", deConvFeatures2, isTraining, False, False)
    deConvFeatures2 = mConvBuilder.create_convolution(
        convName="DeConv_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=deConvFeatures2, inNumFeatures=k * 4, convRadius=0.4)

    # First upsampling
    bnDeConvFeatures2 = batch_norm_RELU_drop_out(
        "Up_1_2_BN", deConvFeatures2, isTraining, False, False)
    upFeatures1_2 = mConvBuilder.create_convolution(
        convName="Up_1_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=0, inFeatures=bnDeConvFeatures2, inNumFeatures=k * 4,
        convRadius=0.2)
    deConvFeatures1 = tf.concat([upFeatures1_2, convFeatures1], 1)
    deConvFeatures1 = batch_norm_RELU_drop_out(
        "DeConv_1_Reduce_In_BN", deConvFeatures1, isTraining, False, False)
    deConvFeatures1 = conv_1x1("DeConv_1_Reduce", deConvFeatures1, k * 5,
                               k * 2)
    deConvFeatures1 = batch_norm_RELU_drop_out(
        "DeConv_1_Reduce_Out_BN", deConvFeatures1, isTraining, False, False)
    normals = mConvBuilder.create_convolution(
        convName="DeConv_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        inFeatures=deConvFeatures1, inNumFeatures=k * 2, outNumFeatures=3,
        convRadius=0.1, multiFeatureConv=True)

    return normals
def create_network(points, batchIds, features, numInputFeatures, batchSize, k,
                   numOutCat, isTraining, keepProbConv, keepProbFull,
                   useConvDropOut=False, useDropOutFull=True):

    ############################################ Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.1, 0.4, math.sqrt(3.0) + 0.1],
                                     "MCClassS_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.2)

    # Convolution 1
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        outPointLevel=1, inFeatures=features, inNumFeatures=numInputFeatures,
        outNumFeatures=k, convRadius=0.2, multiFeatureConv=True)

    # Convolution 2
    convFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_1_In_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures1 = conv_1x1("Reduce_1", convFeatures1, k, k * 2)
    convFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_1_Out_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=convFeatures1, inNumFeatures=k * 2,
        convRadius=0.8)

    # Convolution 3
    convFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_2_In_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures2 = conv_1x1("Reduce_2", convFeatures2, k * 2, k * 4)
    convFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_2_Out_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=3, inFeatures=convFeatures2, inNumFeatures=k * 4,
        convRadius=math.sqrt(3.0) + 0.1)

    # Fully connected MLP - Global features.
    finalInput = batch_norm_RELU_drop_out(
        "BNRELUDROP_final", convFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    finalLogits = MLP_2_hidden(finalInput, k * 4, k * 2, k, numOutCat,
                               "Final_Logits", keepProbFull, isTraining,
                               useDropOutFull)

    return finalLogits
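# ---------------------------------------------------------------------------
# Hedged training-head sketch (an assumption, not from the original file):
# a standard TF1 cross-entropy loss over the logits returned by the classifier
# above, with the batch-norm update ops attached to the train op. Placeholder
# shapes, batch size, k, and the category count are illustrative.
# ---------------------------------------------------------------------------
import tensorflow as tf

clsPts = tf.placeholder(tf.float32, [None, 3])
clsBatchIds = tf.placeholder(tf.int32, [None, 1])
clsFeatures = tf.placeholder(tf.float32, [None, 3])
clsIsTraining = tf.placeholder(tf.bool, shape=())
clsLabels = tf.placeholder(tf.int32, [None])

clsLogits = create_network(clsPts, clsBatchIds, clsFeatures,
                           numInputFeatures=3, batchSize=16, k=64,
                           numOutCat=40, isTraining=clsIsTraining,
                           keepProbConv=0.8, keepProbFull=0.5)
clsLoss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=clsLabels,
                                                   logits=clsLogits))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    clsTrainOp = tf.train.AdamOptimizer(1e-3).minimize(clsLoss)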
def create_network(points, batchIds, features, catLabels, numInputFeatures,
                   numCats, numParts, batchSize, k, isTraining, keepProbConv,
                   keepProbFull, useConvDropOut=False, useDropOutFull=True):

    ############################################ Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.025, 0.1, 0.4], "MCSeg_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    ############################################ Encoder

    # First Convolution
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        inFeatures=features, inNumFeatures=numInputFeatures, outNumFeatures=k,
        convRadius=0.03, multiFeatureConv=True)

    # First Pooling
    bnConvFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_In_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    bnConvFeatures1 = conv_1x1("Reduce_Pool_1", bnConvFeatures1, k, k * 2)
    bnConvFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_Out_BN", bnConvFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        outPointLevel=1, inFeatures=bnConvFeatures1, inNumFeatures=k * 2,
        convRadius=0.05, KDEWindow=0.2)

    # Second Convolution
    bnPoolFeatures1 = batch_norm_RELU_drop_out(
        "Conv_2_In_BN", poolFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=bnPoolFeatures1, inNumFeatures=k * 2, convRadius=0.1)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Second Pooling
    bnConvFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_In_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    bnConvFeatures2 = conv_1x1("Reduce_Pool_2", bnConvFeatures2, k * 4, k * 4)
    bnConvFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_Out_BN", bnConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=bnConvFeatures2, inNumFeatures=k * 4,
        convRadius=0.2, KDEWindow=0.2)

    # Third Convolution
    bnPoolFeatures2 = batch_norm_RELU_drop_out(
        "Conv_3_In_BN", poolFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        inFeatures=bnPoolFeatures2, inNumFeatures=k * 4, convRadius=0.4)
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    # Third Pooling
    bnConvFeatures3 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_In_BN", convFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    bnConvFeatures3 = conv_1x1("Reduce_Pool_3", bnConvFeatures3, k * 8, k * 8)
    bnConvFeatures3 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_Out_BN", bnConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=3, inFeatures=bnConvFeatures3, inNumFeatures=k * 8,
        convRadius=0.8, KDEWindow=0.2)

    # Fourth Convolution
    bnPoolFeatures3 = batch_norm_RELU_drop_out(
        "Conv_4_In_BN", poolFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures4 = mConvBuilder.create_convolution(
        convName="Conv_4", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        inFeatures=bnPoolFeatures3, inNumFeatures=k * 8,
        convRadius=math.sqrt(3.0) + 0.1)
    convFeatures4 = tf.concat([poolFeatures3, convFeatures4], 1)

    ############################################ Decoder

    # Third upsampling
    bnConvFeatures4 = batch_norm_RELU_drop_out(
        "Up_3_4_BN", convFeatures4, isTraining, useConvDropOut, keepProbConv)
    upFeatures3_4 = mConvBuilder.create_convolution(
        convName="Up_3_4", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        outPointLevel=2, inFeatures=bnConvFeatures4, inNumFeatures=k * 16,
        convRadius=math.sqrt(3.0) + 0.1)
    deConvFeatures3 = tf.concat([upFeatures3_4, convFeatures3], 1)
    deConvFeatures3 = batch_norm_RELU_drop_out(
        "DeConv_3_Reduce_In_BN", deConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures3 = conv_1x1("DeConv_3_Reduce", deConvFeatures3, k * 24,
                               k * 8)
    deConvFeatures3 = batch_norm_RELU_drop_out(
        "DeConv_3_Reduce_Out_BN", deConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures3 = mConvBuilder.create_convolution(
        convName="DeConv_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        inFeatures=deConvFeatures3, inNumFeatures=k * 8, convRadius=0.4)

    # Second upsampling
    bnDeConvFeatures3 = batch_norm_RELU_drop_out(
        "Up_2_3_BN", deConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures2_3 = mConvBuilder.create_convolution(
        convName="Up_2_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=1, inFeatures=bnDeConvFeatures3, inNumFeatures=k * 8,
        convRadius=0.2)
    deConvFeatures2 = tf.concat([upFeatures2_3, convFeatures2], 1)
    deConvFeatures2 = batch_norm_RELU_drop_out(
        "DeConv_2_Reduce_In_BN", deConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures2 = conv_1x1("DeConv_2_Reduce", deConvFeatures2, k * 12,
                               k * 4)
    deConvFeatures2 = batch_norm_RELU_drop_out(
        "DeConv_2_Reduce_Out_BN", deConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures2 = mConvBuilder.create_convolution(
        convName="DeConv_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=deConvFeatures2, inNumFeatures=k * 4, convRadius=0.1)

    # First multiple upsamplings
    bnDeConvFeatures2 = batch_norm_RELU_drop_out(
        "Up_1_2_BN", deConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures1_2 = mConvBuilder.create_convolution(
        convName="Up_1_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=0, inFeatures=bnDeConvFeatures2, inNumFeatures=k * 4,
        convRadius=0.05)
    bnDeConvFeatures3 = batch_norm_RELU_drop_out(
        "Up_1_3_BN", deConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures1_3 = mConvBuilder.create_convolution(
        convName="Up_1_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=0, inFeatures=bnDeConvFeatures3, inNumFeatures=k * 8,
        convRadius=0.2)
    deConvFeatures1 = tf.concat(
        [upFeatures1_2, upFeatures1_3, convFeatures1], 1)
    deConvFeatures1 = batch_norm_RELU_drop_out(
        "DeConv_1_Reduce_In_BN", deConvFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures1 = conv_1x1("DeConv_1_Reduce", deConvFeatures1, k * 13,
                               k * 4)
    deConvFeatures1 = batch_norm_RELU_drop_out(
        "DeConv_1_Reduce_Out_BN", deConvFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures1 = mConvBuilder.create_convolution(
        convName="DeConv_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        inFeatures=deConvFeatures1, inNumFeatures=k * 4, convRadius=0.03)

    # Fully connected MLP - Global features.
    finalInput = batch_norm_RELU_drop_out(
        "BNRELUDROP_hier_final", deConvFeatures1, isTraining, useConvDropOut,
        keepProbConv)

    # Convert cat labels
    catLabelOneHot = tf.one_hot(catLabels, numCats, on_value=1.0,
                                off_value=0.0)
    catLabelOneHot = tf.reshape(catLabelOneHot, [-1, numCats])
    finalInput = tf.concat([catLabelOneHot, finalInput], 1)
    finalLogits = MLP_2_hidden(finalInput, k * 4 + numCats, k * 4, k * 2,
                               numParts, "Final_Logits", keepProbFull,
                               isTraining, useDropOutFull, useInitBN=False)

    return finalLogits
def create_network(points, batchIds, features, numInputFeatures, numSem,
                   batchSize, k, isTraining, keepProbConv, keepProbFull,
                   useConvDropOut=False, useDropOutFull=True):

    ############################################ Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.1, 0.2, 0.4, 0.8], "MCSegScanNet_PH",
                                     batchSize, False)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25, relativeRadius=False)

    ############################################ Encoder

    # Init pooling
    poolFeatures0 = mConvBuilder.create_convolution(
        convName="Pool_0", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        outPointLevel=1, inFeatures=features, inNumFeatures=numInputFeatures,
        outNumFeatures=k, convRadius=0.1, KDEWindow=0.2,
        multiFeatureConv=True)

    # First Convolution
    bnPoolFeatures0 = batch_norm_RELU_drop_out(
        "Conv_1_In_BN", poolFeatures0, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=bnPoolFeatures0, inNumFeatures=k, convRadius=0.4)
    convFeatures1 = tf.concat([poolFeatures0, convFeatures1], 1)

    # First Pooling
    bnConvFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_In_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    bnConvFeatures1 = conv_1x1("Reduce_Pool_1", bnConvFeatures1, k * 2, k * 2)
    bnConvFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_Out_BN", bnConvFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=bnConvFeatures1, inNumFeatures=k * 2,
        convRadius=0.4, KDEWindow=0.2)

    # Second Convolution
    bnPoolFeatures1 = batch_norm_RELU_drop_out(
        "Conv_2_In_BN", poolFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        inFeatures=bnPoolFeatures1, inNumFeatures=k * 2, convRadius=0.8)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Second Pooling
    bnConvFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_In_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    bnConvFeatures2 = conv_1x1("Reduce_Pool_2", bnConvFeatures2, k * 4, k * 4)
    bnConvFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_Out_BN", bnConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=3, inFeatures=bnConvFeatures2, inNumFeatures=k * 4,
        convRadius=0.8, KDEWindow=0.2)

    # Third Convolution
    bnPoolFeatures2 = batch_norm_RELU_drop_out(
        "Conv_3_In_BN", poolFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        inFeatures=bnPoolFeatures2, inNumFeatures=k * 4, convRadius=1.6)
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    # Third Pooling
    bnConvFeatures3 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_In_BN", convFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    bnConvFeatures3 = conv_1x1("Reduce_Pool_3", bnConvFeatures3, k * 8, k * 8)
    bnConvFeatures3 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_Out_BN", bnConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        outPointLevel=4, inFeatures=bnConvFeatures3, inNumFeatures=k * 8,
        convRadius=1.6, KDEWindow=0.2)

    # Fourth Convolution
    bnPoolFeatures3 = batch_norm_RELU_drop_out(
        "Conv_4_In_BN", poolFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures4 = mConvBuilder.create_convolution(
        convName="Conv_4", inPointHierarchy=mPointHierarchy, inPointLevel=4,
        inFeatures=bnPoolFeatures3, inNumFeatures=k * 8, convRadius=5.0)
    convFeatures4 = tf.concat([poolFeatures3, convFeatures4], 1)

    ############################################ Decoder

    # Third upsampling
    bnConvFeatures4 = batch_norm_RELU_drop_out(
        "Up3_4_Reduce_In_BN", convFeatures4, isTraining, useConvDropOut,
        keepProbConv)
    bnConvFeatures4 = conv_1x1("Up3_4_Reduce", bnConvFeatures4, k * 16, k * 8)
    bnConvFeatures4 = batch_norm_RELU_drop_out(
        "Up3_4_Reduce_Out_BN", bnConvFeatures4, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures3_4 = mConvBuilder.create_convolution(
        convName="Up_3_4", inPointHierarchy=mPointHierarchy, inPointLevel=4,
        outPointLevel=3, inFeatures=bnConvFeatures4, inNumFeatures=k * 8,
        convRadius=1.6)
    upFeatures3_4 = tf.concat([upFeatures3_4, convFeatures3], 1)
    deConvFeatures3 = batch_norm_RELU_drop_out(
        "DeConv_3_Reduce_In_BN", upFeatures3_4, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures3 = conv_1x1("DeConv_3_Reduce", deConvFeatures3, k * 16,
                               k * 8)
    deConvFeatures3 = batch_norm_RELU_drop_out(
        "DeConv_3_Reduce_Out_BN", deConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures3 = mConvBuilder.create_convolution(
        convName="DeConv_3", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        inFeatures=deConvFeatures3, inNumFeatures=k * 8, convRadius=1.6)

    # Second upsampling
    bnDeConvFeatures3 = batch_norm_RELU_drop_out(
        "Up2_3_Reduce_In_BN", deConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    bnDeConvFeatures3 = conv_1x1("Up2_3_Reduce", bnDeConvFeatures3, k * 8,
                                 k * 4)
    bnDeConvFeatures3 = batch_norm_RELU_drop_out(
        "Up2_3_Reduce_Out_BN", bnDeConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures2_3 = mConvBuilder.create_convolution(
        convName="Up_2_3", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        outPointLevel=2, inFeatures=bnDeConvFeatures3, inNumFeatures=k * 4,
        convRadius=0.8)
    upFeatures2_3 = tf.concat([upFeatures2_3, convFeatures2], 1)
    deConvFeatures2 = batch_norm_RELU_drop_out(
        "DeConv_2_Reduce_In_BN", upFeatures2_3, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures2 = conv_1x1("DeConv_2_Reduce", deConvFeatures2, k * 8,
                               k * 4)
    deConvFeatures2 = batch_norm_RELU_drop_out(
        "DeConv_2_Reduce_Out_BN", deConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures2 = mConvBuilder.create_convolution(
        convName="DeConv_2", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        inFeatures=deConvFeatures2, inNumFeatures=k * 4, convRadius=0.8)

    # First multiple upsamplings
    bnDeConvFeatures2 = batch_norm_RELU_drop_out(
        "Up1_2_Reduce_In_BN", deConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    bnDeConvFeatures2 = conv_1x1("Up1_2_Reduce", bnDeConvFeatures2, k * 4,
                                 k * 2)
    bnDeConvFeatures2 = batch_norm_RELU_drop_out(
        "Up1_2_Reduce_Out_BN", bnDeConvFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures1_2 = mConvBuilder.create_convolution(
        convName="Up_1_2", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=1, inFeatures=bnDeConvFeatures2, inNumFeatures=k * 2,
        convRadius=0.4)

    bnDeConvFeatures3 = batch_norm_RELU_drop_out(
        "Up1_3_Reduce_In_BN", deConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    bnDeConvFeatures3 = conv_1x1("Up1_3_Reduce", bnDeConvFeatures3, k * 8,
                                 k * 2)
    bnDeConvFeatures3 = batch_norm_RELU_drop_out(
        "Up1_3_Reduce_Out_BN", bnDeConvFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures1_3 = mConvBuilder.create_convolution(
        convName="Up_1_3", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        outPointLevel=1, inFeatures=bnDeConvFeatures3, inNumFeatures=k * 2,
        convRadius=0.8)

    bnDeConvFeatures4 = batch_norm_RELU_drop_out(
        "Up1_4_Reduce_In_BN", convFeatures4, isTraining, useConvDropOut,
        keepProbConv)
    bnDeConvFeatures4 = conv_1x1("Up1_4_Reduce", bnDeConvFeatures4, k * 16,
                                 k * 2)
    bnDeConvFeatures4 = batch_norm_RELU_drop_out(
        "Up1_4_Reduce_Out_BN", bnDeConvFeatures4, isTraining, useConvDropOut,
        keepProbConv)
    upFeatures1_4 = mConvBuilder.create_convolution(
        convName="Up_1_4", inPointHierarchy=mPointHierarchy, inPointLevel=4,
        outPointLevel=1, inFeatures=bnDeConvFeatures4, inNumFeatures=k * 2,
        convRadius=1.6)

    upFeatures1 = tf.concat(
        [upFeatures1_4, upFeatures1_3, upFeatures1_2, convFeatures1], 1)
    deConvFeatures1 = batch_norm_RELU_drop_out(
        "DeConv_1_Reduce_In_BN", upFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures1 = conv_1x1("DeConv_1_Reduce", deConvFeatures1, k * 8,
                               k * 4)
    deConvFeatures1 = batch_norm_RELU_drop_out(
        "DeConv_1_Reduce_Out_BN", deConvFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    deConvFeatures1 = mConvBuilder.create_convolution(
        convName="DeConv_1", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=deConvFeatures1, inNumFeatures=k * 4, convRadius=0.4)
    deConvFeatures1 = tf.concat([
        upFeatures1_4, upFeatures1_3, upFeatures1_2, convFeatures1,
        deConvFeatures1
    ], 1)

    # Final upsampling
    upFeaturesFinal = batch_norm_RELU_drop_out(
        "Up_Final_Reduce_In_BN", deConvFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    upFeaturesFinal = conv_1x1("Up_Final_Reduce", upFeaturesFinal, k * 12,
                               k * 4)
    upFeaturesFinal = batch_norm_RELU_drop_out(
        "Up_Final_Reduce_Out_BN", upFeaturesFinal, isTraining, useConvDropOut,
        keepProbConv)
    finalFeatures = mConvBuilder.create_convolution(
        convName="Up_0_1", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=0, inFeatures=upFeaturesFinal, inNumFeatures=k * 4,
        convRadius=0.1)

    # Fully connected MLP - Global features.
    finalInput = batch_norm_RELU_drop_out(
        "BNRELUDROP_hier_final", finalFeatures, isTraining, useConvDropOut,
        keepProbConv)
    finalLogits = MLP_2_hidden(finalInput, k * 4, k * 4, k * 2, numSem,
                               "Final_Logits", keepProbFull, isTraining,
                               useDropOutFull, useInitBN=False)

    return finalLogits
def create_network(points, features, batchIds, batchSize, numInputFeatures, k,
                   numOutCat, numOutputs, isTraining, keepProbConv,
                   keepProbFull, useConvDropOut=False, useDropOutFull=True,
                   useRenorm=False, BNMomentum=0.99, activation='relu'):

    if activation == 'relu':
        from MCNetworkUtils import (batch_norm_RELU_drop_out
                                    as batch_norm_activation_drop_out)
    elif activation == 'leakyrelu':
        from MCNetworkUtils import (batch_norm_leakyRELU_drop_out
                                    as batch_norm_activation_drop_out)
    elif activation == 'mish':
        from MCNetworkUtils import (batch_norm_mish_drop_out
                                    as batch_norm_activation_drop_out)
    else:
        # The original only printed here and later failed with a NameError;
        # raising makes the failure explicit.
        raise ValueError('Unknown activation function: %s' % activation)

    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.025, 0.1, 0.4, math.sqrt(3.0) + 0.1],
                                     "MCSphere", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    # Zeroth Pooling
    convFeatures0 = conv_1x1("Reduce_Pool_0", features, numInputFeatures,
                             k * 2)
    convFeatures0 = batch_norm_activation_drop_out(
        "Reduce_Pool_0_Out_BN", convFeatures0, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures0 = mConvBuilder.create_convolution(
        convName="Pool_0", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        outPointLevel=1, inFeatures=convFeatures0, inNumFeatures=k * 2,
        convRadius=0.05, KDEWindow=0.2)

    # First Pooling
    convFeatures1 = batch_norm_activation_drop_out(
        "Reduce_Pool_1_In_BN", poolFeatures0, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    convFeatures1 = conv_1x1("Reduce_Pool_1", convFeatures1, k * 2, k * 4)
    convFeatures1 = batch_norm_activation_drop_out(
        "Reduce_Pool_1_Out_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=convFeatures1, inNumFeatures=k * 4,
        convRadius=0.2, KDEWindow=0.2)

    # Second Pooling
    convFeatures2 = batch_norm_activation_drop_out(
        "Reduce_Pool_2_In_BN", poolFeatures1, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    convFeatures2 = conv_1x1("Reduce_Pool_2", convFeatures2, k * 4, k * 16)
    convFeatures2 = batch_norm_activation_drop_out(
        "Reduce_Pool_2_Out_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=3, inFeatures=convFeatures2, inNumFeatures=k * 16,
        convRadius=0.3, KDEWindow=0.2)

    # Third Pooling
    convFeatures3 = batch_norm_activation_drop_out(
        "Reduce_Pool_3_In_BN", poolFeatures2, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    convFeatures3 = conv_1x1("Reduce_Pool_3", convFeatures3, k * 16, k * 32)
    convFeatures3 = batch_norm_activation_drop_out(
        "Reduce_Pool_3_Out_BN", convFeatures3, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        outPointLevel=4, inFeatures=convFeatures3, inNumFeatures=k * 32,
        convRadius=math.sqrt(3.0) + 0.1, KDEWindow=0.2)

    # Image decoder - Global features.
    encoderOutput = batch_norm_activation_drop_out(
        "BNRELUDROP_finalencoder", poolFeatures3, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    encoderOutput = tf.reshape(encoderOutput, [-1, 1, 1, k * 32])

    finalPredictions = []
    for vq_i in range(numOutputs):
        # 1x1 -> 4x4 (kernel 4, stride 1, 'valid' padding).
        deconvLayer0 = tf.keras.layers.Conv2DTranspose(
            k * 16, [4, 4], strides=(1, 1), data_format='channels_last',
            input_shape=[1, 1, k * 32], name='deconvFeatures0' + str(vq_i))
        deconvFeatures0 = deconvLayer0(encoderOutput)
        deconvFeatures0 = resblock_2d(deconvFeatures0, k * 16, [3, 3], 2)
        deconvFeatures0 = batch_norm_activation_drop_out(
            "BNRELUDROP_deconv0" + str(vq_i), deconvFeatures0, isTraining,
            useConvDropOut, keepProbConv, useRenorm, BNMomentum)

        # 4x4 -> 16x16. The original reused the name 'deconvFeatures0' for
        # all three transposed convolutions; the names below are made unique
        # so the layers are distinguishable in the graph.
        deconvLayer1 = tf.keras.layers.Conv2DTranspose(
            k * 4, [4, 4], strides=(4, 4), data_format='channels_last',
            input_shape=[4, 4, k * 16], name='deconvFeatures1' + str(vq_i))
        deconvFeatures1 = deconvLayer1(deconvFeatures0)
        deconvFeatures1 = resblock_2d(deconvFeatures1, k * 4, [3, 3], 2)
        deconvFeatures1 = batch_norm_activation_drop_out(
            "BNRELUDROP_deconv1" + str(vq_i), deconvFeatures1, isTraining,
            useConvDropOut, keepProbConv, useRenorm, BNMomentum)

        # 16x16 -> 32x32, a single output channel per predicted image.
        deconvLayer2 = tf.keras.layers.Conv2DTranspose(
            1, [2, 2], strides=(2, 2), data_format='channels_last',
            input_shape=[16, 16, k * 4], name='deconvFeatures2' + str(vq_i))
        predicted_images = deconvLayer2(deconvFeatures1)
        # predicted_images = tf.squeeze(predicted_images)
        finalPredictions.append(tf.reshape(predicted_images, [-1, 32, 32]))

    return finalPredictions
def create_network(points, features, batchIds, batchSize, numInputFeatures, k,
                   numOutCat, numOutputs, isTraining, keepProbConv,
                   keepProbFull, useConvDropOut=False, useDropOutFull=True,
                   useRenorm=False, BNMomentum=0.99, activation='relu'):

    if activation == 'relu':
        from MCNetworkUtils import (batch_norm_RELU_drop_out
                                    as batch_norm_activation_drop_out)
    elif activation == 'leakyrelu':
        from MCNetworkUtils import (batch_norm_leakyRELU_drop_out
                                    as batch_norm_activation_drop_out)
    elif activation == 'mish':
        from MCNetworkUtils import (batch_norm_mish_drop_out
                                    as batch_norm_activation_drop_out)
    else:
        # The original only printed here and later failed with a NameError;
        # raising makes the failure explicit.
        raise ValueError('Unknown activation function: %s' % activation)

    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.025, 0.1, 0.4, math.sqrt(3.0) + 0.1],
                                     "MCSphere", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    # Zeroth Pooling
    convFeatures0 = conv_1x1("Reduce_Pool_0", features, numInputFeatures,
                             k * 2)
    convFeatures0 = batch_norm_activation_drop_out(
        "Reduce_Pool_0_Out_BN", convFeatures0, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures0 = mConvBuilder.create_convolution(
        convName="Pool_0", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        outPointLevel=1, inFeatures=convFeatures0, inNumFeatures=k * 2,
        convRadius=0.05, KDEWindow=0.2)

    # First Pooling
    convFeatures1 = batch_norm_activation_drop_out(
        "Reduce_Pool_1_In_BN", poolFeatures0, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    convFeatures1 = conv_1x1("Reduce_Pool_1", convFeatures1, k * 2, k * 4)
    convFeatures1 = batch_norm_activation_drop_out(
        "Reduce_Pool_1_Out_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=convFeatures1, inNumFeatures=k * 4,
        convRadius=0.2, KDEWindow=0.2)

    # Second Pooling
    convFeatures2 = batch_norm_activation_drop_out(
        "Reduce_Pool_2_In_BN", poolFeatures1, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    convFeatures2 = conv_1x1("Reduce_Pool_2", convFeatures2, k * 4, k * 16)
    convFeatures2 = batch_norm_activation_drop_out(
        "Reduce_Pool_2_Out_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=3, inFeatures=convFeatures2, inNumFeatures=k * 16,
        convRadius=0.3, KDEWindow=0.2)

    # Third Pooling
    convFeatures3 = batch_norm_activation_drop_out(
        "Reduce_Pool_3_In_BN", poolFeatures2, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    convFeatures3 = conv_1x1("Reduce_Pool_3", convFeatures3, k * 16, k * 32)
    convFeatures3 = batch_norm_activation_drop_out(
        "Reduce_Pool_3_Out_BN", convFeatures3, isTraining, useConvDropOut,
        keepProbConv, useRenorm, BNMomentum)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3", inPointHierarchy=mPointHierarchy, inPointLevel=3,
        outPointLevel=4, inFeatures=convFeatures3, inNumFeatures=k * 32,
        convRadius=math.sqrt(3.0) + 0.1, KDEWindow=0.2)

    # Fully connected MLP - Global features.
finalInput = batch_norm_activation_drop_out("BNRELUDROP_final", poolFeatures3, isTraining, useConvDropOut, keepProbConv, useRenorm, BNMomentum) final = [] for vq_i in range(numOutputs): Predictions = MLP_2_hidden(finalInput, k * 32, k * 16, k * 8, numOutCat, "Final_Predictions" + str(vq_i), keepProbFull, isTraining, useDropOutFull, useRenorm=useRenorm, BNMomentum=BNMomentum) final.append(Predictions) finalPredictions = tf.reshape(tf.stack(final, axis=1), [-1, 3 * numOutputs]) return finalPredictions
def create_network(points, batchIds, features, points2, batchIds2, features2,
                   outputs, batchSize, k, isTraining, bnMomentum, brnMomentum,
                   brnClipping, keepProbConv, keepProbFull,
                   useConvDropOut=False, useDropOutFull=True, dataset=0):

    def BN_NL_DP_Conv(layerName, inFeatures):
        inFeatures = tf.layers.batch_normalization(
            inputs=inFeatures, momentum=bnMomentum, trainable=True,
            training=isTraining, name=layerName + "_BN", renorm=True,
            renorm_clipping=brnClipping, renorm_momentum=brnMomentum)
        inFeatures = tf.nn.leaky_relu(inFeatures)
        if useConvDropOut:
            inFeatures = tf.nn.dropout(inFeatures, keepProbConv)
        return inFeatures

    def BN_NL_DP_F(layerName, inFeatures):
        inFeatures = tf.layers.batch_normalization(
            inputs=inFeatures, momentum=bnMomentum, trainable=True,
            training=isTraining, name=layerName + "_BN", renorm=True,
            renorm_clipping=brnClipping, renorm_momentum=brnMomentum)
        inFeatures = tf.nn.leaky_relu(inFeatures)
        if useDropOutFull:
            inFeatures = tf.nn.dropout(inFeatures, keepProbFull)
        return inFeatures

    ############################################ Compute point hierarchy
    mPointHierarchy1 = PointHierarchy(points, features, batchIds,
                                      [0.05, 0.1, 0.2], "MCGI_PH1", batchSize,
                                      relativeRadius=False)
    mPointHierarchy2 = PointHierarchy(points2, features2, batchIds2, [],
                                      "MCGI_PH2", batchSize,
                                      relativeRadius=False)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25, relativeRadius=False)

    resultColor = []
    with tf.variable_scope("feature_channel_scope", reuse=tf.AUTO_REUSE):
        for i in range(3):
            if dataset == 1:
                # NOTE: inputFeatures2 keeps only 3 channels here while
                # numInFeatures is 5; the Final_MLP1 width below assumes the
                # two match, so this branch looks inconsistent as written.
                numInFeatures = 5
                inputFeatures = features[:, 0:3]
                inputFeatures = tf.concat(
                    [inputFeatures, features[:, 3 + i:3 + i + 1]], axis=1)
                inputFeatures = tf.concat(
                    [inputFeatures, features[:, 6 + i:6 + i + 1]], axis=1)
                inputFeatures2 = features2[:, 0:3]
            elif dataset == 2:
                numInFeatures = 7
                inputFeatures = features[:, 0:3]
                inputFeatures = tf.concat(
                    [inputFeatures, features[:, 3 + i:3 + i + 1]], axis=1)
                inputFeatures = tf.concat(
                    [inputFeatures, features[:, 6 + i:6 + i + 1]], axis=1)
                inputFeatures = tf.concat(
                    [inputFeatures, features[:, 9 + i:9 + i + 1]], axis=1)
                inputFeatures = tf.concat(
                    [inputFeatures, features[:, 12:13]], axis=1)
                inputFeatures2 = features2[:, 0:3]
                inputFeatures2 = tf.concat(
                    [inputFeatures2, features2[:, 3 + i:3 + i + 1]], axis=1)
                inputFeatures2 = tf.concat(
                    [inputFeatures2, features2[:, 6 + i:6 + i + 1]], axis=1)
                inputFeatures2 = tf.concat(
                    [inputFeatures2, features2[:, 9 + i:9 + i + 1]], axis=1)
                inputFeatures2 = tf.concat(
                    [inputFeatures2, features2[:, 12:13]], axis=1)
            else:
                numInFeatures = 3
                inputFeatures = features
                inputFeatures2 = features2

            ############################################ Encoder

            # First convolution
            convFeatures1 = mConvBuilder.create_convolution(
                convName="Conv_1", inPointHierarchy=mPointHierarchy1,
                inPointLevel=0, inFeatures=inputFeatures,
                inNumFeatures=numInFeatures, outNumFeatures=k,
                convRadius=0.025, multiFeatureConv=True)

            # First pooling
            bnConvFeatures1 = BN_NL_DP_Conv("Reduce_Pool_1_In_BN",
                                            convFeatures1)
            bnConvFeatures1 = conv_1x1("Reduce_Pool_1", bnConvFeatures1, k,
                                       k * 2)
            bnConvFeatures1 = BN_NL_DP_Conv("Reduce_Pool_1_Out_BN",
                                            bnConvFeatures1)
            poolFeatures1 = mConvBuilder.create_convolution(
                convName="Pool_1", inPointHierarchy=mPointHierarchy1,
                inPointLevel=0, outPointLevel=1, inFeatures=bnConvFeatures1,
                inNumFeatures=k * 2, convRadius=0.05, KDEWindow=0.2)

            # Second convolution
            bnPoolFeatures1 = BN_NL_DP_Conv("Reduce_Conv_2_In_BN",
                                            poolFeatures1)
            bnPoolFeatures1 = conv_1x1("Reduce_Conv_2", bnPoolFeatures1,
                                       k * 2, k * 2)
            bnPoolFeatures1 = BN_NL_DP_Conv("Reduce_Conv_2_Out_BN",
                                            bnPoolFeatures1)
            convFeatures2 = mConvBuilder.create_convolution(
                convName="Conv_2", inPointHierarchy=mPointHierarchy1,
                inPointLevel=1, inFeatures=bnPoolFeatures1,
                inNumFeatures=k * 2, convRadius=0.1)
            convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

            # Second pooling
            bnConvFeatures2 = BN_NL_DP_Conv("Reduce_Pool_2_In_BN",
                                            convFeatures2)
            bnConvFeatures2 = conv_1x1("Reduce_Pool_2", bnConvFeatures2,
                                       k * 4, k * 4)
            bnConvFeatures2 = BN_NL_DP_Conv("Reduce_Pool_2_Out_BN",
                                            bnConvFeatures2)
            poolFeatures2 = mConvBuilder.create_convolution(
                convName="Pool_2", inPointHierarchy=mPointHierarchy1,
                inPointLevel=1, outPointLevel=2, inFeatures=bnConvFeatures2,
                inNumFeatures=k * 4, convRadius=0.2, KDEWindow=0.2)

            # Third convolution
            bnPoolFeatures2 = BN_NL_DP_Conv("Reduce_Conv_3_In_BN",
                                            poolFeatures2)
            bnPoolFeatures2 = conv_1x1("Reduce_Conv_3", bnPoolFeatures2,
                                       k * 4, k * 4)
            bnPoolFeatures2 = BN_NL_DP_Conv("Reduce_Conv_3_Out_BN",
                                            bnPoolFeatures2)
            convFeatures3 = mConvBuilder.create_convolution(
                convName="Conv_3", inPointHierarchy=mPointHierarchy1,
                inPointLevel=2, inFeatures=bnPoolFeatures2,
                inNumFeatures=k * 4, convRadius=0.2)
            convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

            # Third pooling
            bnConvFeatures3 = BN_NL_DP_Conv("Reduce_Pool_3_In_BN",
                                            convFeatures3)
            bnConvFeatures3 = conv_1x1("Reduce_Pool_3", bnConvFeatures3,
                                       k * 8, k * 8)
            bnConvFeatures3 = BN_NL_DP_Conv("Reduce_Pool_3_Out_BN",
                                            bnConvFeatures3)
            poolFeatures3 = mConvBuilder.create_convolution(
                convName="Pool_3", inPointHierarchy=mPointHierarchy1,
                inPointLevel=2, outPointLevel=3, inFeatures=bnConvFeatures3,
                inNumFeatures=k * 8, convRadius=0.4, KDEWindow=0.2)

            # Fourth convolution
            bnPoolFeatures3 = BN_NL_DP_Conv("Reduce_Conv_4_In_BN",
                                            poolFeatures3)
            bnPoolFeatures3 = conv_1x1("Reduce_Conv_4", bnPoolFeatures3,
                                       k * 8, k * 8)
            bnPoolFeatures3 = BN_NL_DP_Conv("Reduce_Conv_4_Out_BN",
                                            bnPoolFeatures3)
            convFeatures4 = mConvBuilder.create_convolution(
                convName="Conv_4", inPointHierarchy=mPointHierarchy1,
                inPointLevel=3, inFeatures=bnPoolFeatures3,
                inNumFeatures=k * 8, convRadius=1.0, KDEWindow=0.2)

            ############################################ Decoder

            # Third upsampling
            bnConvFeatures4 = BN_NL_DP_Conv("Up_3_BN", convFeatures4)
            upFeatures3 = mConvBuilder.create_convolution(
                convName="Up_3", inPointHierarchy=mPointHierarchy1,
                inPointLevel=3, outPointLevel=2, inFeatures=bnConvFeatures4,
                inNumFeatures=k * 8, convRadius=0.4, KDEWindow=0.2)
            upFeatures3 = tf.concat([convFeatures3, upFeatures3], 1)
            upFeatures3 = BN_NL_DP_Conv("Up_3_Reduce_BN", upFeatures3)
            upFeatures3 = conv_1x1("Up_3_Reduce", upFeatures3, k * 16, k * 4)

            # Second upsampling
            bnUpFeatures3 = BN_NL_DP_Conv("Up_2_BN", upFeatures3)
            upFeatures2 = mConvBuilder.create_convolution(
                convName="Up_2", inPointHierarchy=mPointHierarchy1,
                inPointLevel=2, outPointLevel=1, inFeatures=bnUpFeatures3,
                inNumFeatures=k * 4, convRadius=0.2, KDEWindow=0.2)
            upFeatures2 = tf.concat([convFeatures2, upFeatures2], 1)
            upFeatures2 = BN_NL_DP_Conv("Up_2_Reduce_BN", upFeatures2)
            upFeatures2 = conv_1x1("Up_2_Reduce", upFeatures2, k * 8, k * 2)

            # First upsampling
            bnUpFeatures2 = BN_NL_DP_Conv("Up_1_2_BN", upFeatures2)
            upFeatures1 = mConvBuilder.create_convolution(
                convName="Up_1_2", inPointHierarchy=mPointHierarchy1,
                inPointLevel=1, outPointLevel=0, inFeatures=bnUpFeatures2,
                inNumFeatures=k * 2, convRadius=0.1, KDEWindow=0.2)
            upFeatures1 = tf.concat([convFeatures1, upFeatures1], 1)
            upFeatures1 = BN_NL_DP_Conv("Up_1_Reduce_BN", upFeatures1)
            upFeatures1 = conv_1x1("Up_1_Reduce", upFeatures1, k * 3, k)

            bnUpFeatures1 = BN_NL_DP_Conv("Up_1_BN", upFeatures1)
            finalFeatures = mConvBuilder.create_convolution(
                convName="Final_Conv", inPointHierarchy=mPointHierarchy1,
                outPointHierarchy=mPointHierarchy2, inPointLevel=0,
                outPointLevel=0, inFeatures=bnUpFeatures1, inNumFeatures=k,
                convRadius=0.05, KDEWindow=0.2)

            finalFeatures = BN_NL_DP_F("Final_MLP1_BN", finalFeatures)
            finalFeatures = tf.concat([finalFeatures, inputFeatures2], 1)
            finalFeatures = conv_1x1("Final_MLP1", finalFeatures,
                                     k + numInFeatures, k)
            finalFeatures = BN_NL_DP_F("Final_MLP2_BN", finalFeatures)
            predVals = conv_1x1("Final_MLP2", finalFeatures, k, 1)
            resultColor.append(predVals)

    return tf.concat(resultColor, axis=1)
def create_network(points, batchIds, features, numInputFeatures, batchSize, k,
                   numOutCat, isTraining, keepProbConv, keepProbFull,
                   useConvDropOut=False, useDropOutFull=True):

    ############################################ Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.1, 0.4, math.sqrt(3.0) + 0.1],
                                     "MCClassH_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    ############################################ LOGITS 1

    ############################################ First level

    # Convolution
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        inFeatures=features, inNumFeatures=numInputFeatures, outNumFeatures=k,
        convRadius=0.1, multiFeatureConv=True)

    # Pooling
    convFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_In_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures1 = conv_1x1("Reduce_Pool_1", convFeatures1, k, k * 2)
    convFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Pool_1_Out_BN", convFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1", inPointHierarchy=mPointHierarchy, inPointLevel=0,
        outPointLevel=1, inFeatures=convFeatures1, inNumFeatures=k * 2,
        convRadius=0.2, KDEWindow=0.2)

    ############################################ Second level convolutions

    # Convolution
    bnPoolFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Conv_2_In_BN", poolFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    bnPoolFeatures1 = conv_1x1("Reduce_Conv_2", bnPoolFeatures1, k * 2, k * 2)
    bnPoolFeatures1 = batch_norm_RELU_drop_out(
        "Reduce_Conv_2_Out_BN", bnPoolFeatures1, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=bnPoolFeatures1, inNumFeatures=k * 2, convRadius=0.4)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Pooling
    convFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_In_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures2 = conv_1x1("Reduce_Pool_2", convFeatures2, k * 4, k * 8)
    convFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_Out_BN", convFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=convFeatures2, inNumFeatures=k * 8,
        convRadius=0.8, KDEWindow=0.2)

    ############################################ Third level convolutions

    # Convolution
    bnPoolFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Conv_3_In_BN", poolFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    bnPoolFeatures2 = conv_1x1("Reduce_Conv_3", bnPoolFeatures2, k * 8, k * 8)
    bnPoolFeatures2 = batch_norm_RELU_drop_out(
        "Reduce_Conv_3_Out_BN", bnPoolFeatures2, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        inFeatures=bnPoolFeatures2, inNumFeatures=k * 8, convRadius=1.2)
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    # Pooling
    convFeatures3 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_In_BN", convFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures3 = conv_1x1("Reduce_Pool_3", convFeatures3, k * 16, k * 32)
    convFeatures3 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_Out_BN", convFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        outPointLevel=3, inFeatures=convFeatures3, inNumFeatures=k * 32,
        convRadius=math.sqrt(3.0) + 0.1, KDEWindow=0.2)

    # Fully connected MLP - Global features.
    finalInput = batch_norm_RELU_drop_out(
        "BNRELUDROP_final", poolFeatures3, isTraining, useConvDropOut,
        keepProbConv)
    finalLogits1 = MLP_2_hidden(finalInput, k * 32, k * 16, k * 8, numOutCat,
                                "Final_Logits", keepProbFull, isTraining,
                                useDropOutFull)

    ############################################ LOGITS 2

    ############################################ Second level convolutions

    # Convolution
    convFeatures22 = mConvBuilder.create_convolution(
        convName="Conv_2_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        inFeatures=mPointHierarchy.features_[1],
        inNumFeatures=numInputFeatures, outNumFeatures=k * 2, convRadius=0.4,
        multiFeatureConv=True)

    # Pooling
    convFeatures22 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_2_In_BN", convFeatures22, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures22 = conv_1x1("Reduce_Pool_2_2", convFeatures22, k * 2, k * 8)
    convFeatures22 = batch_norm_RELU_drop_out(
        "Reduce_Pool_2_2_Out_BN", convFeatures22, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures22 = mConvBuilder.create_convolution(
        convName="Pool_2_2", inPointHierarchy=mPointHierarchy, inPointLevel=1,
        outPointLevel=2, inFeatures=convFeatures22, inNumFeatures=k * 8,
        convRadius=0.8, KDEWindow=0.2)

    ############################################ Third level convolutions

    # Convolution
    bnPoolFeatures22 = batch_norm_RELU_drop_out(
        "Reduce_Conv_3_2_In_BN", poolFeatures22, isTraining, useConvDropOut,
        keepProbConv)
    bnPoolFeatures22 = conv_1x1("Reduce_Conv_3_2", bnPoolFeatures22, k * 8,
                                k * 8)
    bnPoolFeatures22 = batch_norm_RELU_drop_out(
        "Reduce_Conv_3_2_Out_BN", bnPoolFeatures22, isTraining,
        useConvDropOut, keepProbConv)
    convFeatures32 = mConvBuilder.create_convolution(
        convName="Conv_3_2", inPointHierarchy=mPointHierarchy, inPointLevel=2,
        inFeatures=bnPoolFeatures22, inNumFeatures=k * 8, convRadius=1.2)
    convFeatures32 = tf.concat([poolFeatures22, convFeatures32], 1)

    # Pooling
    convFeatures32 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_2_In_BN", convFeatures32, isTraining, useConvDropOut,
        keepProbConv)
    convFeatures32 = conv_1x1("Reduce_Pool_3_2", convFeatures32, k * 16,
                              k * 32)
    convFeatures32 = batch_norm_RELU_drop_out(
        "Reduce_Pool_3_2_Out_BN", convFeatures32, isTraining, useConvDropOut,
        keepProbConv)
    poolFeatures32 = mConvBuilder.create_convolution(
        convName="Pool_3_2", inPointHierarchy=mPointHierarchy,
        inPointLevel=2, outPointLevel=3, inFeatures=convFeatures32,
        inNumFeatures=k * 32, convRadius=math.sqrt(3.0) + 0.1, KDEWindow=0.2)

    # Fully connected MLP - Global features.
    finalInput2 = batch_norm_RELU_drop_out(
        "2BNRELUDROP_final", poolFeatures32, isTraining, useConvDropOut,
        keepProbConv)
    finalLogits2 = MLP_2_hidden(finalInput2, k * 32, k * 16, k * 8, numOutCat,
                                "2Final_Logits", keepProbFull, isTraining,
                                useDropOutFull)

    ############################################ PATH DROPOUT
    # During training a single uniform sample decides which logit paths stay
    # active: p < 0.33 keeps only path 1, p > 0.66 keeps only path 2, and
    # anything in between keeps both. At inference both masks are forced to 1.
    counter = tf.constant(0.0, dtype=tf.float32)
    probability = tf.random_uniform([1])

    mask1 = tf.less_equal(probability[0], tf.constant(0.66))
    mask1 = tf.maximum(tf.cast(mask1, tf.float32),
                       tf.cast(tf.logical_not(isTraining), tf.float32))
    counter = tf.add(counter, mask1)
    finalLogits1 = tf.scalar_mul(mask1, finalLogits1)

    mask2 = tf.greater_equal(probability[0], tf.constant(0.33))
    mask2 = tf.maximum(tf.cast(mask2, tf.float32),
                       tf.cast(tf.logical_not(isTraining), tf.float32))
    counter = tf.add(counter, mask2)
    finalLogits2 = tf.scalar_mul(mask2, finalLogits2)

    # Rescale by 2 / (number of active paths) so the training-time output
    # matches the inference-time sum of both paths in expectation.
    counter = tf.multiply(tf.constant(2.0, dtype=tf.float32),
                          tf.reciprocal(counter))

    return tf.scalar_mul(counter, tf.add(finalLogits1, finalLogits2))
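# ---------------------------------------------------------------------------
# Hedged sanity-check sketch (plain Python, not from the original file): a
# Monte Carlo estimate of the per-path weight under the path dropout above.
# With the 2/counter rescaling, each path's expected training-time weight
# comes out close to 1, matching the inference-time sum of both logit paths.
# ---------------------------------------------------------------------------
import random

def expected_path_weights(samples=100000):
    w1 = w2 = 0.0
    for _ in range(samples):
        p = random.random()
        m1 = 1.0 if p <= 0.66 else 0.0   # path 1 active
        m2 = 1.0 if p >= 0.33 else 0.0   # path 2 active
        scale = 2.0 / (m1 + m2)          # at least one path is always active
        w1 += scale * m1
        w2 += scale * m2
    return w1 / samples, w2 / samples    # both come out approximately 1.0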