Example #1
0
def create_network(points, batchIds, features, numInputFeatures, batchSize, k, isTraining, multiConv = True, useMC = True):
    """Minimal two-convolution MC network with 3 output channels per point.

    The result is bound to ``normals`` by the caller's convention, i.e. this
    variant regresses a 3-vector (presumably a normal) for every input point.

    Note: ``multiConv`` and ``useMC`` are accepted for signature
    compatibility with the other variants but are not used here.
    """

    ############################################ Point hierarchy
    # Flat hierarchy: the empty radius list requests no pooled levels.
    pointHierarchy = PointHierarchy(points, features, batchIds, [],
                                    "MCNormS_PH", batchSize)

    ############################################ Convolutions
    builder = ConvolutionBuilder(KDEWindow=0.2)

    # Conv 1: lift the raw input features to k channels.
    hidden = builder.create_convolution(
        convName="Conv_1",
        inPointHierarchy=pointHierarchy,
        inPointLevel=0,
        inFeatures=features,
        inNumFeatures=numInputFeatures,
        outNumFeatures=k,
        convRadius=0.15,
        multiFeatureConv=True)

    # Non-linearity between the two convolutions (dropout disabled).
    hidden = batch_norm_RELU_drop_out("BN_RELU", hidden, isTraining, False,
                                      False)

    # Conv 2: project the hidden features down to the 3 output channels.
    normals = builder.create_convolution(
        convName="Conv_2",
        inPointHierarchy=pointHierarchy,
        inPointLevel=0,
        inFeatures=hidden,
        inNumFeatures=k,
        outNumFeatures=3,
        convRadius=0.15,
        multiFeatureConv=True)

    return normals
Example #2
0
def create_network(points,
                   batchIds,
                   features,
                   numInputFeatures,
                   batchSize,
                   k,
                   isTraining,
                   multiConv=True,
                   useMC=True):
    """Build a 3-level encoder/decoder MC convolution network with 3 output
    channels per level-0 point (the result is bound to ``normals``).

    The encoder pools through a point hierarchy with two extra levels
    (radii 0.1 and 0.4) while widening channels k -> 2k -> 4k -> 8k; the
    decoder upsamples back to level 0 with skip connections (tf.concat)
    from the encoder features at each level.

    Args:
        points: Input point coordinates.
        batchIds: Per-point batch identifiers.
        features: Per-point input features.
        numInputFeatures: Channel count of ``features``.
        batchSize: Number of models in the batch.
        k: Base channel width; deeper levels use multiples of ``k``.
        isTraining: Training flag forwarded to every batch-norm layer.
        multiConv: Unused in this variant — TODO confirm intended use.
        useMC: Unused in this variant — TODO confirm intended use.

    Returns:
        Tensor with 3 channels per level-0 point.
    """

    ############################################  Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds, [0.1, 0.4],
                                     "MCNorm_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    ############################################ Encoder

    # First Convolution: lift input features to k channels at level 0.
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        inFeatures=features,
        inNumFeatures=numInputFeatures,
        outNumFeatures=k,
        convRadius=0.1,
        multiFeatureConv=True)

    # First Pooling: 1x1 conv widens k -> 2k, then pool level 0 -> 1.
    bnConvFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_In_BN",
                                               convFeatures1, isTraining,
                                               False, False)
    bnConvFeatures1 = conv_1x1("Reduce_Pool_1", bnConvFeatures1, k, k * 2)
    bnConvFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_Out_BN",
                                               bnConvFeatures1, isTraining,
                                               False, False)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        outPointLevel=1,
        inFeatures=bnConvFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.2,
        KDEWindow=0.2)

    # Second Convolution at level 1 (2k channels), then skip-concat with the
    # pooled features: 2k + 2k = 4k channels.
    bnPoolFeatures1 = batch_norm_RELU_drop_out("Conv_2_In_BN", poolFeatures1,
                                               isTraining, False, False)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=bnPoolFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.4)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Second Pooling: keep 4k channels, pool level 1 -> 2.
    bnConvFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_In_BN",
                                               convFeatures2, isTraining,
                                               False, False)
    bnConvFeatures2 = conv_1x1("Reduce_Pool_2", bnConvFeatures2, k * 4, k * 4)
    bnConvFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_Out_BN",
                                               bnConvFeatures2, isTraining,
                                               False, False)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=2,
        inFeatures=bnConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.8,
        KDEWindow=0.2)

    # Third Convolution at the coarsest level; radius sqrt(3) presumably
    # spans the whole (unit-cube) model — TODO confirm against data scaling.
    # Skip-concat yields 4k + 4k = 8k channels.
    bnPoolFeatures2 = batch_norm_RELU_drop_out("Conv_3_In_BN", poolFeatures2,
                                               isTraining, False, False)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        inFeatures=bnPoolFeatures2,
        inNumFeatures=k * 4,
        convRadius=math.sqrt(3))
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    ##################################################### Multi-hierarchy sampling

    # Second upsampling: level 2 -> 1 (8k in), concat with encoder level-1
    # features (4k) = 12k, reduce to 4k, then convolve at level 1.
    bnFeatures3 = batch_norm_RELU_drop_out("Up_2_3_BN", convFeatures3,
                                           isTraining, False, False)
    upFeatures2_3 = mConvBuilder.create_convolution(
        convName="Up_2_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=1,
        inFeatures=bnFeatures3,
        inNumFeatures=k * 8,
        convRadius=0.8)
    deConvFeatures2 = tf.concat([upFeatures2_3, convFeatures2], 1)
    deConvFeatures2 = batch_norm_RELU_drop_out("DeConv_2_Reduce_In_BN",
                                               deConvFeatures2, isTraining,
                                               False, False)
    deConvFeatures2 = conv_1x1("DeConv_2_Reduce", deConvFeatures2, k * 12,
                               k * 4)
    deConvFeatures2 = batch_norm_RELU_drop_out("DeConv_2_Reduce_Out_BN",
                                               deConvFeatures2, isTraining,
                                               False, False)
    deConvFeatures2 = mConvBuilder.create_convolution(
        convName="DeConv_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=deConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.4)

    # First upsampling: level 1 -> 0 (4k in), concat with Conv_1 output (k)
    # = 5k, reduce to 2k, then a final multi-feature conv down to 3 channels.
    bnDeConvFeatures2 = batch_norm_RELU_drop_out("Up_1_2_BN", deConvFeatures2,
                                                 isTraining, False, False)
    upFeatures1_2 = mConvBuilder.create_convolution(
        convName="Up_1_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=0,
        inFeatures=bnDeConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.2)
    deConvFeatures1 = tf.concat([upFeatures1_2, convFeatures1], 1)
    deConvFeatures1 = batch_norm_RELU_drop_out("DeConv_1_Reduce_In_BN",
                                               deConvFeatures1, isTraining,
                                               False, False)
    deConvFeatures1 = conv_1x1("DeConv_1_Reduce", deConvFeatures1, k * 5,
                               k * 2)
    deConvFeatures1 = batch_norm_RELU_drop_out("DeConv_1_Reduce_Out_BN",
                                               deConvFeatures1, isTraining,
                                               False, False)
    normals = mConvBuilder.create_convolution(convName="DeConv_1",
                                              inPointHierarchy=mPointHierarchy,
                                              inPointLevel=0,
                                              inFeatures=deConvFeatures1,
                                              inNumFeatures=k * 2,
                                              outNumFeatures=3,
                                              convRadius=0.1,
                                              multiFeatureConv=True)

    return normals
Example #3
0
def create_network(points,
                   batchIds,
                   features,
                   numInputFeatures,
                   batchSize,
                   k,
                   numOutCat,
                   isTraining,
                   keepProbConv,
                   keepProbFull,
                   useConvDropOut=False,
                   useDropOutFull=True):
    """Classification network: three pooling MC convolutions (widening the
    channels k -> 2k -> 4k) followed by a two-hidden-layer MLP that emits
    ``numOutCat`` logits.

    ``keepProbConv``/``useConvDropOut`` control dropout inside the
    convolutional stages; ``keepProbFull``/``useDropOutFull`` control
    dropout inside the final MLP.
    """

    # Radius used for the coarsest hierarchy level and the last convolution;
    # sqrt(3) presumably covers the unit-cube diagonal, plus a small margin.
    coarseRadius = math.sqrt(3.0) + 0.1

    ############################################ Point hierarchy (3 pooled levels)
    pointHierarchy = PointHierarchy(points, features, batchIds,
                                    [0.1, 0.4, coarseRadius],
                                    "MCClassS_PH", batchSize)

    ############################################ Convolutions
    builder = ConvolutionBuilder(KDEWindow=0.2)

    # Level 0 -> 1: lift the input features to k channels while pooling.
    level1Features = builder.create_convolution(
        convName="Conv_1",
        inPointHierarchy=pointHierarchy,
        inPointLevel=0,
        outPointLevel=1,
        inFeatures=features,
        inNumFeatures=numInputFeatures,
        outNumFeatures=k,
        convRadius=0.2,
        multiFeatureConv=True)

    # BN/ReLU/dropout, 1x1 conv widening k -> 2k, then pool level 1 -> 2.
    level1Features = batch_norm_RELU_drop_out("Reduce_1_In_BN", level1Features,
                                              isTraining, useConvDropOut,
                                              keepProbConv)
    level1Features = conv_1x1("Reduce_1", level1Features, k, k * 2)
    level1Features = batch_norm_RELU_drop_out("Reduce_1_Out_BN", level1Features,
                                              isTraining, useConvDropOut,
                                              keepProbConv)
    level2Features = builder.create_convolution(
        convName="Conv_2",
        inPointHierarchy=pointHierarchy,
        inPointLevel=1,
        outPointLevel=2,
        inFeatures=level1Features,
        inNumFeatures=k * 2,
        convRadius=0.8)

    # Widen 2k -> 4k with a 1x1 conv, then pool level 2 -> 3.
    level2Features = batch_norm_RELU_drop_out("Reduce_2_In_BN", level2Features,
                                              isTraining, useConvDropOut,
                                              keepProbConv)
    level2Features = conv_1x1("Reduce_2", level2Features, k * 2, k * 4)
    level2Features = batch_norm_RELU_drop_out("Reduce_2_Out_BN", level2Features,
                                              isTraining, useConvDropOut,
                                              keepProbConv)
    level3Features = builder.create_convolution(
        convName="Conv_3",
        inPointHierarchy=pointHierarchy,
        inPointLevel=2,
        outPointLevel=3,
        inFeatures=level2Features,
        inNumFeatures=k * 4,
        convRadius=coarseRadius)

    # Classifier head: BN/ReLU/dropout, then a 4k -> 2k -> k -> numOutCat MLP.
    globalFeatures = batch_norm_RELU_drop_out("BNRELUDROP_final",
                                              level3Features, isTraining,
                                              useConvDropOut, keepProbConv)
    logits = MLP_2_hidden(globalFeatures, k * 4, k * 2, k, numOutCat,
                          "Final_Logits", keepProbFull, isTraining,
                          useDropOutFull)

    return logits
Example #4
0
def create_network(points,
                   batchIds,
                   features,
                   catLabels,
                   numInputFeatures,
                   numCats,
                   numParts,
                   batchSize,
                   k,
                   isTraining,
                   keepProbConv,
                   keepProbFull,
                   useConvDropOut=False,
                   useDropOutFull=True):
    """Build a 4-level encoder/decoder MC network for part segmentation:
    per-point logits over ``numParts``, conditioned on a one-hot category.

    Encoder pools through hierarchy levels (radii 0.025, 0.1, 0.4) widening
    channels k -> 2k -> 4k -> 8k -> 16k with skip concats; the decoder
    upsamples back to level 0, fusing encoder features at each level, and
    the level-0 features are concatenated with the one-hot ``catLabels``
    before a 2-hidden-layer MLP produces the final logits.

    Args:
        points: Input point coordinates.
        batchIds: Per-point batch identifiers.
        features: Per-point input features (``numInputFeatures`` channels).
        catLabels: Per-model category indices, one-hot encoded below.
        numCats: Number of object categories.
        numParts: Number of output part classes (MLP output width).
        batchSize: Number of models in the batch.
        k: Base channel width.
        isTraining: Training flag forwarded to batch-norm/dropout layers.
        keepProbConv / keepProbFull: Dropout keep probabilities for the
            convolutional stages and the final MLP respectively.
        useConvDropOut / useDropOutFull: Enable flags for those dropouts.

    Returns:
        Per-point logits tensor with ``numParts`` channels.
    """

    ############################################  Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.025, 0.1, 0.4], "MCSeg_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    ############################################ Encoder

    # First Convolution: lift input features to k channels at level 0.
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        inFeatures=features,
        inNumFeatures=numInputFeatures,
        outNumFeatures=k,
        convRadius=0.03,
        multiFeatureConv=True)

    # First Pooling: 1x1 conv widens k -> 2k, then pool level 0 -> 1.
    bnConvFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_In_BN",
                                               convFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    bnConvFeatures1 = conv_1x1("Reduce_Pool_1", bnConvFeatures1, k, k * 2)
    bnConvFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_Out_BN",
                                               bnConvFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        outPointLevel=1,
        inFeatures=bnConvFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.05,
        KDEWindow=0.2)

    # Second Convolution at level 1; skip-concat gives 2k + 2k = 4k channels.
    bnPoolFeatures1 = batch_norm_RELU_drop_out("Conv_2_In_BN", poolFeatures1,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=bnPoolFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.1)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Second Pooling: keep 4k channels, pool level 1 -> 2.
    bnConvFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_In_BN",
                                               convFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    bnConvFeatures2 = conv_1x1("Reduce_Pool_2", bnConvFeatures2, k * 4, k * 4)
    bnConvFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_Out_BN",
                                               bnConvFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=2,
        inFeatures=bnConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.2,
        KDEWindow=0.2)

    # Third Convolution at level 2; skip-concat gives 4k + 4k = 8k channels.
    bnPoolFeatures2 = batch_norm_RELU_drop_out("Conv_3_In_BN", poolFeatures2,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        inFeatures=bnPoolFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.4)
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    # Third Pooling: keep 8k channels, pool level 2 -> 3.
    bnConvFeatures3 = batch_norm_RELU_drop_out("Reduce_Pool_3_In_BN",
                                               convFeatures3, isTraining,
                                               useConvDropOut, keepProbConv)
    bnConvFeatures3 = conv_1x1("Reduce_Pool_3", bnConvFeatures3, k * 8, k * 8)
    bnConvFeatures3 = batch_norm_RELU_drop_out("Reduce_Pool_3_Out_BN",
                                               bnConvFeatures3, isTraining,
                                               useConvDropOut, keepProbConv)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=3,
        inFeatures=bnConvFeatures3,
        inNumFeatures=k * 8,
        convRadius=0.8,
        KDEWindow=0.2)

    # Fourth Convolution at the coarsest level; radius sqrt(3)+0.1 presumably
    # covers the whole model — TODO confirm against data normalization.
    # Skip-concat gives 8k + 8k = 16k channels.
    bnPoolFeatures3 = batch_norm_RELU_drop_out("Conv_4_In_BN", poolFeatures3,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    convFeatures4 = mConvBuilder.create_convolution(
        convName="Conv_4",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=3,
        inFeatures=bnPoolFeatures3,
        inNumFeatures=k * 8,
        convRadius=math.sqrt(3.0) + 0.1)
    convFeatures4 = tf.concat([poolFeatures3, convFeatures4], 1)

    ############################################ Decoder

    # Third upsampling: level 3 -> 2 (16k in), concat with encoder level-2
    # features (8k) = 24k, reduce to 8k, convolve at level 2.
    bnConvFeatures4 = batch_norm_RELU_drop_out("Up_3_4_BN", convFeatures4,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    upFeatures3_4 = mConvBuilder.create_convolution(
        convName="Up_3_4",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=3,
        outPointLevel=2,
        inFeatures=bnConvFeatures4,
        inNumFeatures=k * 16,
        convRadius=math.sqrt(3.0) + 0.1)
    deConvFeatures3 = tf.concat([upFeatures3_4, convFeatures3], 1)
    deConvFeatures3 = batch_norm_RELU_drop_out("DeConv_3_Reduce_In_BN",
                                               deConvFeatures3, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures3 = conv_1x1("DeConv_3_Reduce", deConvFeatures3, k * 24,
                               k * 8)
    deConvFeatures3 = batch_norm_RELU_drop_out("DeConv_3_Reduce_Out_BN",
                                               deConvFeatures3, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures3 = mConvBuilder.create_convolution(
        convName="DeConv_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        inFeatures=deConvFeatures3,
        inNumFeatures=k * 8,
        convRadius=0.4)

    # Second upsampling: level 2 -> 1 (8k in), concat with encoder level-1
    # features (4k) = 12k, reduce to 4k, convolve at level 1.
    bnDeConvFeatures3 = batch_norm_RELU_drop_out("Up_2_3_BN", deConvFeatures3,
                                                 isTraining, useConvDropOut,
                                                 keepProbConv)
    upFeatures2_3 = mConvBuilder.create_convolution(
        convName="Up_2_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=1,
        inFeatures=bnDeConvFeatures3,
        inNumFeatures=k * 8,
        convRadius=0.2)
    deConvFeatures2 = tf.concat([upFeatures2_3, convFeatures2], 1)
    deConvFeatures2 = batch_norm_RELU_drop_out("DeConv_2_Reduce_In_BN",
                                               deConvFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures2 = conv_1x1("DeConv_2_Reduce", deConvFeatures2, k * 12,
                               k * 4)
    deConvFeatures2 = batch_norm_RELU_drop_out("DeConv_2_Reduce_Out_BN",
                                               deConvFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures2 = mConvBuilder.create_convolution(
        convName="DeConv_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=deConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.1)

    # First multiple upsamplings: project levels 1 (4k) and 2 (8k) directly
    # to level 0, concat with Conv_1 output (k) = 13k, reduce to 4k, and run
    # a final convolution at level 0.
    bnDeConvFeatures2 = batch_norm_RELU_drop_out("Up_1_2_BN", deConvFeatures2,
                                                 isTraining, useConvDropOut,
                                                 keepProbConv)
    upFeatures1_2 = mConvBuilder.create_convolution(
        convName="Up_1_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=0,
        inFeatures=bnDeConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.05)
    bnDeConvFeatures3 = batch_norm_RELU_drop_out("Up_1_3_BN", deConvFeatures3,
                                                 isTraining, useConvDropOut,
                                                 keepProbConv)
    upFeatures1_3 = mConvBuilder.create_convolution(
        convName="Up_1_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=0,
        inFeatures=bnDeConvFeatures3,
        inNumFeatures=k * 8,
        convRadius=0.2)
    deConvFeatures1 = tf.concat([upFeatures1_2, upFeatures1_3, convFeatures1],
                                1)
    deConvFeatures1 = batch_norm_RELU_drop_out("DeConv_1_Reduce_In_BN",
                                               deConvFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures1 = conv_1x1("DeConv_1_Reduce", deConvFeatures1, k * 13,
                               k * 4)
    deConvFeatures1 = batch_norm_RELU_drop_out("DeConv_1_Reduce_Out_BN",
                                               deConvFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures1 = mConvBuilder.create_convolution(
        convName="DeConv_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        inFeatures=deConvFeatures1,
        inNumFeatures=k * 4,
        convRadius=0.03)

    # Fully connected MLP - Global features.
    finalInput = batch_norm_RELU_drop_out("BNRELUDROP_hier_final",
                                          deConvFeatures1, isTraining,
                                          useConvDropOut, keepProbConv)
    # Condition on the object category: one-hot encode and append to the
    # per-point features before the final MLP (input width k*4 + numCats).
    catLabelOneHot = tf.one_hot(catLabels,
                                numCats,
                                on_value=1.0,
                                off_value=0.0)
    catLabelOneHot = tf.reshape(catLabelOneHot, [-1, numCats])
    finalInput = tf.concat([catLabelOneHot, finalInput], 1)
    finalLogits = MLP_2_hidden(finalInput,
                               k * 4 + numCats,
                               k * 4,
                               k * 2,
                               numParts,
                               "Final_Logits",
                               keepProbFull,
                               isTraining,
                               useDropOutFull,
                               useInitBN=False)

    return finalLogits
Example #5
0
def create_network(points,
                   batchIds,
                   features,
                   numInputFeatures,
                   numSem,
                   batchSize,
                   k,
                   isTraining,
                   keepProbConv,
                   keepProbFull,
                   useConvDropOut=False,
                   useDropOutFull=True):

    ############################################  Compute point hierarchy
    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.1, 0.2, 0.4, 0.8], "MCSegScanNet_PH",
                                     batchSize, False)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25, relativeRadius=False)

    ############################################ Encoder

    # Init pooling
    poolFeatures0 = mConvBuilder.create_convolution(
        convName="Pool_0",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        outPointLevel=1,
        inFeatures=features,
        inNumFeatures=numInputFeatures,
        outNumFeatures=k,
        convRadius=0.1,
        KDEWindow=0.2,
        multiFeatureConv=True)

    # First Convolution
    bnPoolFeatures0 = batch_norm_RELU_drop_out("Conv_1_In_BN", poolFeatures0,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=bnPoolFeatures0,
        inNumFeatures=k,
        convRadius=0.4)
    convFeatures1 = tf.concat([poolFeatures0, convFeatures1], 1)

    # First Pooling
    bnConvFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_In_BN",
                                               convFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    bnConvFeatures1 = conv_1x1("Reduce_Pool_1", bnConvFeatures1, k * 2, k * 2)
    bnConvFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_Out_BN",
                                               bnConvFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=2,
        inFeatures=bnConvFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.4,
        KDEWindow=0.2)

    # Second Convolution
    bnPoolFeatures1 = batch_norm_RELU_drop_out("Conv_2_In_BN", poolFeatures1,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        inFeatures=bnPoolFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.8)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Second Pooling
    bnConvFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_In_BN",
                                               convFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    bnConvFeatures2 = conv_1x1("Reduce_Pool_2", bnConvFeatures2, k * 4, k * 4)
    bnConvFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_Out_BN",
                                               bnConvFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=3,
        inFeatures=bnConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.8,
        KDEWindow=0.2)

    # Third Convolution
    bnPoolFeatures2 = batch_norm_RELU_drop_out("Conv_3_In_BN", poolFeatures2,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=3,
        inFeatures=bnPoolFeatures2,
        inNumFeatures=k * 4,
        convRadius=1.6)
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    # Third Pooling
    bnConvFeatures3 = batch_norm_RELU_drop_out("Reduce_Pool_3_In_BN",
                                               convFeatures3, isTraining,
                                               useConvDropOut, keepProbConv)
    bnConvFeatures3 = conv_1x1("Reduce_Pool_3", bnConvFeatures3, k * 8, k * 8)
    bnConvFeatures3 = batch_norm_RELU_drop_out("Reduce_Pool_3_Out_BN",
                                               bnConvFeatures3, isTraining,
                                               useConvDropOut, keepProbConv)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=3,
        outPointLevel=4,
        inFeatures=bnConvFeatures3,
        inNumFeatures=k * 8,
        convRadius=1.6,
        KDEWindow=0.2)

    # Fourth Convolution
    bnPoolFeatures3 = batch_norm_RELU_drop_out("Conv_4_In_BN", poolFeatures3,
                                               isTraining, useConvDropOut,
                                               keepProbConv)
    convFeatures4 = mConvBuilder.create_convolution(
        convName="Conv_4",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=4,
        inFeatures=bnPoolFeatures3,
        inNumFeatures=k * 8,
        convRadius=5.0)
    convFeatures4 = tf.concat([poolFeatures3, convFeatures4], 1)

    ############################################ Decoder

    # Third upsampling
    bnConvFeatures4 = batch_norm_RELU_drop_out("Up3_4_Reduce_In_BN",
                                               convFeatures4, isTraining,
                                               useConvDropOut, keepProbConv)
    bnConvFeatures4 = conv_1x1("Up3_4_Reduce", bnConvFeatures4, k * 16, k * 8)
    bnConvFeatures4 = batch_norm_RELU_drop_out("Up3_4_Reduce_Out_BN",
                                               bnConvFeatures4, isTraining,
                                               useConvDropOut, keepProbConv)
    upFeatures3_4 = mConvBuilder.create_convolution(
        convName="Up_3_4",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=4,
        outPointLevel=3,
        inFeatures=bnConvFeatures4,
        inNumFeatures=k * 8,
        convRadius=1.6)
    upFeatures3_4 = tf.concat([upFeatures3_4, convFeatures3], 1)
    deConvFeatures3 = batch_norm_RELU_drop_out("DeConv_3_Reduce_In_BN",
                                               upFeatures3_4, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures3 = conv_1x1("DeConv_3_Reduce", deConvFeatures3, k * 16,
                               k * 8)
    deConvFeatures3 = batch_norm_RELU_drop_out("DeConv_3_Reduce_Out_BN",
                                               deConvFeatures3, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures3 = mConvBuilder.create_convolution(
        convName="DeConv_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=3,
        inFeatures=deConvFeatures3,
        inNumFeatures=k * 8,
        convRadius=1.6)

    # Second upsampling
    bnDeConvFeatures3 = batch_norm_RELU_drop_out("Up2_3_Reduce_In_BN",
                                                 deConvFeatures3, isTraining,
                                                 useConvDropOut, keepProbConv)
    bnDeConvFeatures3 = conv_1x1("Up2_3_Reduce", bnDeConvFeatures3, k * 8,
                                 k * 4)
    bnDeConvFeatures3 = batch_norm_RELU_drop_out("Up2_3_Reduce_Out_BN",
                                                 bnDeConvFeatures3, isTraining,
                                                 useConvDropOut, keepProbConv)
    upFeatures2_3 = mConvBuilder.create_convolution(
        convName="Up_2_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=3,
        outPointLevel=2,
        inFeatures=bnDeConvFeatures3,
        inNumFeatures=k * 4,
        convRadius=0.8)
    upFeatures2_3 = tf.concat([upFeatures2_3, convFeatures2], 1)
    deConvFeatures2 = batch_norm_RELU_drop_out("DeConv_2_Reduce_In_BN",
                                               upFeatures2_3, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures2 = conv_1x1("DeConv_2_Reduce", deConvFeatures2, k * 8,
                               k * 4)
    deConvFeatures2 = batch_norm_RELU_drop_out("DeConv_2_Reduce_Out_BN",
                                               deConvFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures2 = mConvBuilder.create_convolution(
        convName="DeConv_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        inFeatures=deConvFeatures2,
        inNumFeatures=k * 4,
        convRadius=0.8)

    # First multiple upsamplings
    bnDeConvFeatures2 = batch_norm_RELU_drop_out("Up1_2_Reduce_In_BN",
                                                 deConvFeatures2, isTraining,
                                                 useConvDropOut, keepProbConv)
    bnDeConvFeatures2 = conv_1x1("Up1_2_Reduce", bnDeConvFeatures2, k * 4,
                                 k * 2)
    bnDeConvFeatures2 = batch_norm_RELU_drop_out("Up1_2_Reduce_Out_BN",
                                                 bnDeConvFeatures2, isTraining,
                                                 useConvDropOut, keepProbConv)
    upFeatures1_2 = mConvBuilder.create_convolution(
        convName="Up_1_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=1,
        inFeatures=bnDeConvFeatures2,
        inNumFeatures=k * 2,
        convRadius=0.4)
    bnDeConvFeatures3 = batch_norm_RELU_drop_out("Up1_3_Reduce_In_BN",
                                                 deConvFeatures3, isTraining,
                                                 useConvDropOut, keepProbConv)
    bnDeConvFeatures3 = conv_1x1("Up1_3_Reduce", bnDeConvFeatures3, k * 8,
                                 k * 2)
    bnDeConvFeatures3 = batch_norm_RELU_drop_out("Up1_3_Reduce_Out_BN",
                                                 bnDeConvFeatures3, isTraining,
                                                 useConvDropOut, keepProbConv)
    upFeatures1_3 = mConvBuilder.create_convolution(
        convName="Up_1_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=3,
        outPointLevel=1,
        inFeatures=bnDeConvFeatures3,
        inNumFeatures=k * 2,
        convRadius=0.8)
    bnDeConvFeatures4 = batch_norm_RELU_drop_out("Up1_4_Reduce_In_BN",
                                                 convFeatures4, isTraining,
                                                 useConvDropOut, keepProbConv)
    bnDeConvFeatures4 = conv_1x1("Up1_4_Reduce", bnDeConvFeatures4, k * 16,
                                 k * 2)
    bnDeConvFeatures4 = batch_norm_RELU_drop_out("Up1_4_Reduce_Out_BN",
                                                 bnDeConvFeatures4, isTraining,
                                                 useConvDropOut, keepProbConv)
    upFeatures1_4 = mConvBuilder.create_convolution(
        convName="Up_1_4",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=4,
        outPointLevel=1,
        inFeatures=bnDeConvFeatures4,
        inNumFeatures=k * 2,
        convRadius=1.6)
    upFeatures1 = tf.concat(
        [upFeatures1_4, upFeatures1_3, upFeatures1_2, convFeatures1], 1)
    deConvFeatures1 = batch_norm_RELU_drop_out("DeConv_1_Reduce_In_BN",
                                               upFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures1 = conv_1x1("DeConv_1_Reduce", deConvFeatures1, k * 8,
                               k * 4)
    deConvFeatures1 = batch_norm_RELU_drop_out("DeConv_1_Reduce_Out_BN",
                                               deConvFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    deConvFeatures1 = mConvBuilder.create_convolution(
        convName="DeConv_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=deConvFeatures1,
        inNumFeatures=k * 4,
        convRadius=0.4)
    deConvFeatures1 = tf.concat([
        upFeatures1_4, upFeatures1_3, upFeatures1_2, convFeatures1,
        deConvFeatures1
    ], 1)

    # Final upsampling
    upFeaturesFinal = batch_norm_RELU_drop_out("Up_Final_Reduce_In_BN",
                                               deConvFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    upFeaturesFinal = conv_1x1("Up_Final_Reduce", upFeaturesFinal, k * 12,
                               k * 4)
    upFeaturesFinal = batch_norm_RELU_drop_out("Up_Final_Reduce_Out_BN",
                                               upFeaturesFinal, isTraining,
                                               useConvDropOut, keepProbConv)
    finalFeatures = mConvBuilder.create_convolution(
        convName="Up_0_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=0,
        inFeatures=upFeaturesFinal,
        inNumFeatures=k * 4,
        convRadius=0.1)

    # Fully connected MLP - Global features.
    finalInput = batch_norm_RELU_drop_out("BNRELUDROP_hier_final",
                                          finalFeatures, isTraining,
                                          useConvDropOut, keepProbConv)
    finalLogits = MLP_2_hidden(finalInput,
                               k * 4,
                               k * 4,
                               k * 2,
                               numSem,
                               "Final_Logits",
                               keepProbFull,
                               isTraining,
                               useDropOutFull,
                               useInitBN=False)

    return finalLogits
Пример #6
0
def create_network(points,
                   batchIds,
                   features,
                   numInputFeatures,
                   batchSize,
                   k,
                   numOutCat,
                   isTraining,
                   keepProbConv,
                   keepProbFull,
                   useConvDropOut=False,
                   useDropOutFull=True):
    """Build a two-branch MCCNN classification network with path dropout.

    Two independent encoder branches are built on one shared point hierarchy:
    branch 1 (LOGITS 1) starts from the raw level-0 points, branch 2
    (LOGITS 2) starts directly from the level-1 features. Each branch pools
    down to the coarsest level and produces class logits through its own
    2-hidden-layer MLP. During training each branch is randomly zeroed
    ("path dropout") and the sum of the surviving logits is rescaled; at
    inference both branches are kept, so the result is their plain average.

    Args:
        points: Input point coordinates.
        batchIds: Per-point batch ids.
        features: Per-point input features.
        numInputFeatures: Channel count of ``features``.
        batchSize: Number of models per batch.
        k: Base feature width; deeper layers use multiples of it (up to 32k).
        numOutCat: Number of output categories (logit dimension).
        isTraining: Boolean tensor selecting training/inference behavior for
            batch normalization, dropout, and path dropout.
        keepProbConv: Keep probability for dropout after convolutions.
        keepProbFull: Keep probability for dropout inside the final MLPs.
        useConvDropOut: Whether dropout is applied after convolutions.
        useDropOutFull: Whether dropout is applied inside the final MLPs.

    Returns:
        Combined class logits of both branches (shape: batch x numOutCat).
    """

    ############################################ Compute point hierarchy
    # Three extra levels with cell sizes 0.1 / 0.4 / sqrt(3)+0.1. The last
    # radius matches the Pool_3 radius below; presumably it exceeds the
    # bounding-box diagonal so level 3 reduces each model to a single
    # point -- TODO confirm against PointHierarchy's semantics.
    mPointHierarchy = PointHierarchy(points, features, batchIds,
                                     [0.1, 0.4, math.sqrt(3.0) + 0.1],
                                     "MCClassH_PH", batchSize)

    ############################################ Convolutions
    mConvBuilder = ConvolutionBuilder(KDEWindow=0.25)

    ############################################ LOGITS 1

    ############################################ First level

    # Convolution: spatial conv projecting the raw input features to k
    # channels (multiFeatureConv=True lets the conv change channel count).
    convFeatures1 = mConvBuilder.create_convolution(
        convName="Conv_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        inFeatures=features,
        inNumFeatures=numInputFeatures,
        outNumFeatures=k,
        convRadius=0.1,
        multiFeatureConv=True)

    # Pooling: 1x1 conv widens k -> 2k, then a strided spatial conv moves
    # the features from level 0 to level 1.
    convFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_In_BN",
                                             convFeatures1, isTraining,
                                             useConvDropOut, keepProbConv)
    convFeatures1 = conv_1x1("Reduce_Pool_1", convFeatures1, k, k * 2)
    convFeatures1 = batch_norm_RELU_drop_out("Reduce_Pool_1_Out_BN",
                                             convFeatures1, isTraining,
                                             useConvDropOut, keepProbConv)
    poolFeatures1 = mConvBuilder.create_convolution(
        convName="Pool_1",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=0,
        outPointLevel=1,
        inFeatures=convFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.2,
        KDEWindow=0.2)

    ############################################ Second level convolutions

    #### Convolution: BN/ReLU + 1x1 bottleneck, then a same-level spatial
    # conv at level 1; the result is concatenated with its input
    # (2k + 2k = 4k channels), forming a dense/skip connection.
    bnPoolFeatures1 = batch_norm_RELU_drop_out("Reduce_Conv_2_In_BN",
                                               poolFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    bnPoolFeatures1 = conv_1x1("Reduce_Conv_2", bnPoolFeatures1, k * 2, k * 2)
    bnPoolFeatures1 = batch_norm_RELU_drop_out("Reduce_Conv_2_Out_BN",
                                               bnPoolFeatures1, isTraining,
                                               useConvDropOut, keepProbConv)
    convFeatures2 = mConvBuilder.create_convolution(
        convName="Conv_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=bnPoolFeatures1,
        inNumFeatures=k * 2,
        convRadius=0.4)
    convFeatures2 = tf.concat([poolFeatures1, convFeatures2], 1)

    # Pooling: widen 4k -> 8k, then pool level 1 -> level 2.
    convFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_In_BN",
                                             convFeatures2, isTraining,
                                             useConvDropOut, keepProbConv)
    convFeatures2 = conv_1x1("Reduce_Pool_2", convFeatures2, k * 4, k * 8)
    convFeatures2 = batch_norm_RELU_drop_out("Reduce_Pool_2_Out_BN",
                                             convFeatures2, isTraining,
                                             useConvDropOut, keepProbConv)
    poolFeatures2 = mConvBuilder.create_convolution(
        convName="Pool_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=2,
        inFeatures=convFeatures2,
        inNumFeatures=k * 8,
        convRadius=0.8,
        KDEWindow=0.2)

    ############################################ Third level convolutions

    # Convolution: same pattern as level 2 -- bottleneck, spatial conv,
    # concat skip (8k + 8k = 16k channels).
    bnPoolFeatures2 = batch_norm_RELU_drop_out("Reduce_Conv_3_In_BN",
                                               poolFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    bnPoolFeatures2 = conv_1x1("Reduce_Conv_3", bnPoolFeatures2, k * 8, k * 8)
    bnPoolFeatures2 = batch_norm_RELU_drop_out("Reduce_Conv_3_Out_BN",
                                               bnPoolFeatures2, isTraining,
                                               useConvDropOut, keepProbConv)
    convFeatures3 = mConvBuilder.create_convolution(
        convName="Conv_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        inFeatures=bnPoolFeatures2,
        inNumFeatures=k * 8,
        convRadius=1.2)
    convFeatures3 = tf.concat([poolFeatures2, convFeatures3], 1)

    # Pooling: widen 16k -> 32k, then pool level 2 -> level 3 using the
    # full-extent radius (global aggregation).
    convFeatures3 = batch_norm_RELU_drop_out("Reduce_Pool_3_In_BN",
                                             convFeatures3, isTraining,
                                             useConvDropOut, keepProbConv)
    convFeatures3 = conv_1x1("Reduce_Pool_3", convFeatures3, k * 16, k * 32)
    convFeatures3 = batch_norm_RELU_drop_out("Reduce_Pool_3_Out_BN",
                                             convFeatures3, isTraining,
                                             useConvDropOut, keepProbConv)
    poolFeatures3 = mConvBuilder.create_convolution(
        convName="Pool_3",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=3,
        inFeatures=convFeatures3,
        inNumFeatures=k * 32,
        convRadius=math.sqrt(3.0) + 0.1,
        KDEWindow=0.2)

    # Fully connected MLP - Global features.
    finalInput = batch_norm_RELU_drop_out("BNRELUDROP_final", poolFeatures3,
                                          isTraining, useConvDropOut,
                                          keepProbConv)
    finalLogits1 = MLP_2_hidden(finalInput, k * 32, k * 16, k * 8, numOutCat,
                                "Final_Logits", keepProbFull, isTraining,
                                useDropOutFull)

    ############################################ LOGITS 2

    ############################################ Second level convolutions

    #### Convolution: branch 2 starts from the hierarchy's own level-1
    # features (still numInputFeatures channels) instead of branch 1's
    # learned features.
    convFeatures22 = mConvBuilder.create_convolution(
        convName="Conv_2_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        inFeatures=mPointHierarchy.features_[1],
        inNumFeatures=numInputFeatures,
        outNumFeatures=k * 2,
        convRadius=0.4,
        multiFeatureConv=True)

    # Pooling: widen 2k -> 8k, then pool level 1 -> level 2.
    convFeatures22 = batch_norm_RELU_drop_out("Reduce_Pool_2_2_In_BN",
                                              convFeatures22, isTraining,
                                              useConvDropOut, keepProbConv)
    convFeatures22 = conv_1x1("Reduce_Pool_2_2", convFeatures22, k * 2, k * 8)
    convFeatures22 = batch_norm_RELU_drop_out("Reduce_Pool_2_2_Out_BN",
                                              convFeatures22, isTraining,
                                              useConvDropOut, keepProbConv)
    poolFeatures22 = mConvBuilder.create_convolution(
        convName="Pool_2_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=1,
        outPointLevel=2,
        inFeatures=convFeatures22,
        inNumFeatures=k * 8,
        convRadius=0.8,
        KDEWindow=0.2)

    ############################################ Third level convolutions

    # Convolution: mirror of branch 1's third level (bottleneck, spatial
    # conv, concat skip to 16k channels).
    bnPoolFeatures22 = batch_norm_RELU_drop_out("Reduce_Conv_3_2_In_BN",
                                                poolFeatures22, isTraining,
                                                useConvDropOut, keepProbConv)
    bnPoolFeatures22 = conv_1x1("Reduce_Conv_3_2", bnPoolFeatures22, k * 8,
                                k * 8)
    bnPoolFeatures22 = batch_norm_RELU_drop_out("Reduce_Conv_3_2_Out_BN",
                                                bnPoolFeatures22, isTraining,
                                                useConvDropOut, keepProbConv)
    convFeatures32 = mConvBuilder.create_convolution(
        convName="Conv_3_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        inFeatures=bnPoolFeatures22,
        inNumFeatures=k * 8,
        convRadius=1.2)
    convFeatures32 = tf.concat([poolFeatures22, convFeatures32], 1)

    # Pooling: widen 16k -> 32k, then global pooling to level 3.
    convFeatures32 = batch_norm_RELU_drop_out("Reduce_Pool_3_2_In_BN",
                                              convFeatures32, isTraining,
                                              useConvDropOut, keepProbConv)
    convFeatures32 = conv_1x1("Reduce_Pool_3_2", convFeatures32, k * 16,
                              k * 32)
    convFeatures32 = batch_norm_RELU_drop_out("Reduce_Pool_3_2_Out_BN",
                                              convFeatures32, isTraining,
                                              useConvDropOut, keepProbConv)
    poolFeatures32 = mConvBuilder.create_convolution(
        convName="Pool_3_2",
        inPointHierarchy=mPointHierarchy,
        inPointLevel=2,
        outPointLevel=3,
        inFeatures=convFeatures32,
        inNumFeatures=k * 32,
        convRadius=math.sqrt(3.0) + 0.1,
        KDEWindow=0.2)

    # Fully connected MLP - Global features.
    finalInput2 = batch_norm_RELU_drop_out("2BNRELUDROP_final", poolFeatures32,
                                           isTraining, useConvDropOut,
                                           keepProbConv)
    finalLogits2 = MLP_2_hidden(finalInput2, k * 32, k * 16, k * 8, numOutCat,
                                "2Final_Logits", keepProbFull, isTraining,
                                useDropOutFull)

    ############################################ PATH DROPOUT
    # During training, each branch is randomly disabled; `counter` counts
    # how many branches survived so the sum can be rescaled accordingly.
    counter = tf.constant(0.0, dtype=tf.float32)

    # One uniform sample in [0, 1) decides the fate of both branches.
    probability = tf.random_uniform([1])

    # Branch 1 survives when probability <= 0.66 (dropped ~1/3 of the time).
    # At inference, logical_not(isTraining) forces the mask to 1.
    mask1 = tf.less_equal(probability[0], tf.constant(0.66))
    mask1 = tf.maximum(tf.cast(mask1, tf.float32),
                       tf.cast(tf.logical_not(isTraining), tf.float32))
    counter = tf.add(counter, mask1)
    finalLogits1 = tf.scalar_mul(mask1, finalLogits1)

    # Branch 2 survives when probability >= 0.33. The ranges overlap on
    # [0.33, 0.66], where both branches are kept, so counter >= 1 always
    # (no division by zero below).
    mask2 = tf.greater_equal(probability[0], tf.constant(0.33))
    mask2 = tf.maximum(tf.cast(mask2, tf.float32),
                       tf.cast(tf.logical_not(isTraining), tf.float32))
    counter = tf.add(counter, mask2)
    finalLogits2 = tf.scalar_mul(mask2, finalLogits2)

    # Scale factor 2/counter: averages the surviving branches and keeps the
    # expected magnitude consistent between training and inference (where
    # counter == 2 and the factor is 1).
    counter = tf.multiply(tf.constant((2.0), dtype=tf.float32),
                          tf.reciprocal(counter))

    return tf.scalar_mul(counter, tf.add(finalLogits1, finalLogits2))
Пример #7
0
def create_network_parts(pointHierarchyIn,
                         convBuilder,
                         features,
                         numInputFeatures,
                         k,
                         isTraining,
                         dropVal,
                         radiusList=(0.05, 0.1)):
    """Build the denoiser sub-network that predicts per-point displacements.

    A small encoder-decoder over hierarchy levels 0-2: two strided
    convolutions encode level 0 down to level 2, two upsampling convolutions
    decode back to level 0 (with a skip connection from the first
    convolution's output), and a tanh bounds the 3-channel output before it
    is scaled by the first convolution radius.

    Args:
        pointHierarchyIn: Point hierarchy the convolutions operate on.
        convBuilder: ConvolutionBuilder used to create every convolution.
        features: Per-point input features at level 0.
        numInputFeatures: Channel count of ``features``.
        k: Base feature width.
        isTraining: Boolean tensor selecting training/inference behavior for
            batch normalization and dropout.
        dropVal: Keep probability for the dropout layers (dropout is always
            enabled here; the flag passed to batch_norm_RELU_drop_out is
            True).
        radiusList: Two convolution radii: [0] for the level 0<->1 convs and
            [1] for the level 1<->2 convs. NOTE: the default was changed
            from a mutable list to a tuple (Python mutable-default pitfall);
            it is only indexed, so callers are unaffected.

    Returns:
        Per-point 3D displacement tensor bounded to
        [-radiusList[0], radiusList[0]] and, when the builder uses relative
        radii, additionally scaled by each model's bounding-box diagonal.
    """

    #### Convolution 1: level 0 -> 1, numInputFeatures -> k channels.
    convFeatures1 = convBuilder.create_convolution(
        convName="DeNoiser_Conv_1",
        inPointHierarchy=pointHierarchyIn,
        inPointLevel=0,
        outPointLevel=1,
        inFeatures=features,
        inNumFeatures=numInputFeatures,
        outNumFeatures=k,
        convRadius=radiusList[0],
        multiFeatureConv=True)

    #### Convolution 2: 1x1 widens k -> 2k, then pool level 1 -> 2.
    bnConvFeatures1 = batch_norm_RELU_drop_out("DeNoiser_Reduce_1_In_BN",
                                               convFeatures1, isTraining, True,
                                               dropVal)
    bnConvFeatures1 = conv_1x1("DeNoiser_Reduce_1", bnConvFeatures1, k, k * 2)
    bnConvFeatures1 = batch_norm_RELU_drop_out("DeNoiser_Reduce_1_Out_BN",
                                               bnConvFeatures1, isTraining,
                                               True, dropVal)
    convFeatures2 = convBuilder.create_convolution(
        convName="DeNoiser_Conv_2",
        inPointHierarchy=pointHierarchyIn,
        inPointLevel=1,
        outPointLevel=2,
        inFeatures=bnConvFeatures1,
        inNumFeatures=k * 2,
        convRadius=radiusList[1])

    #### Convolution 3: 1x1 narrows 2k -> k, then upsample level 2 -> 1.
    # (Original comment mislabeled this as "Convolution 5".)
    bnConvFeatures2 = batch_norm_RELU_drop_out("DeNoiser_Reduce_2_In_BN",
                                               convFeatures2, isTraining, True,
                                               dropVal)
    bnConvFeatures2 = conv_1x1("DeNoiser_Reduce_2", bnConvFeatures2, k * 2, k)
    bnConvFeatures2 = batch_norm_RELU_drop_out("DeNoiser_Reduce_2_Out_BN",
                                               bnConvFeatures2, isTraining,
                                               True, dropVal)
    convFeatures3 = convBuilder.create_convolution(
        convName="DeNoiser_Conv_3",
        inPointHierarchy=pointHierarchyIn,
        inPointLevel=2,
        outPointLevel=1,
        inFeatures=bnConvFeatures2,
        inNumFeatures=k,
        convRadius=radiusList[1])

    #### Convolution 4: skip-concat with Convolution 1's output (k + k = 2k
    # channels), narrow back to k, then upsample level 1 -> 0 producing the
    # 3-channel displacement field.
    # (Original comment mislabeled this as "Convolution 6".)
    convFeatures3 = tf.concat([convFeatures3, convFeatures1], axis=1)
    bnConvFeatures3 = batch_norm_RELU_drop_out("DeNoiser_Reduce_3_In_BN",
                                               convFeatures3, isTraining, True,
                                               dropVal)
    bnConvFeatures3 = conv_1x1("DeNoiser_Reduce_3", bnConvFeatures3, k * 2, k)
    bnConvFeatures3 = batch_norm_RELU_drop_out("DeNoiser_Reduce_3_Out_BN",
                                               bnConvFeatures3, isTraining,
                                               True, dropVal)
    convFeatures4 = convBuilder.create_convolution(
        convName="DeNoiser_Conv_4",
        inPointHierarchy=pointHierarchyIn,
        inPointLevel=1,
        outPointLevel=0,
        inFeatures=bnConvFeatures3,
        inNumFeatures=k,
        convRadius=radiusList[0],
        multiFeatureConv=True,
        outNumFeatures=3)

    # Bound displacements to (-1, 1) before scaling.
    displacements = tf.tanh(convFeatures4)
    if convBuilder.relativeRadius_:
        # Relative radii: scale each point's displacement by the diagonal
        # length of its model's axis-aligned bounding box.
        aabbSizes = tf.norm(pointHierarchyIn.aabbMax_ -
                            pointHierarchyIn.aabbMin_,
                            axis=1)
        ptAABBSizes = tf.tile(
            tf.reshape(
                tf.gather(aabbSizes,
                          tf.reshape(pointHierarchyIn.batchIds_[0], [-1])),
                [-1, 1]), [1, 3])
        displacements = tf.multiply(displacements, ptAABBSizes)

    # Final scale limits the displacement magnitude to the first conv radius.
    return displacements * radiusList[0]