def connectIntermediateLayers(self, layersToConnect, inputSampleInFullyCN_Train, inputSampleInFullyCN_Test, featMapsInFullyCN):
    centralVoxelsTrain = self.centralVoxelsTrain
    centralVoxelsTest = self.centralVoxelsTest

    for l_i in layersToConnect:
        currentLayer = self.networkLayers[l_i]
        output_train = currentLayer.outputTrain
        output_trainShape = currentLayer.outputShapeTrain
        output_test = currentLayer.outputTest
        output_testShape = currentLayer.outputShapeTest

        # Get the middle part of the feature maps at intermediate levels to make them the same
        # shape as the input of the first fully connected layer
        featMapsCenter_Train = extractCenterFeatMaps(output_train, output_trainShape, centralVoxelsTrain)
        featMapsCenter_Test = extractCenterFeatMaps(output_test, output_testShape, centralVoxelsTest)

        featMapsInFullyCN = featMapsInFullyCN + currentLayer._numberOfFeatureMaps

        inputSampleInFullyCN_Train = T.concatenate([inputSampleInFullyCN_Train, featMapsCenter_Train], axis=1)
        inputSampleInFullyCN_Test = T.concatenate([inputSampleInFullyCN_Test, featMapsCenter_Test], axis=1)

    return [featMapsInFullyCN, inputSampleInFullyCN_Train, inputSampleInFullyCN_Test]
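# --------------------------------------------------------------------------------------- #
# NOTE: the sketch below is illustrative and NOT part of the original source.
# `extractCenterFeatMaps` is defined elsewhere in the repository; judging only from how it
# is called above (a 5-D feature-map tensor, its static shape, and the number of central
# voxels to keep per spatial axis), it is assumed to crop the spatial centre of the maps.
# The hypothetical `_extractCenterFeatMaps_sketch` below shows that assumed behaviour and
# works on either a numpy array or a Theano tensor laid out as (batch, channels, x, y, z).
# --------------------------------------------------------------------------------------- #
def _extractCenterFeatMaps_sketch(featMaps, featMapsShape, centralVoxels):
    # featMapsShape and centralVoxels are plain Python lists, so the offsets are ints
    offX = (featMapsShape[2] - centralVoxels[0]) // 2
    offY = (featMapsShape[3] - centralVoxels[1]) // 2
    offZ = (featMapsShape[4] - centralVoxels[2]) // 2
    # Keep all batches and channels, slice out the central spatial block
    return featMaps[:, :,
                    offX:offX + centralVoxels[0],
                    offY:offY + centralVoxels[1],
                    offZ:offZ + centralVoxels[2]]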
# Builds the two-path architecture with semi-dense connectivity (LiviaNet3DConvLayer blocks).
def generateNetworkLayers(self, cnnLayers, kernel_Shapes, maxPooling_Layer, sampleShape_Train, sampleShape_Test,
                          inputSample_Train, inputSample_Train_Bottom, inputSample_Test, inputSample_Test_Bottom,
                          layersToConnect):

    rng = np.random.RandomState(24575)

    # Define inputs for the first layers (re-used for the next layers)
    inputSampleToNextLayer_Train = inputSample_Train
    inputSampleToNextLayer_Test = inputSample_Test
    inputSampleToNextLayer_Train_Bottom = inputSample_Train_Bottom
    inputSampleToNextLayer_Test_Bottom = inputSample_Test_Bottom
    inputSampleToNextLayerShape_Train = sampleShape_Train
    inputSampleToNextLayerShape_Test = sampleShape_Test

    # Get the number of convolutional and fully connected layers
    numLayers = len(kernel_Shapes)
    numberCNNLayers = []
    numberFCLayers = []
    for l_i in range(1, len(kernel_Shapes)):
        if len(kernel_Shapes[l_i]) == 3:
            numberCNNLayers = l_i + 1
    numberFCLayers = numLayers - numberCNNLayers

    ######### -------------- Generate the convolutional layers -------------- #########
    # Some checks
    if self.weight_Initialization_CNN == 2:
        if len(self.weightsTrainedIdx) != numberCNNLayers:
            print(" ... WARNING!!!! Number of indexes specified for trained layers does not correspond with the number of conv layers in the created architecture...")

    if self.weight_Initialization_CNN == 2:
        weightsNames = getWeightsSet(self.weightsFolderName, self.weightsTrainedIdx)

    for l_i in xrange(0, numberCNNLayers):
        # Get the properties of this layer.
        # The second element is the number of feature maps of the previous layer.
        currentLayerKernelShape = [cnnLayers[l_i], inputSampleToNextLayerShape_Train[1]] + kernel_Shapes[l_i]

        # If the weights are initialized from another pre-trained network they are loaded at this stage;
        # otherwise they are left empty
        weights = []
        if self.weight_Initialization_CNN == 2:
            weights = np.load(weightsNames[l_i])

        maxPoolingParameters = []
        dropoutRate = 0.0

        ### TOP ###
        myLiviaNet3DConvLayerTop = LiviaNet3DConvLayer.LiviaNet3DConvLayer(
            rng, l_i,
            inputSampleToNextLayer_Train, inputSampleToNextLayer_Test,
            inputSampleToNextLayerShape_Train, inputSampleToNextLayerShape_Test,
            currentLayerKernelShape,
            self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
            maxPoolingParameters,
            self.weight_Initialization_CNN, weights,
            self.activationType, dropoutRate)
        self.networkLayers.append(myLiviaNet3DConvLayerTop)

        ### BOTTOM ###
        myLiviaNet3DConvLayerBottom = LiviaNet3DConvLayer.LiviaNet3DConvLayer(
            rng, l_i,
            inputSampleToNextLayer_Train_Bottom, inputSampleToNextLayer_Test_Bottom,
            inputSampleToNextLayerShape_Train, inputSampleToNextLayerShape_Test,
            currentLayerKernelShape,
            self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
            maxPoolingParameters,
            self.weight_Initialization_CNN, weights,
            self.activationType, dropoutRate)
        self.networkLayers.append(myLiviaNet3DConvLayerBottom)

        # Just for printing
        inputSampleToNextLayer_Train_Old = inputSampleToNextLayerShape_Train
        inputSampleToNextLayer_Test_Old = inputSampleToNextLayerShape_Test

        # Update inputs for the next layer
        inputSampleToNextLayer_Train = myLiviaNet3DConvLayerTop.outputTrain
        inputSampleToNextLayer_Test = myLiviaNet3DConvLayerTop.outputTest
        inputSampleToNextLayer_Train_Bottom = myLiviaNet3DConvLayerBottom.outputTrain
        inputSampleToNextLayer_Test_Bottom = myLiviaNet3DConvLayerBottom.outputTest
        inputSampleToNextLayerShape_Train = myLiviaNet3DConvLayerTop.outputShapeTrain
        inputSampleToNextLayerShape_Test = myLiviaNet3DConvLayerTop.outputShapeTest

        print(" ----- (Training) Input shape: {} ---> Output shape: {} || kernel shape {}".format(inputSampleToNextLayer_Train_Old, inputSampleToNextLayerShape_Train, currentLayerKernelShape))
        print(" ----- (Testing) Input shape: {} ---> Output shape: {}".format(inputSampleToNextLayer_Test_Old, inputSampleToNextLayerShape_Test))

    ### Create the semi-dense connectivity
    centralVoxelsTrain = self.centralVoxelsTrain
    centralVoxelsTest = self.centralVoxelsTest
    numLayersPerPath = len(self.networkLayers) / 2
    featMapsInFullyCN = 0

    # ------- TOP -------- #
    print(" ----------- TOP PATH ----------------")
    for l_i in xrange(0, numLayersPerPath - 1):
        print(' --- concatenating layer {} ...'.format(str(l_i)))
        currentLayer = self.networkLayers[2 * l_i]  # Access the layers of the top path
        output_train = currentLayer.outputTrain
        output_trainShape = currentLayer.outputShapeTrain
        output_test = currentLayer.outputTest
        output_testShape = currentLayer.outputShapeTest

        # Get the middle part of the feature maps at intermediate levels to make them the same
        # shape as the input of the first fully connected layer
        featMapsCenter_Train = extractCenterFeatMaps(output_train, output_trainShape, centralVoxelsTrain)
        featMapsCenter_Test = extractCenterFeatMaps(output_test, output_testShape, centralVoxelsTest)

        featMapsInFullyCN = featMapsInFullyCN + currentLayer._numberOfFeatureMaps

        inputSampleToNextLayer_Train = T.concatenate([inputSampleToNextLayer_Train, featMapsCenter_Train], axis=1)
        inputSampleToNextLayer_Test = T.concatenate([inputSampleToNextLayer_Test, featMapsCenter_Test], axis=1)

    # ------- BOTTOM -------- #
    print(" ---------- BOTTOM PATH ---------------")
    for l_i in xrange(0, numLayersPerPath - 1):
        print(' --- concatenating layer {} ...'.format(str(l_i)))
        currentLayer = self.networkLayers[2 * l_i + 1]  # Access the layers of the bottom path
        output_train = currentLayer.outputTrain
        output_trainShape = currentLayer.outputShapeTrain
        output_test = currentLayer.outputTest
        output_testShape = currentLayer.outputShapeTest

        # Get the middle part of the feature maps at intermediate levels to make them the same
        # shape as the input of the first fully connected layer
        featMapsCenter_Train = extractCenterFeatMaps(output_train, output_trainShape, centralVoxelsTrain)
        featMapsCenter_Test = extractCenterFeatMaps(output_test, output_testShape, centralVoxelsTest)

        featMapsInFullyCN = featMapsInFullyCN + currentLayer._numberOfFeatureMaps

        inputSampleToNextLayer_Train_Bottom = T.concatenate([inputSampleToNextLayer_Train_Bottom, featMapsCenter_Train], axis=1)
        inputSampleToNextLayer_Test_Bottom = T.concatenate([inputSampleToNextLayer_Test_Bottom, featMapsCenter_Test], axis=1)

    ######### -------------- Generate the fully connected layers -------------- #########
    inputToFullyCN_Train = inputSampleToNextLayer_Train
    inputToFullyCN_Train = T.concatenate([inputToFullyCN_Train, inputSampleToNextLayer_Train_Bottom], axis=1)
    inputToFullyCN_Test = inputSampleToNextLayer_Test
    inputToFullyCN_Test = T.concatenate([inputToFullyCN_Test, inputSampleToNextLayer_Test_Bottom], axis=1)

    featMapsInFullyCN = featMapsInFullyCN + inputSampleToNextLayerShape_Train[1] * 2  # Because we have two symmetric paths

    # Define inputs
    inputFullyCNShape_Train = [self.batch_Size, featMapsInFullyCN] + inputSampleToNextLayerShape_Train[2:5]
    inputFullyCNShape_Test = [self.batch_Size, featMapsInFullyCN] + inputSampleToNextLayerShape_Test[2:5]

    # Kamnitsas applied padding and mirroring to the images when kernels in FC layers were larger than 1x1x1.
    # In this work we employ 1x1x1 kernels, so there is no need to apply padding or mirroring.
    # TODO. Check
    print(" --- Starting to create the fully connected layers....")
    for l_i in xrange(numberCNNLayers, numLayers):
        numberOfKernels = cnnLayers[l_i]
        kernel_shape = [kernel_Shapes[l_i][0], kernel_Shapes[l_i][0], kernel_Shapes[l_i][0]]
        currentLayerKernelShape = [cnnLayers[l_i], inputFullyCNShape_Train[1]] + kernel_shape

        # If the weights are initialized from another pre-trained network they are loaded at this stage;
        # otherwise they are left empty
        weights = []

        applyBatchNorm = True
        epochsToApplyBatchNorm = 60
        maxPoolingParameters = []
        dropoutRate = self.dropout_Rates[l_i - numberCNNLayers]

        myLiviaNet3DFullyConnectedLayer = LiviaNet3DConvLayer.LiviaNet3DConvLayer(
            rng, l_i,
            inputToFullyCN_Train, inputToFullyCN_Test,
            inputFullyCNShape_Train, inputFullyCNShape_Test,
            currentLayerKernelShape,
            self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
            maxPoolingParameters,
            self.weight_Initialization_FCN, weights,
            self.activationType, dropoutRate)
        self.networkLayers.append(myLiviaNet3DFullyConnectedLayer)

        # Just for printing
        inputFullyCNShape_Train_Old = inputFullyCNShape_Train
        inputFullyCNShape_Test_Old = inputFullyCNShape_Test

        # Update inputs for the next layer
        inputToFullyCN_Train = myLiviaNet3DFullyConnectedLayer.outputTrain
        inputToFullyCN_Test = myLiviaNet3DFullyConnectedLayer.outputTest
        inputFullyCNShape_Train = myLiviaNet3DFullyConnectedLayer.outputShapeTrain
        inputFullyCNShape_Test = myLiviaNet3DFullyConnectedLayer.outputShapeTest

        print(" ----- (Training) Input shape: {} ---> Output shape: {} || kernel shape {}".format(inputFullyCNShape_Train_Old, inputFullyCNShape_Train, currentLayerKernelShape))
        print(" ----- (Testing) Input shape: {} ---> Output shape: {}".format(inputFullyCNShape_Test_Old, inputFullyCNShape_Test))

    ######### -------------- Classification layer -------------- #########
    # Define the kernel shape for the classification layer
    featMaps_LastLayer = self.cnnLayers[-1]
    filterShape_ClassificationLayer = [self.n_classes, featMaps_LastLayer, 1, 1, 1]

    # Define inputs and shapes for the classification layer
    inputImageClassificationLayer_Train = inputToFullyCN_Train
    inputImageClassificationLayer_Test = inputToFullyCN_Test
    inputImageClassificationLayerShape_Train = inputFullyCNShape_Train
    inputImageClassificationLayerShape_Test = inputFullyCNShape_Test

    print(" ----- (Classification layer) kernel shape {}".format(filterShape_ClassificationLayer))

    classification_layer_Index = l_i

    weights = []
    applyBatchNorm = True
    epochsToApplyBatchNorm = 60
    maxPoolingParameters = []
    dropoutRate = self.dropout_Rates[len(self.dropout_Rates) - 1]
    softmaxTemperature = 1.0

    myLiviaNet_ClassificationLayer = LiviaSoftmax.LiviaSoftmax(
        rng, classification_layer_Index,
        inputImageClassificationLayer_Train, inputImageClassificationLayer_Test,
        inputImageClassificationLayerShape_Train, inputImageClassificationLayerShape_Test,
        filterShape_ClassificationLayer,
        self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
        maxPoolingParameters,
        self.weight_Initialization_FCN, weights,
        0,  # self.activationType
        dropoutRate, softmaxTemperature)

    self.networkLayers.append(myLiviaNet_ClassificationLayer)
    self.lastLayer = myLiviaNet_ClassificationLayer

    print(" ----- (Training) Input shape: {} ---> Output shape: {} || kernel shape {}".format(inputImageClassificationLayerShape_Train, myLiviaNet_ClassificationLayer.outputShapeTrain, filterShape_ClassificationLayer))
    print(" ----- (Testing) Input shape: {} ---> Output shape: {}".format(inputImageClassificationLayerShape_Test, myLiviaNet_ClassificationLayer.outputShapeTest))
# Builds the two-path architecture with hyper-dense connectivity (HyperDenseNetConvLayer blocks).
def generateNetworkLayers(self, cnnLayers, kernel_Shapes, maxPooling_Layer, sampleShape_Train, sampleShape_Test,
                          inputSample_Train, inputSample_Train_Bottom, inputSample_Test, inputSample_Test_Bottom,
                          layersToConnect):

    rng = np.random.RandomState(24575)

    # Define inputs for the first layers (re-used for the next layers)
    inputSampleToNextLayer_Train = inputSample_Train
    inputSampleToNextLayer_Test = inputSample_Test
    inputSampleToNextLayer_Train_Bottom = inputSample_Train_Bottom
    inputSampleToNextLayer_Test_Bottom = inputSample_Test_Bottom
    inputSampleToNextLayerShape_Train = sampleShape_Train
    inputSampleToNextLayerShape_Test = sampleShape_Test

    # Get the number of convolutional and fully connected layers
    numLayers = len(kernel_Shapes)
    numberCNNLayers = []
    numberFCLayers = []
    for l_i in range(1, len(kernel_Shapes)):
        if len(kernel_Shapes[l_i]) == 3:
            numberCNNLayers = l_i + 1
    numberFCLayers = numLayers - numberCNNLayers

    ######### -------------- Generate the convolutional layers -------------- #########
    # Some checks
    if self.weight_Initialization_CNN == 2:
        if len(self.weightsTrainedIdx) != numberCNNLayers:
            print(" ... WARNING!!!! Number of indexes specified for trained layers does not correspond with the number of conv layers in the created architecture...")

    if self.weight_Initialization_CNN == 2:
        weightsNames = getWeightsSet(self.weightsFolderName, self.weightsTrainedIdx)

    numberOfOutputKernels = [0]

    for l_i in xrange(0, numberCNNLayers):
        # Get the properties of this layer.
        # The second element is the number of feature maps of the previous layer.
        currentLayerKernelShape = [cnnLayers[l_i], inputSampleToNextLayerShape_Train[1]] + kernel_Shapes[l_i]

        # If the weights are initialized from another pre-trained network they are loaded at this stage;
        # otherwise they are left empty
        weights = []
        if self.weight_Initialization_CNN == 2:
            weights = np.load(weightsNames[l_i])

        maxPoolingParameters = []
        dropoutRate = 0.0

        ### TOP ###
        print('--' * 50)
        print(' **** TOP ****')
        myLiviaNet3DConvLayerTop = HyperDenseNetConvLayer.HyperDenseNetConvLayer(
            rng, l_i,
            inputSampleToNextLayer_Train, inputSampleToNextLayer_Test,
            inputSampleToNextLayerShape_Train, inputSampleToNextLayerShape_Test,
            currentLayerKernelShape,
            self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
            maxPoolingParameters,
            self.weight_Initialization_CNN, weights,
            self.activationType, dropoutRate)
        self.networkLayers.append(myLiviaNet3DConvLayerTop)
        numberOfOutputKernels.append(myLiviaNet3DConvLayerTop.outputShapeTrain[1])

        ### BOTTOM ###
        print(' **** BOTTOM ****')
        myLiviaNet3DConvLayerBottom = HyperDenseNetConvLayer.HyperDenseNetConvLayer(
            rng, l_i,
            inputSampleToNextLayer_Train_Bottom, inputSampleToNextLayer_Test_Bottom,
            inputSampleToNextLayerShape_Train, inputSampleToNextLayerShape_Test,
            currentLayerKernelShape,
            self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
            maxPoolingParameters,
            self.weight_Initialization_CNN, weights,
            self.activationType, dropoutRate)
        self.networkLayers.append(myLiviaNet3DConvLayerBottom)

        # The standard (non-dense) input update is not used here; the dense connections below
        # build the inputs to the next layer instead.
        # inputSampleToNextLayer_Train = myLiviaNet3DConvLayerTop.outputTrain
        # inputSampleToNextLayer_Test = myLiviaNet3DConvLayerTop.outputTest
        # inputSampleToNextLayer_Train_Bottom = myLiviaNet3DConvLayerBottom.outputTrain
        # inputSampleToNextLayer_Test_Bottom = myLiviaNet3DConvLayerBottom.outputTest
        # inputSampleToNextLayerShape_Train = myLiviaNet3DConvLayerTop.outputShapeTrain
        # inputSampleToNextLayerShape_Test = myLiviaNet3DConvLayerTop.outputShapeTest

        # ~~~~~~~ Make here the dense connections ~~~~~~~~~~~
        print(" -Output Layer shape: {}".format(myLiviaNet3DConvLayerTop.outputShapeTrain))

        # Copy the 5-D output shapes (batch, channels, x, y, z); the channel dimension is
        # updated below to account for the dense connections
        shapeTrain = list(myLiviaNet3DConvLayerTop.outputShapeTrain[0:5])
        shapeTest = list(myLiviaNet3DConvLayerTop.outputShapeTest[0:5])

        # Define the sizes of the (densely connected) convolutional layers
        denseInputNextLayerShape = shapeTrain
        denseInputNextLayerShapeTesting = shapeTest

        print(" -Adding Dense connections....")
        numLayersPerPath = len(self.networkLayers) / 2

        for d_l in xrange(numLayersPerPath):
            denseInputNextLayerShape[1] = denseInputNextLayerShape[1] + numberOfOutputKernels[d_l]
            denseInputNextLayerShapeTesting[1] = denseInputNextLayerShapeTesting[1] + numberOfOutputKernels[d_l]

        denseInputNextLayerShape[1] = 2 * denseInputNextLayerShape[1]
        denseInputNextLayerShapeTesting[1] = 2 * denseInputNextLayerShapeTesting[1]

        print(" -denseInput Next Layer shape: {}".format(denseInputNextLayerShape))

        inputSampleToNextLayerShape_Train = denseInputNextLayerShape
        inputSampleToNextLayerShape_Test = denseInputNextLayerShapeTesting

        numberOfCentralVoxelsToGetTraining = inputSampleToNextLayerShape_Train[2:5]
        numberOfCentralVoxelsToGetTesting = inputSampleToNextLayerShape_Test[2:5]

        # Get the outputs of the current layer
        # ~~~~ TOP ~~~~
        inputSampleToNextLayer_Train = myLiviaNet3DConvLayerTop.outputTrain  # inputImageToNextLayer_Top
        inputSampleToNextLayer_Test = myLiviaNet3DConvLayerTop.outputTest  # inputImageToNextLayerTesting_Top
        # ~~~~ BOTTOM ~~~~
        inputSampleToNextLayer_Train_Bottom = myLiviaNet3DConvLayerBottom.outputTrain  # inputImageToNextLayer_Bottom
        inputSampleToNextLayer_Test_Bottom = myLiviaNet3DConvLayerBottom.outputTest  # inputImageToNextLayerTesting_Bottom

        print(" ----- Concatenating feature maps.... ----- ")

        # Concatenate the feature maps of the current layer (cross-path connections)
        # ~~~~ TOP ~~~~
        inputImageToNextLayer_Top_Temp = inputSampleToNextLayer_Train
        inputImageToNextLayerTesting_Top_Temp = inputSampleToNextLayer_Test
        inputSampleToNextLayer_Train = T.concatenate([inputSampleToNextLayer_Train, inputSampleToNextLayer_Train_Bottom], axis=1)
        inputSampleToNextLayer_Test = T.concatenate([inputSampleToNextLayer_Test, inputSampleToNextLayer_Test_Bottom], axis=1)
        # ~~~~ BOTTOM ~~~~
        inputSampleToNextLayer_Train_Bottom = T.concatenate([inputSampleToNextLayer_Train_Bottom, inputImageToNextLayer_Top_Temp], axis=1)
        inputSampleToNextLayer_Test_Bottom = T.concatenate([inputSampleToNextLayer_Test_Bottom, inputImageToNextLayerTesting_Top_Temp], axis=1)

        # Concatenate the feature maps of the previous layers
        for d_l in xrange(0, numLayersPerPath - 1):
            print(" ----> Layer {} ".format(d_l))
            print(" ----> outputShapeTrain {} ".format(self.networkLayers[2 * d_l].outputShapeTrain))

            # ~~~~~ TOP ~~~~ #
            # Training samples
            featMapsCenter_Train_Top = extractCenterFeatMaps(self.networkLayers[2 * d_l].outputTrain,
                                                             self.networkLayers[2 * d_l].outputShapeTrain,
                                                             numberOfCentralVoxelsToGetTraining)
            # Testing samples
            featMapsCenter_Test_Top = extractCenterFeatMaps(self.networkLayers[2 * d_l].outputTest,
                                                            self.networkLayers[2 * d_l].outputShapeTest,
                                                            numberOfCentralVoxelsToGetTesting)

            # ~~~~~ BOTTOM ~~~~ #
            # Training samples
            featMapsCenter_Train_Bottom = extractCenterFeatMaps(self.networkLayers[2 * d_l + 1].outputTrain,
                                                                self.networkLayers[2 * d_l + 1].outputShapeTrain,
                                                                numberOfCentralVoxelsToGetTraining)
            # Testing samples
            featMapsCenter_Test_Bottom = extractCenterFeatMaps(self.networkLayers[2 * d_l + 1].outputTest,
                                                               self.networkLayers[2 * d_l + 1].outputShapeTest,
                                                               numberOfCentralVoxelsToGetTesting)

            # ~~~~ TOP ~~~~ #
            # (Dense) connections with the same path
            inputSampleToNextLayer_Train = T.concatenate([inputSampleToNextLayer_Train, featMapsCenter_Train_Top], axis=1)
            inputSampleToNextLayer_Test = T.concatenate([inputSampleToNextLayer_Test, featMapsCenter_Test_Top], axis=1)
            # (Dense) connections with the other path
            inputSampleToNextLayer_Train = T.concatenate([inputSampleToNextLayer_Train, featMapsCenter_Train_Bottom], axis=1)
            inputSampleToNextLayer_Test = T.concatenate([inputSampleToNextLayer_Test, featMapsCenter_Test_Bottom], axis=1)

            # ~~~~ BOTTOM ~~~~ #
            # Connections with the same path
            inputSampleToNextLayer_Train_Bottom = T.concatenate([inputSampleToNextLayer_Train_Bottom, featMapsCenter_Train_Bottom], axis=1)
            inputSampleToNextLayer_Test_Bottom = T.concatenate([inputSampleToNextLayer_Test_Bottom, featMapsCenter_Test_Bottom], axis=1)
            # Connections with the other path
            inputSampleToNextLayer_Train_Bottom = T.concatenate([inputSampleToNextLayer_Train_Bottom, featMapsCenter_Train_Top], axis=1)
            inputSampleToNextLayer_Test_Bottom = T.concatenate([inputSampleToNextLayer_Test_Bottom, featMapsCenter_Test_Top], axis=1)

    ######### -------------- Generate the fully connected layers -------------- #########
    inputToFullyCN_Train = inputSampleToNextLayer_Train
    inputToFullyCN_Train = T.concatenate([inputToFullyCN_Train, inputSampleToNextLayer_Train_Bottom], axis=1)
    inputToFullyCN_Test = inputSampleToNextLayer_Test
    inputToFullyCN_Test = T.concatenate([inputToFullyCN_Test, inputSampleToNextLayer_Test_Bottom], axis=1)

    featMapsInFullyCN = inputSampleToNextLayerShape_Train[1] * 2
    # featMapsInFullyCN = featMapsInFullyCN + inputSampleToNextLayerShape_Train[1]

    # Define inputs
    inputFullyCNShape_Train = [self.batch_Size, featMapsInFullyCN] + inputSampleToNextLayerShape_Train[2:5]
    inputFullyCNShape_Test = [self.batch_Size, featMapsInFullyCN] + inputSampleToNextLayerShape_Test[2:5]

    # Kamnitsas applied padding and mirroring to the images when kernels in FC layers were larger than 1x1x1.
    # In this work we employ 1x1x1 kernels, so there is no need to apply padding or mirroring.
    # TODO. Check
    # Jose Dolz. April, 2018.
    print("**" * 50)
    print(" --- Starting to create the fully connected layers....")
    print("**" * 50)

    for l_i in xrange(numberCNNLayers, numLayers):
        numberOfKernels = cnnLayers[l_i]
        kernel_shape = [kernel_Shapes[l_i][0], kernel_Shapes[l_i][0], kernel_Shapes[l_i][0]]
        currentLayerKernelShape = [cnnLayers[l_i], inputFullyCNShape_Train[1]] + kernel_shape

        # If the weights are initialized from another pre-trained network they are loaded at this stage;
        # otherwise they are left empty
        weights = []

        applyBatchNorm = True
        epochsToApplyBatchNorm = 60
        maxPoolingParameters = []
        dropoutRate = self.dropout_Rates[l_i - numberCNNLayers]

        myLiviaNet3DFullyConnectedLayer = HyperDenseNetConvLayer.HyperDenseNetConvLayer(
            rng, l_i,
            inputToFullyCN_Train, inputToFullyCN_Test,
            inputFullyCNShape_Train, inputFullyCNShape_Test,
            currentLayerKernelShape,
            self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
            maxPoolingParameters,
            self.weight_Initialization_FCN, weights,
            self.activationType, dropoutRate)
        self.networkLayers.append(myLiviaNet3DFullyConnectedLayer)

        # Just for printing
        inputFullyCNShape_Train_Old = inputFullyCNShape_Train
        inputFullyCNShape_Test_Old = inputFullyCNShape_Test

        # Update inputs for the next layer
        inputToFullyCN_Train = myLiviaNet3DFullyConnectedLayer.outputTrain
        inputToFullyCN_Test = myLiviaNet3DFullyConnectedLayer.outputTest
        inputFullyCNShape_Train = myLiviaNet3DFullyConnectedLayer.outputShapeTrain
        inputFullyCNShape_Test = myLiviaNet3DFullyConnectedLayer.outputShapeTest

        print(" ----- (Training) Input shape: {} ---> Output shape: {} || kernel shape {}".format(inputFullyCNShape_Train_Old, inputFullyCNShape_Train, currentLayerKernelShape))
        print(" ----- (Testing) Input shape: {} ---> Output shape: {}".format(inputFullyCNShape_Test_Old, inputFullyCNShape_Test))
        print("--" * 35)

    ######### -------------- Classification layer -------------- #########
    # Define the kernel shape for the classification layer
    featMaps_LastLayer = self.cnnLayers[-1]
    filterShape_ClassificationLayer = [self.n_classes, featMaps_LastLayer, 1, 1, 1]

    # Define inputs and shapes for the classification layer
    inputImageClassificationLayer_Train = inputToFullyCN_Train
    inputImageClassificationLayer_Test = inputToFullyCN_Test
    inputImageClassificationLayerShape_Train = inputFullyCNShape_Train
    inputImageClassificationLayerShape_Test = inputFullyCNShape_Test

    print("--" * 35)
    print(" ----- (Classification layer) kernel shape {}".format(filterShape_ClassificationLayer))

    classification_layer_Index = l_i

    weights = []
    applyBatchNorm = True
    epochsToApplyBatchNorm = 60
    maxPoolingParameters = []
    dropoutRate = self.dropout_Rates[len(self.dropout_Rates) - 1]
    softmaxTemperature = 1.0

    myLiviaNet_ClassificationLayer = LiviaSoftmax.LiviaSoftmax(
        rng, classification_layer_Index,
        inputImageClassificationLayer_Train, inputImageClassificationLayer_Test,
        inputImageClassificationLayerShape_Train, inputImageClassificationLayerShape_Test,
        filterShape_ClassificationLayer,
        self.applyBatchNorm, self.numberEpochToApplyBatchNorm,
        maxPoolingParameters,
        self.weight_Initialization_FCN, weights,
        0,  # self.activationType
        dropoutRate, softmaxTemperature)

    self.networkLayers.append(myLiviaNet_ClassificationLayer)
    self.lastLayer = myLiviaNet_ClassificationLayer

    print(" ----- (Training) Input shape: {} ---> Output shape: {} || kernel shape {}".format(inputImageClassificationLayerShape_Train, myLiviaNet_ClassificationLayer.outputShapeTrain, filterShape_ClassificationLayer))
    print(" ----- (Testing) Input shape: {} ---> Output shape: {}".format(inputImageClassificationLayerShape_Test, myLiviaNet_ClassificationLayer.outputShapeTest))
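# --------------------------------------------------------------------------------------- #
# NOTE: illustrative arithmetic, not part of the original source. Assuming a hypothetical
# configuration of three conv layers with [25, 25, 50] feature maps per path, the dense
# channel bookkeeping above evolves as follows (denseInputNextLayerShape[1] after each
# layer, i.e. 2 * (current layer + all previous layers) to cover both paths):
#   after layer 0: 2 * 25             = 50
#   after layer 1: 2 * (25 + 25)      = 100
#   after layer 2: 2 * (50 + 25 + 25) = 200
# The fully connected input then concatenates the top and bottom branches, so
# featMapsInFullyCN = 2 * 200 = 400 in this example.
# --------------------------------------------------------------------------------------- #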