Example #1
    def predict(self, network, x):
        # Unpack the per-layer weights and biases stored in the network dict.
        W1, W2, W3 = network['W1'], network['W2'], network['W3']
        B1, B2, B3 = network['b1'], network['b2'], network['b3']
        act = Activation()
        # Forward pass: two sigmoid hidden layers followed by a softmax output.
        a1 = act.sigmoid(np.dot(x, W1) + B1)
        a2 = act.sigmoid(np.dot(a1, W2) + B2)
        a3 = act.softMax(np.dot(a2, W3) + B3)
        return a3
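
The snippet assumes an Activation helper exposing sigmoid and softMax methods. A minimal sketch of such a class, written to match the call sites above (the implementations themselves are assumptions, not the original code):

import numpy as np

class Activation:
    def sigmoid(self, z):
        # Logistic function, applied elementwise.
        return 1.0 / (1.0 + np.exp(-z))

    def softMax(self, z):
        # Subtract the row-wise max before exponentiating for numerical stability.
        e = np.exp(z - np.max(z, axis=-1, keepdims=True))
        return e / np.sum(e, axis=-1, keepdims=True)
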
Example #2
    def __init__(self,
                 n_in,
                 n_out,
                 activation_last_layer='softmax',
                 activation='relu',
                 W=None,
                 b=None):
        """
        Typical hidden layer of a MLP: units are fully-connected and have
        sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
        and the bias vector b is of shape (n_out,).

        NOTE : The nonlinearity used here is ReLU

        Hidden unit activation is given by: ReLU(dot(input,W) + b)

        :type n_in: int
        :param n_in: dimensionality of input

        :type n_out: int
        :param n_out: number of hidden units

        :type activation: string
        :param activation: Non linearity to be applied in the hidden layer
        """
        self.input = None
        # initialize activation
        self.activation = Activation(activation).f
        self.activation_deriv = None
        if activation_last_layer:
            self.activation_deriv = Activation(activation_last_layer).f_deriv

        # initialize weight and bias (Glorot/Xavier uniform initialization)
        self.W = np.random.uniform(low=-np.sqrt(6. / (n_in + n_out)),
                                   high=np.sqrt(6. / (n_in + n_out)),
                                   size=(n_in, n_out))
        self.b = np.zeros(n_out, )
        self.grad_W = np.zeros(self.W.shape)
        self.grad_b = np.zeros(self.b.shape)

        # initialize batch normalization parameters
        self.batch_norm = None
        self.x_norm = None
        self.gamma = np.ones(n_out, )
        self.beta = np.zeros(n_out, )
        self.grad_gamma = np.zeros(self.gamma.shape)
        self.grad_beta = np.zeros(self.beta.shape)
        self.BN_mean = []
        self.BN_var = []

        # initialize dropout parameters
        self.retain = None
        self.dropout_p = None
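
The constructor only allocates the batch-normalization state (gamma, beta, x_norm, and the BN_mean/BN_var lists); the forward step that uses it is not shown. A minimal sketch of the standard training-time batch-norm computation these fields support (an assumption about the surrounding class, not its actual code):

import numpy as np

def batch_norm_forward(x, gamma, beta, eps=1e-8):
    # Normalize each feature over the batch, then scale by gamma and shift by beta.
    mean = x.mean(axis=0)
    var = x.var(axis=0)
    x_norm = (x - mean) / np.sqrt(var + eps)
    return gamma * x_norm + beta, x_norm, mean, var
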
Example #3
    def __init__(self, input, weights, activation="sigmoid"):
        self.activation = Activation().get_activation_function(activation)

        # input matrix: [n_examples, n_features]
        self.input = input

        # weights matrix: [1, n_features + 1]. +1 because the first element is a bias
        self.weights = weights
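
A hedged usage sketch consistent with the shape comments above (the class name Layer is hypothetical; only the constructor signature comes from the snippet):

import numpy as np

n_examples, n_features = 4, 3
X = np.random.randn(n_examples, n_features)
w = np.random.randn(1, n_features + 1)  # first element is the bias
layer = Layer(input=X, weights=w, activation="sigmoid")
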
Example #4
import numpy as np
import matplotlib.pyplot as plt

# Scalar computation process: z = ax + b with a = 10, b = 1 (see the print below)
g = Graph()
dGraph = g.set_as_default()

a = Variables(10, dGraph)
b = Variables(1, dGraph)
x = Placeholder(dGraph)

y = Multiplication(a, x, dGraph)
z = Addition(y, b, dGraph)

sess = Session()
result = sess.run(z, {x:12})

print("z = ax + b = 10*12 + 1 = ", result)


# Matrix multiplication process
g2 = Graph()
dGraph2 = g2.set_as_default()

a = Variables([[10,23], [23,1], [20,56]], dGraph2)
b = Variables([1,2], dGraph2)
x = Placeholder(dGraph2)

y = MatrixMultiplication(a, x, dGraph2)
z = Addition(y, b, dGraph2)

result = sess.run(z, {x:10})
print(result)

# Classification
activationObj = Activation()
samplesZ = np.linspace(-10, 10, 100)
samplesA = activationObj.sigmoid_function(samplesZ)
plt.plot(samplesZ, samplesA)
plt.show()
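
Session.run is not shown here; graphs like this are usually evaluated with a postorder traversal, so that every node's inputs are computed before the node itself. A minimal sketch under that assumption (the input_nodes attribute is hypothetical):

def traverse_postorder(operation):
    # Collect nodes so that each one appears after all of its inputs.
    order = []
    def recurse(node):
        if hasattr(node, 'input_nodes'):
            for input_node in node.input_nodes:
                recurse(input_node)
        order.append(node)
    recurse(operation)
    return order
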
Example #5
class RN18(Net):
    '''
    ResNet18, with a total of 18 weight layers.
    Note that some parameters are predetermined; the parameters that need to be specified are quoted in ''.
    For all ResNetBlock modules, the output sizes of the stage 1 and stage 2 conv2D blocks equal
    1/4 of that of the final stage.
    Conv1 - kernel:(3x3), pad:(1,1), stride:1, output: 'c1OutChannel'
    Conv2 - kernel:(3x3), pad:(1,1), stride:2, output: 'c2OutChannel'  # H and W reduced by half
    RNB1 - skipMode:identity, output: 'rnb1OutChannel'
    RNB2 - skipMode:identity, output: same as RNB1
    RNB3 - skipMode:identity, output: same as RNB1
    RNB4 - skipMode:conv, skipStride:2, output: 'rnb4OutChannel'  # H and W reduced by half
    RNB5 - skipMode:identity, output: same as RNB4
    pool - average pooling of RNB5 per channel, reducing the output to a vector of length
           'rnb4OutChannel'; 'pSize' specifies both the stride and the kernel size
    fc - outChannel: 'classNum'
    softmax - final classification layer
    '''
    def __init__(self, para):
        Net.__init__(self, para)
        convPara1 = {
            'instanceName': 'RN18' + '_Conv1',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['c1OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv1 = Conv2D(convPara1)
        self.norm1 = Normalize({'instanceName': 'RN18' + '_Norm1'})
        self.scale1 = Scale({'instanceName': 'RN18' + '_Scale1'})
        self.activation1 = Activation({
            'instanceName': 'RN18' + '_Activation1',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv1)
        self.layerList.append(self.norm1)
        self.layerList.append(self.scale1)
        self.layerList.append(self.activation1)
        convPara2 = {
            'instanceName': 'RN18' + '_Conv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 2,
            'outChannel': para['c2OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv2 = Conv2D(convPara2)
        self.norm2 = Normalize({'instanceName': 'RN18' + '_Norm2'})
        self.scale2 = Scale({'instanceName': 'RN18' + '_Scale2'})
        self.activation2 = Activation({
            'instanceName': 'RN18' + '_Activation2',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv2)
        self.layerList.append(self.norm2)
        self.layerList.append(self.scale2)
        self.layerList.append(self.activation2)
        self.rnb1 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB1',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb1)
        self.rnb2 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB2',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb2)
        self.rnb3 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB3',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb3)
        self.rnb4 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB4',
            'skipMode': 'conv',
            'skipStride': 2,
            'stride1': 2,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb4)
        self.rnb5 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB5',
            'skipMode': 'identity',
            'skipStride': 1,
            'stride1': 1,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb5)
        self.pool1 = Pool({
            'instanceName': 'RN18' + '_pool1',
            'poolType': 'ave',
            'stride': para['pSize'],
            'kernelShape': (para['pSize'], para['pSize'])
        })
        self.layerList.append(self.pool1)
        self.fc1 = FullyConnected({
            'instanceName': 'RN18' + '_fc1',
            'outChannel': para['classNum'],
            'bias': True
        })
        self.layerList.append(self.fc1)
        self.softmax = Softmax({'instanceName': 'RN18' + '_softmax'})
        self.layerList.append(self.softmax)

        self.bottomInterface = self.conv1
        self.topInterface = self.softmax
        self.softmax.setNet(self)

    def stack(self, top, bottom):
        self.top = top
        self.bottom = bottom

        self.conv1.stack(self.norm1, bottom)
        self.norm1.stack(self.scale1, self.conv1)
        self.scale1.stack(self.activation1, self.norm1)
        self.activation1.stack(self.conv2, self.scale1)

        self.conv2.stack(self.norm2, self.activation1)
        self.norm2.stack(self.scale2, self.conv2)
        self.scale2.stack(self.activation2, self.norm2)
        self.activation2.stack(self.rnb1, self.scale2)

        self.rnb1.stack(self.rnb2, self.activation2)
        self.rnb2.stack(self.rnb3, self.rnb1)
        self.rnb3.stack(self.rnb4, self.rnb2)
        self.rnb4.stack(self.rnb5, self.rnb3)
        self.rnb5.stack(self.pool1, self.rnb4)
        self.pool1.stack(self.fc1, self.rnb5)
        self.fc1.stack(self.softmax, self.pool1)
        self.softmax.stack(top, self.fc1)
        self.softmax.setSource(bottom)
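
A hedged instantiation sketch for a 32x32 input. All values are assumptions chosen to satisfy the docstring: Conv2 and RNB4 each halve H and W, so a 32x32 image reaches the pooling layer at 8x8, and pSize = 8 reduces it to a single value per channel:

para = {
    'instanceName': 'RN18',
    'c1OutChannel': 16,
    'c2OutChannel': 64,
    'rnb1OutChannel': 64,
    'rnb4OutChannel': 128,
    'pSize': 8,
    'classNum': 10
}
net = RN18(para)
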
Example #6
    def addLayer(self,
                 inputs=None,
                 neurons=None,
                 activations=None,
                 alpha=None,
                 outputs=None,
                 output=False):
        """
        Function to setup the neural network architecture

        INPUT:
        --------
        inputs: int
            inputs/ features/ predictor variables into the network, default is None
        neurons: int
            Amount of Neurons (or Units) to be utilized in this respective layer, default is None
        activations: string
            states the activation function to be used of ['elu', 'relu', 'leaky_relu', 'sigmoid', 'tanh', 'softmax'], default is None
        alpha: float
            alpha value for the leaky_relu activation function, default is 0.01 (in Activation.py)
        outputs: int
            output layer size, i.e. number of classes to classify or 1 for Regression.
        output: boolean
            flag for the right last shape of the Biases and Weights, default is False

        OUTPUT: 
        ---------
            stores the results in the member variables
        """

        ## Check for the proper Input and raise errors if not satisfied
        if neurons is None:
            if self.neurons is None:
                raise ValueError(
                    "Please state the number of neurons in a layer")
            else:
                neurons = self.neurons
        if activations is None:
            if self.activations is None:
                warnings.warn(
                    "Please specify the activation functions for the hidden layers, using sigmoid now"
                )
                self.activations = 'sigmoid'
                activations = self.activations
            else:
                activations = self.activations

        if self.weights is None:
            if inputs is None:
                if self.inputs is None:
                    raise ValueError(
                        "Please specify the number of inputs = number of features"
                    )
                inputs = self.inputs
            else:
                self.inputs = inputs

            print("Adding input layer with " + str(neurons) +
                  " neurons, using " + str(activations))
            W = self.initializeWeight(inputs, neurons, activations)
            b = np.zeros(shape=(neurons, 1))
            f = Activation(function=activations, alpha=alpha)

            self.weights = [W]
            self.biases = [b]
            self.act = [f]

        elif output:
            if outputs is None:
                if self.outputs is None:
                    raise ValueError(
                        "Please specify the number of outputs = number of classes to be predicted, or 1 for Regression."
                    )
                else:
                    outputs = self.outputs
            else:
                # Warn only if a different value was set earlier; then adopt the new one.
                if self.outputs is not None and self.outputs != outputs:
                    warnings.warn(
                        "The number of outputs was earlier set to " +
                        str(self.outputs) +
                        ", but the value specified in method addLayer of " +
                        str(outputs) + " replaces this.")
                self.outputs = outputs

            print("Adding output layer with " + str(outputs) +
                  " outputs and " + str(activations))

            previousLayerNeurons = self.weights[-1].shape[1]
            W = self.initializeWeight(previousLayerNeurons, outputs,
                                      activations)
            b = np.zeros(shape=(outputs, 1))
            f = Activation(function=activations, alpha=alpha)

            self.weights.append(W)
            self.biases.append(b)
            self.act.append(f)
        else:
            print("Adding layer with " + str(neurons) + " neurons using " +
                  str(activations))
            previousLayerNeurons = self.weights[-1].shape[1]
            W = self.initializeWeight(previousLayerNeurons, neurons,
                                      activations)
            b = np.zeros(shape=(neurons, 1))
            f = Activation(function=activations, alpha=alpha)

            self.weights.append(W)
            self.biases.append(b)
            self.act.append(f)
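
A hedged usage sketch of addLayer (the owning class name NeuralNetwork is hypothetical; the call pattern follows the three branches above: the first call builds the input layer, subsequent calls add hidden layers, and output=True closes the network):

net = NeuralNetwork()
net.addLayer(inputs=30, neurons=64, activations='relu')       # input layer
net.addLayer(neurons=32, activations='relu')                  # hidden layer
net.addLayer(outputs=2, activations='softmax', output=True)   # output layer
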
Example #7
class ResNetBlock(Subnet):
    '''
    On the main path, the first convolution block has a (1x1) kernel and zero padding. The second
    convolution block has a (3x3) kernel, (1,1) padding and a stride of 1. The third convolution
    block has a (1x1) kernel, zero padding and a stride of 1. The skip path is either an identity
    mapping or a convolution block with a (1x1) kernel and zero padding, whose number of output
    channels is the same as that of the third convolution block on the main path.

    Parameters required:
    'instanceName': name of the block
    'skipMode': selects the operation on the skip path, 'conv' or 'identity'
    'skipStride': stride of the convolution block on the skip path
    'stride1': stride of the first convolution block on the main path
    'outChannel1': number of output channels of the first convolution block on the main path
    'outChannel2': number of output channels of the second convolution block on the main path
    'outChannel3': number of output channels of the third convolution block on the main path
    'activationType': activation function of the nonlinear blocks, 'ReLU' or 'sigmoid'
    '''

    # 'conv' mode has a convolution block on the skip path. 'identity' mode is a strict pass-through.
    skipModes = ['conv', 'identity']

    def __init__(self, para):
        Subnet.__init__(self, para)
        self.layerList = []

        self.fork = Fork2({'instanceName': para['instanceName'] + '_fork'})
        self.layerList.append(self.fork)
        self.skipMode = para['skipMode']
        if self.skipMode == 'conv':
            convPara4 = {
                'instanceName': para['instanceName'] + '_skipConv1',
                'padding': False,
                'padShape': (0, 0),
                'stride': para['skipStride'],
                'outChannel': para['outChannel3'],
                'kernelShape': (1, 1),
                'bias': False
            }
            self.skipConv = Conv2D(convPara4)
            self.skipNorm = Normalize(
                {'instanceName': para['instanceName'] + '_skipNorm'})
            self.skipScale = Scale(
                {'instanceName': para['instanceName'] + '_skipScale'})
            self.layerList.append(self.skipConv)
            self.layerList.append(self.skipNorm)
            self.layerList.append(self.skipScale)

        convPara1 = {
            'instanceName': para['instanceName'] + '_mainConv1',
            'padding': False,
            'padShape': (0, 0),
            'stride': para['stride1'],
            'outChannel': para['outChannel1'],
            'kernelShape': (1, 1),
            'bias': False
        }
        convPara2 = {
            'instanceName': para['instanceName'] + '_mainConv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['outChannel2'],
            'kernelShape': (3, 3),
            'bias': False
        }
        convPara3 = {
            'instanceName': para['instanceName'] + '_mainConv3',
            'padding': False,
            'padShape': (0, 0),
            'stride': 1,
            'outChannel': para['outChannel3'],
            'kernelShape': (1, 1),
            'bias': False
        }

        self.mainConv1 = Conv2D(convPara1)
        self.mainNorm1 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm1'})
        self.mainScale1 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale1'})
        self.mainActivation1 = Activation({
            'instanceName':
            para['instanceName'] + '_mainReLU1',
            'activationType':
            para['activationType']
        })
        self.layerList.append(self.mainConv1)
        self.layerList.append(self.mainNorm1)
        self.layerList.append(self.mainScale1)
        self.layerList.append(self.mainActivation1)

        self.mainConv2 = Conv2D(convPara2)
        self.mainNorm2 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm2'})
        self.mainScale2 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale2'})
        self.mainActivation2 = Activation({
            'instanceName':
            para['instanceName'] + '_mainReLU2',
            'activationType':
            para['activationType']
        })
        self.layerList.append(self.mainConv2)
        self.layerList.append(self.mainNorm2)
        self.layerList.append(self.mainScale2)
        self.layerList.append(self.mainActivation2)

        self.mainConv3 = Conv2D(convPara3)
        self.mainNorm3 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm3'})
        self.mainScale3 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale3'})
        self.layerList.append(self.mainConv3)
        self.layerList.append(self.mainNorm3)
        self.layerList.append(self.mainScale3)

        self.sum = Sum2({'instanceName': para['instanceName'] + '_sum'})
        self.activation3 = Activation({
            'instanceName': para['instanceName'] + '_outReLU3',
            'activationType': para['activationType']
        })
        self.layerList.append(self.sum)
        self.layerList.append(self.activation3)
        self.bottomInterface = self.fork
        self.topInterface = self.activation3

    def stack(self, top, bottom):
        self.top = top
        self.bottom = bottom
        # skip path
        if self.skipMode == 'conv':
            self.fork.fork(self.skipConv, self.mainConv1, bottom)
            self.skipConv.stack(self.skipNorm, self.fork.skip)
            self.skipNorm.stack(self.skipScale, self.skipConv)
            self.skipScale.stack(self.sum.skip, self.skipNorm)
        else:
            self.fork.fork(self.sum.skip, self.mainConv1, bottom)
        # main path
        self.mainConv1.stack(self.mainNorm1, self.fork.main)
        self.mainNorm1.stack(self.mainScale1, self.mainConv1)
        self.mainScale1.stack(self.mainActivation1, self.mainNorm1)
        self.mainActivation1.stack(self.mainConv2, self.mainScale1)

        self.mainConv2.stack(self.mainNorm2, self.mainActivation1)
        self.mainNorm2.stack(self.mainScale2, self.mainConv2)
        self.mainScale2.stack(self.mainActivation2, self.mainNorm2)
        self.mainActivation2.stack(self.mainConv3, self.mainScale2)

        self.mainConv3.stack(self.mainNorm3, self.mainActivation2)
        self.mainNorm3.stack(self.mainScale3, self.mainConv3)
        self.mainScale3.stack(self.sum.main, self.mainNorm3)
        # sum
        if self.skipMode == 'conv':
            self.sum.sum(self.activation3, self.skipScale, self.mainScale3)
        else:
            self.sum.sum(self.activation3, self.fork.skip, self.mainScale3)
        self.activation3.stack(top, self.sum)
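
A hedged construction sketch for the two skip modes, using the parameter list from the docstring (the channel values are arbitrary assumptions):

# Identity skip: input and output shapes must already match.
identityBlock = ResNetBlock({
    'instanceName': 'demo_RNB_id',
    'skipMode': 'identity',
    'skipStride': 0,
    'stride1': 1,
    'outChannel1': 16,
    'outChannel2': 16,
    'outChannel3': 64,
    'activationType': 'ReLU'
})

# Conv skip: a strided (1x1) convolution reshapes the skip path to match the main path.
convBlock = ResNetBlock({
    'instanceName': 'demo_RNB_conv',
    'skipMode': 'conv',
    'skipStride': 2,
    'stride1': 2,
    'outChannel1': 32,
    'outChannel2': 32,
    'outChannel3': 128,
    'activationType': 'ReLU'
})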