Example No. 1
 def __init__(self):
     super(SimplerCNN, self).__init__()
     # dropout on the input feature maps, then three conv + LeakyReLU blocks
     self.dropout2d_input = nn.Dropout2d(rate=0.3)
     self.conv1 = nn.Conv2d(in_channels=3,
                            out_channels=15,
                            kernel_size=3,
                            stride=3,
                            padding=2)
     self.relu1 = nn.LeakyRelu()
     self.conv2 = nn.Conv2d(in_channels=15,
                            out_channels=30,
                            kernel_size=3,
                            stride=3,
                            padding=3)
     self.relu2 = nn.LeakyRelu()
     self.dropout2d_conv1 = nn.Dropout2d(rate=0.5)
     self.conv3 = nn.Conv2d(in_channels=30, out_channels=40, kernel_size=4)
     self.relu3 = nn.LeakyRelu()
     self.flatten = nn.Flatten()
     self.dropout2d_conv2 = nn.Dropout2d(rate=0.2)
     # fully connected head: 360 -> 180 -> 10, with batch norm and dropout
     self.linear = nn.Linear(in_dimension=360, out_dimension=180)
     self.relu4 = nn.LeakyRelu()
     self.bn1 = nn.BatchNorm()
     self.dropout3 = nn.Dropout(rate=0.3)
     self.linear2 = nn.Linear(in_dimension=180, out_dimension=10)
     self.bn2 = nn.BatchNorm()
     self.softmax = nn.Softmax()
     self.set_forward()
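Assuming 3x32x32 inputs (e.g. CIFAR-10) — an assumption, since the example does not show the data — the linear layer's in_dimension=360 is consistent with the conv stack above. A minimal sketch of the shape arithmetic, using the standard output-size formula:

 def conv_out(n, kernel, stride=1, padding=0):
     # spatial output size of a square convolution: floor((n + 2p - k) / s) + 1
     return (n + 2 * padding - kernel) // stride + 1

 n = 32                                          # assumed input height/width
 n = conv_out(n, kernel=3, stride=3, padding=2)  # conv1: 32 -> 12
 n = conv_out(n, kernel=3, stride=3, padding=3)  # conv2: 12 -> 6
 n = conv_out(n, kernel=4)                       # conv3: 6 -> 3
 print(40 * n * n)                               # 40 channels * 3 * 3 = 360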
Example No. 2
        def conv_layer(x):
            """
            the derivative check in the gradient checker relates to the input of the function
            hence, the input should be z - since the backward step computes @loss / @z
            """

            conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=2)
            relu1 = nn.Relu()
            conv2 = nn.Conv2d(in_channels=2, out_channels=4, kernel_size=2)
            relu2 = nn.Relu()
            flatten = nn.Flatten()
            linear = nn.Linear(4, 2)
            softmax = nn.Softmax()

            # forward pass
            a = relu1(conv1(x))
            a = relu2(conv2(a))
            a_flatten = flatten(a)
            dist = softmax(linear(a_flatten))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten.backward(linear_grad)
            relu2_grad = relu2.backward(flatten_grad)
            conv2_grad = conv2.backward(relu2_grad)
            relu1_grad = relu1.backward(conv2_grad)
            conv1_grad = conv1.backward(relu1_grad)

            return loss, conv1_grad
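Closures like this are typically driven by a numerical gradient checker. Below is a minimal central-difference sketch, assuming the same (per-sample losses, analytic gradient) return convention; the helper name numerical_grad_check and the value of eps are illustrative, not part of the library used above.

 import numpy as np

 def numerical_grad_check(f, x, eps=1e-5):
     # f(x) returns (per-sample losses, analytic gradient w.r.t. x);
     # compare that gradient against central differences, one entry at a time
     _, analytic_grad = f(x)
     numeric_grad = np.zeros_like(x)
     it = np.nditer(x, flags=['multi_index'])
     while not it.finished:
         idx = it.multi_index
         original = x[idx]
         x[idx] = original + eps
         loss_plus, _ = f(x)
         x[idx] = original - eps
         loss_minus, _ = f(x)
         x[idx] = original                # restore the perturbed entry
         numeric_grad[idx] = (np.sum(loss_plus) - np.sum(loss_minus)) / (2 * eps)
         it.iternext()
     return np.max(np.abs(numeric_grad - analytic_grad))

With this convention, numerical_grad_check(conv_layer, x) should return a value near zero when conv1.backward propagates the correct gradient back to the input.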
Example No. 3
 def __init__(self):
     super(ConvNet, self).__init__([
         lrp_module.Conv2d(1, 6, 5),
         lrp_module.ReLU(),
         lrp_module.MaxPool2d(2, 2),
         lrp_module.Conv2d(6, 16, 5),
         lrp_module.ReLU(),
         lrp_module.MaxPool2d(2, 2),
         lrp_module.Reshape(4, 4, 16),
         lrp_module.Linear(4 * 4 * 16, 120),
         lrp_module.ReLU(),
         lrp_module.Linear(120, 100),
         lrp_module.ReLU(),
         lrp_module.Linear(100, 10)
     ])
     self.outputLayers = [0, 2, 3, 5, 6, 9, 11, 12]
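The Reshape(4, 4, 16) and Linear(4 * 4 * 16, 120) sizes are consistent with 1x28x28 inputs (e.g. MNIST), which is an assumption here: 28 -> 24 after the first 5x5 convolution, 24 -> 12 after the first 2x2 max pool, 12 -> 8 after the second 5x5 convolution, and 8 -> 4 after the second pool, leaving 16 * 4 * 4 = 256 flattened features. self.outputLayers presumably records the layer indices whose activations are needed for relevance propagation.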
Example No. 4
        def conv(b):
            """
            the derivative check in the gradient checker relates to the input of the function
            hence, the input should be z - since the backward step computes @loss / @z
            """

            # simulate end of classification
            conv = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=2)
            relu = nn.Relu()
            flatten = nn.Flatten()
            linear = nn.Linear(in_dimension=12, out_dimension=4)
            softmax = nn.Softmax()

            conv.set_biases(b.reshape(3, 1))

            # forward
            a = flatten(relu(conv(x)))
            dist = softmax(linear(a))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten.backward(linear_grad)
            relu_grad = relu.backward(flatten_grad)
            conv_grad = conv.backward(relu_grad)

            b_grad = conv.b_grad

            return loss, b_grad
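The same central-difference idea as in Example No. 2 applies here, except the perturbed variable is the bias vector b rather than the input: the checker evaluates conv(b) at b + eps and b - eps, compares the resulting finite-difference gradient estimate against the returned conv.b_grad, and relies on x being defined in the enclosing scope.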
Example No. 5
 def __init__(self):
     super(SimpleCNN, self).__init__()
     # two conv + Tanh blocks feeding dropout, flatten, and a linear softmax classifier
     self.conv1 = nn.Conv2d(in_channels=3,
                            out_channels=6,
                            kernel_size=3,
                            stride=3,
                            padding=2)
     self.tanh1 = nn.Tanh()
     self.conv2 = nn.Conv2d(in_channels=6,
                            out_channels=10,
                            kernel_size=3,
                            stride=3,
                            padding=3)
     self.tanh2 = nn.Tanh()
     self.dropout2d = nn.Dropout2d(rate=0.5)
     self.flatten = nn.Flatten()
     self.linear = nn.Linear(in_dimension=360, out_dimension=10)
     self.softmax = nn.Softmax()
     self.set_forward()
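As in Example No. 1, in_dimension=360 matches 3x32x32 inputs (again an assumption): 32 -> 12 after conv1 (kernel 3, stride 3, padding 2), 12 -> 6 after conv2 (kernel 3, stride 3, padding 3), and 10 channels * 6 * 6 = 360 flattened features.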
Example No. 6
    def __init__(self,
                 input_dim=257,
                 output_dim=257,
                 hidden_layers=2,
                 hidden_units=512,
                 left_context=1,
                 right_context=1,
                 kernel_size=6,
                 kernel_num=9,
                 target_mode='MSA',
                 dropout=0.2):
        super(SRUC, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_layers = hidden_layers
        self.hidden_units = hidden_units
        self.left_context = left_context
        self.right_context = right_context
        self.kernel_size = kernel_size
        self.kernel_num = kernel_num
        self.target_mode = target_mode

        self.input_layer = nn.Sequential(
                nn.Linear((left_context+1+right_context)*input_dim, hidden_units),
                nn.Tanh()
            )
        
        self.rnn_layer = SRU(
                    input_size=hidden_units,
                    hidden_size=hidden_units,
                    num_layers=self.hidden_layers,
                    dropout=dropout,
                    rescale=True,
                    bidirectional=False,
                    layer_norm=False
            )
        
        self.conv2d_layer = nn.Sequential(
                #nn.Conv2d(in_channels=1,out_channels=kernel_num,kernel_size=(kernel_size, kernel_size), stride=[1,1],padding=(5,5), dilation=(2,2)),
                modules.Conv2d(in_channels=1, out_channels=kernel_num, kernel_size=(kernel_size, kernel_size)),
                nn.Tanh(),
                nn.MaxPool2d(3,stride=1,padding=(1,1))
            )
        
        self.output_layer = nn.Sequential(
                nn.Linear(hidden_units*kernel_num, (left_context+1+right_context)*self.output_dim),
                nn.Sigmoid()
            )
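With the default arguments, the input layer expects frames spliced with one frame of left and one of right context, i.e. (1 + 1 + 1) * 257 = 771 features mapped to 512 hidden units, while the output layer maps hidden_units * kernel_num = 512 * 9 = 4608 features back to (1 + 1 + 1) * 257 = 771 sigmoid outputs; the exact reshaping between the SRU, conv2d_layer, and output_layer happens in the forward pass, which is not shown here.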