Example #1
    def __init__(self):
        super(VGG, self).__init__()
        links = [('conv1_1', L.Convolution2D(3, 64, 3, stride=1, pad=1))]
        links += [('conv1_2', L.Convolution2D(64, 64, 3, stride=1, pad=1))]
        links += [('_mpool1', F.MaxPooling2D(2, 2, 0, True, True))]
        links += [('conv2_1', L.Convolution2D(64, 128, 3, stride=1, pad=1))]
        links += [('conv2_2', L.Convolution2D(128, 128, 3, stride=1, pad=1))]
        links += [('_mpool2', F.MaxPooling2D(2, 2, 0, True, True))]
        links += [('conv3_1', L.Convolution2D(128, 256, 3, stride=1, pad=1))]
        links += [('conv3_2', L.Convolution2D(256, 256, 3, stride=1, pad=1))]
        links += [('conv3_3', L.Convolution2D(256, 256, 3, stride=1, pad=1))]
        links += [('_mpool3', F.MaxPooling2D(2, 2, 0, True, True))]
        links += [('conv4_1', L.Convolution2D(256, 512, 3, stride=1, pad=1))]
        links += [('conv4_2', L.Convolution2D(512, 512, 3, stride=1, pad=1))]
        links += [('conv4_3', L.Convolution2D(512, 512, 3, stride=1, pad=1))]
        links += [('_mpool4', F.MaxPooling2D(2, 2, 0, True, True))]
        links += [('conv5_1', L.Convolution2D(512, 512, 3, stride=1, pad=1))]
        links += [('conv5_2', L.Convolution2D(512, 512, 3, stride=1, pad=1))]
        links += [('conv5_3', L.Convolution2D(512, 512, 3, stride=1, pad=1))]
        links += [('_mpool5', F.MaxPooling2D(2, 2, 0, True, True))]
        links += [('fc6', L.Linear(25088, 4096))]
        links += [('_dropout6', F.Dropout(0.5))]
        links += [('fc7', L.Linear(4096, 4096))]
        links += [('_dropout7', F.Dropout(0.5))]
        links += [('fc8', L.Linear(4096, 1000))]

        for link in links:
            if not link[0].startswith('_'):
                self.add_link(*link)

        self.forward = links
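
The constructor above only registers the parameterised layers; names starting with '_' mark parameterless functions (pooling, dropout) that stay in the list but are not added as links. A matching forward pass is not part of the snippet; the following is a minimal sketch of one, assuming import chainer.functions as F, import chainer.links as L, and the old Chainer API in which Function instances such as F.MaxPooling2D and F.Dropout are callable:

    # Hypothetical __call__ (a sketch, not from the original source):
    # walk self.forward in order and dispatch on the '_' prefix.
    def __call__(self, x):
        h = x
        for name, f in self.forward:
            if name.startswith('_'):
                h = f(h)                     # parameterless function
            else:
                h = getattr(self, name)(h)   # registered link
        return h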
Example #2
    def check_backward(self, x_data, y_grad):
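        # A single Dropout instance is reused so that f draws the same mask
        # every time gradient_check calls it.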
        dropout = functions.Dropout(self.ratio)

        def f(x):
            return dropout.apply((x, ))[0]

        gradient_check.check_backward(f, x_data, y_grad,
                                      **self.check_backward_options)
Example #3
    def check_immutable(self, inputs, backend_config):
        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)

        with backend_config:
            dropout = functions.Dropout(0.5)
            y1, = dropout.apply(inputs)
            y2, = dropout.apply(inputs)
        testing.assert_allclose(y1.data, y2.data)
Example #4
    def step(self, hs, h, id):
        attention_out = self.attention(hs, h)
        embed_out = self.embed(id)
        embed_out = self.embed_proj(embed_out)
        next_h = attention_out + h + embed_out
        for layer_id in range(self.layer_num):
            # pass through each stacked RNN cell, then apply dropout
            next_h = getattr(self, "rnn_cell_" + str(layer_id))(next_h)
            next_h = F.dropout(next_h, ratio=0.75)

        out = self.out(next_h)
        return next_h, out
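
For reference, the functional form used above only drops units while chainer.config.train is True (Chainer v2 and later). A minimal standalone sketch:

import numpy as np
import chainer
import chainer.functions as F

x = np.random.rand(2, 3).astype(np.float32)
with chainer.using_config('train', True):
    y_train = F.dropout(x, ratio=0.75)   # units zeroed, survivors scaled by 1/(1-ratio)
with chainer.using_config('train', False):
    y_test = F.dropout(x, ratio=0.75)    # identity at test time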
Example #5
    def check_backward(self, inputs, grad_outputs, backend_config):
        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)
            grad_outputs = cuda.to_gpu(grad_outputs)

        # Instantiate the function class directly in order to reuse the mask,
        # because f will be called repeatedly.
        dropout = functions.Dropout(self.ratio)

        def f(*inputs):
            return dropout.apply(inputs)

        with backend_config:
            gradient_check.check_backward(f, inputs, grad_outputs,
                                          **self.check_backward_options)
Example #6
    def check_immutable(self, x_data):
        d = functions.Dropout(0.5)
        y1, = d.apply((chainer.Variable(x_data), ))
        y2, = d.apply((chainer.Variable(x_data), ))
        testing.assert_allclose(y1.data, y2.data)
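
The test relies on the fact that one functions.Dropout instance draws its mask on the first apply call and reuses it afterwards, while independent calls to functions.dropout draw fresh masks. A minimal standalone sketch of that behaviour (assuming Chainer v2 or later with the default train mode):

import numpy as np
import chainer
from chainer import functions

x = chainer.Variable(np.random.rand(4, 3).astype(np.float32))

d = functions.Dropout(0.5)
y1, = d.apply((x, ))
y2, = d.apply((x, ))
np.testing.assert_allclose(y1.data, y2.data)   # same mask, identical outputs

z1 = functions.dropout(x, 0.5)                 # independent call ...
z2 = functions.dropout(x, 0.5)                 # ... usually a different mask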
Example #7
    def __call__(self, x):
        temp = F.relu(self.l1(x) + self.state)
        # keep the dropped-out hidden activation as the recurrent state
        self.state = F.dropout(F.relu(self.l2(temp)))
        return self.l3(self.state)