Example #1
    def __init__(self, n_class, in_ch, n_e=24*2, n_h=48*2, g_size=32, n_step=8, scale=3, var=7.5):
        w = math.sqrt(2)  # MSRA scaling
        n_ch_after_core = 256
        n_emb_l = 128
        # n_ch_in_core = n_h*2*math.ceil(g_size/8)*math.ceil(g_size/8)
        n_ch_in_core = n_h*math.ceil(g_size/4)*math.ceil(g_size/4)
        super().__init__(
            emb_l=L.Linear(2, n_emb_l),  # embed location
            emb_x=L.MLPConvolution2D(
                in_ch*scale, (n_e, n_e, n_e), 5, pad=2, wscale=w),  # embed image
            conv_loc_to_glimpse=L.Linear(n_emb_l, n_ch_in_core),  # location to glimpse (output size would be n_h if this were fully connected)
            conv_image_to_glimpse=L.MLPConvolution2D(
                n_e, (n_h, n_h, n_h), 5, pad=2, wscale=w),  # image to glimpse
            core_lstm=L.LSTM(n_ch_in_core, n_ch_after_core),  # core LSTM (the recurrent network in the paper)
            fc_ha=L.Linear(n_ch_after_core, n_class),  # core to action
            fc_hl=L.Linear(n_ch_after_core, 2),  # core to location (the emission network in the paper)
            fc_hb=L.Linear(n_ch_after_core, 1),  # core to baseline
        )
        self.g_size = g_size
        self.n_step = n_step
        self.scale = scale
        self.var = var

        self.train = True
        self.n_class = n_class
        self.active_learn = False
Example #2
    def __init__(self):
        w = math.sqrt(2)  # MSRA scaling
        super(NIN, self).__init__(
            mlpconv1=L.MLPConvolution2D(
                3, (96, 96, 96), 11, stride=4, wscale=w),
            mlpconv2=L.MLPConvolution2D(
                96, (256, 256, 256), 5, pad=2, wscale=w),
            mlpconv3=L.MLPConvolution2D(
                256, (384, 384, 384), 3, pad=1, wscale=w),
            mlpconv4=L.MLPConvolution2D(
                384, (1024, 1024, 1000), 3, pad=1, wscale=w),
        )
        self.train = True
Example #3
    def __init__(self, n_class=1000):  # 1000 classes, as for ImageNet
        super(NIN, self).__init__()
        conv_init = I.HeNormal()  # MSRA scaling
        self.n_class = n_class

        with self.init_scope():
            self.mlpconv1 = L.MLPConvolution2D(
                None, (96, 96, 96), 11, stride=4, conv_init=conv_init)
            self.mlpconv2 = L.MLPConvolution2D(
                None, (256, 256, 256), 5, pad=2, conv_init=conv_init)
            self.mlpconv3 = L.MLPConvolution2D(
                None, (384, 384, 384), 3, pad=1, conv_init=conv_init)
            self.mlpconv4 = L.MLPConvolution2D(
                None, (1024, 1024, self.n_class), 3, pad=1, conv_init=conv_init)
Example #4
    def __init__(self):
        w = math.sqrt(2)
        super(NIN, self).__init__(
            mlpconv1=L.MLPConvolution2D(3, (192, 160, 96), 5, pad=2, wscale=w),
            mlpconv2=L.MLPConvolution2D(96, (192, 192, 192), 5, pad=2, wscale=w),
            mlpconv3=L.MLPConvolution2D(192, (192, 192, 10), 3, pad=1, wscale=w),
        )
        self.train = True
Example #5
    def setUp(self):
        self.mlp = links.MLPConvolution2D(3, (96, 96, 96), 11,
                                          activation=functions.sigmoid)
        self.mlp.to_gpu()
        self.x = cuda.cupy.zeros((10, 3, 20, 20), dtype=numpy.float32)
        self.gy = cuda.cupy.zeros((10, 96, 10, 10), dtype=numpy.float32)
Example #6
    def __init__(self, n_class=10):
        w = chainer.initializers.HeNormal()
        super(NIN, self).__init__()
        with self.init_scope():
            self.mlpconv1 = L.MLPConvolution2D(3, (192, 160, 96), 5,
                                               pad=2, conv_init=w)
            self.mlpconv2 = L.MLPConvolution2D(96, (192, 192, 192), 5,
                                               pad=2, conv_init=w)
            self.mlpconv3 = L.MLPConvolution2D(192, (192, 192, n_class), 3,
                                               pad=1, conv_init=w)
Example #7
    def setUp(self):
        self.mlp = links.MLPConvolution2D(3, (96, 96, 96), 11,
                                          activation=functions.sigmoid)
        with testing.assert_warns(DeprecationWarning):
            self.mlp.to_gpu()
        self.x = cuda.cupy.zeros((10, 3, 20, 20), dtype=numpy.float32)
        self.gy = cuda.cupy.zeros((10, 96, 10, 10), dtype=numpy.float32)
Example #8
    def test_valid_instantiation_ksize_is_not_none(self):
        l = links.MLPConvolution2D(
            self.in_channels, self.out_channels, self.ksize, self.stride,
            self.pad, functions.relu, conv_init=None, bias_init=None)
        self.assertEqual(len(l), 2)
        self.assertEqual(l[0].W.shape,
                         (self.out_channels[0], self.in_channels,
                          self.ksize, self.ksize))
        self.assertEqual(l[1].W.shape,
                         (self.out_channels[1], self.out_channels[0], 1, 1))
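The shape assertions follow from how MLPConvolution2D is constructed: the first link is a regular ksize x ksize convolution and every subsequent link is a 1 x 1 convolution (the "micro network" of Network in Network). A standalone sketch with concrete values standing in for the fixture attributes (the numbers here are illustrative, not from the original test):

    import chainer.links as links

    l = links.MLPConvolution2D(3, (96, 48), 11)
    assert len(l) == 2
    assert l[0].W.shape == (96, 3, 11, 11)  # k x k convolution
    assert l[1].W.shape == (48, 96, 1, 1)   # 1 x 1 micro-network layer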
Example #9
File: nin.py Project: taura/chainer
    def __init__(self):
        conv_init = I.HeNormal()  # MSRA scaling
        super(NIN, self).__init__(
            mlpconv1=L.MLPConvolution2D(None, (96, 96, 96), 11,
                                        stride=4, conv_init=conv_init),
            mlpconv2=L.MLPConvolution2D(None, (256, 256, 256), 5,
                                        pad=2, conv_init=conv_init),
            mlpconv3=L.MLPConvolution2D(None, (384, 384, 384), 3,
                                        pad=1, conv_init=conv_init),
            mlpconv4=L.MLPConvolution2D(None, (1024, 1024, 1000), 3,
                                        pad=1, conv_init=conv_init),
        )
Example #10
    def __init__(self, image_colors, class_labels):
        w = math.sqrt(2)  # MSRA scaling
        super(NIN, self).__init__(
            # in_channels is None, so image_colors is inferred on the first call
            mlpconv1=L.MLPConvolution2D(None, (96, 96, 96), 11,
                                        stride=4, wscale=w),
            mlpconv2=L.MLPConvolution2D(None, (256, 256, 256), 5,
                                        pad=2, wscale=w),
            mlpconv3=L.MLPConvolution2D(None, (384, 384, 384), 3,
                                        pad=1, wscale=w),
            mlpconv4=L.MLPConvolution2D(None, (1024, 1024, class_labels), 3,
                                        pad=1, wscale=w),
        )
        self.train = True
Example #11
    def __init__(self, compute_accuracy=False):
        super(NIN, self).__init__()
        self.compute_accuracy = compute_accuracy
        conv_init = I.HeNormal()  # MSRA scaling

        with self.init_scope():
            self.mlpconv1 = L.MLPConvolution2D(None, (96, 96, 96),
                                               11,
                                               stride=4,
                                               conv_init=conv_init)
            self.mlpconv2 = L.MLPConvolution2D(None, (256, 256, 256),
                                               5,
                                               pad=2,
                                               conv_init=conv_init)
            self.mlpconv3 = L.MLPConvolution2D(None, (384, 384, 384),
                                               3,
                                               pad=1,
                                               conv_init=conv_init)
            self.mlpconv4 = L.MLPConvolution2D(None, (1024, 1024, 1000),
                                               3,
                                               pad=1,
                                               conv_init=conv_init)
Example #12
    def test_valid_instantiation_in_channels_is_omitted(self):
        l = links.MLPConvolution2D(
            self.out_channels, self.ksize, stride=self.stride, pad=self.pad,
            activation=functions.relu, conv_init=None, bias_init=None)
        x = numpy.random.uniform(
            -1, 1, (10, self.in_channels, 10, 10)).astype(numpy.float32)
        l(x)  # the first call lazily initializes the convolution weights

        self.assertEqual(len(l), 2)
        self.assertEqual(l[0].W.shape,
                         (self.out_channels[0], self.in_channels,
                          self.ksize, self.ksize))
        self.assertEqual(l[1].W.shape,
                         (self.out_channels[1], self.out_channels[0], 1, 1))
Example #13
    def test_forbid_wscale_as_a_keyword_argument(self):
        with self.assertRaises(ValueError):
            links.MLPConvolution2D(self.in_channels,
                                   self.out_channels,
                                   wscale=1)
Example #14
    def test_forbid_wscale_as_a_positional_argument(self):
        with self.assertRaises(TypeError):
            # the 7th positional argument was wscale in v1
            links.MLPConvolution2D(self.in_channels, self.out_channels, None,
                                   self.stride, self.pad, functions.relu, 1)
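These two tests pin down the Chainer v1 to v2 migration: the old wscale argument was removed, and weight scaling is now expressed through an explicit initializer. A sketch of the replacement; treating HeNormal as the stand-in for wscale=sqrt(2) is the usual correspondence, stated here as an assumption:

    import chainer.initializers as I
    import chainer.links as L

    # v1 (now rejected): L.MLPConvolution2D(3, (96, 96, 96), 11, wscale=math.sqrt(2))
    # v2: pass an initializer instead.
    l = L.MLPConvolution2D(3, (96, 96, 96), 11, conv_init=I.HeNormal())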
Example #15
    def __init__(self,
                 n_dims,
                 in_channels,
                 hidden_channels,
                 out_channel,
                 kernel_size=3,
                 initialW=initializers.HeNormal(),
                 initial_bias=None,
                 block_type='default',
                 is_residual=False,
                 batch_norm=False):

        self.n_dims = n_dims
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channel = out_channel
        self.kernel_size = kernel_size
        self.initialW = initialW
        self.initial_bias = initial_bias
        self.block_type = block_type
        self.is_residual = is_residual
        self.batch_norm = batch_norm

        # For odd k, k // 2 == (k - 1) // 2, so one floor division covers both cases.
        pad = self.kernel_size // 2

        super().__init__()

        with self.init_scope():

            if self.block_type == 'default':
                self.conv_1 = L.ConvolutionND(self.n_dims,
                                              self.in_channels,
                                              self.hidden_channels,
                                              self.kernel_size,
                                              stride=1,
                                              pad=pad,
                                              initialW=self.initialW,
                                              initial_bias=self.initial_bias)
                self.conv_2 = L.ConvolutionND(self.n_dims,
                                              self.hidden_channels,
                                              self.out_channel,
                                              self.kernel_size,
                                              stride=1,
                                              pad=pad,
                                              initialW=self.initialW,
                                              initial_bias=self.initial_bias)

            elif self.block_type == 'dilated':
                assert self.n_dims != 3, 'Currently, dilated convolution is unsupported in 3D.'
                self.conv_1 = L.DilatedConvolution2D(
                    self.in_channels,
                    self.hidden_channels,
                    self.kernel_size,
                    stride=1,
                    pad=pad,
                    dilate=1,
                    initialW=self.initialW,
                    initial_bias=self.initial_bias)
                self.conv_2 = L.DilatedConvolution2D(
                    self.hidden_channels,
                    self.out_channel,
                    self.kernel_size,
                    stride=1,
                    pad=pad * 2,
                    dilate=2,
                    initialW=self.initialW,
                    initial_bias=self.initial_bias)

            elif self.block_type == 'mlp':
                assert self.n_dims != 3, 'Currently, mlp convolution is unsupported in 3D.'
                self.conv_1 = L.MLPConvolution2D(self.in_channels,
                                                 [self.hidden_channels] * 3,
                                                 self.kernel_size,
                                                 stride=1,
                                                 pad=pad,
                                                 conv_init=self.initialW,
                                                 bias_init=self.initial_bias)
                self.conv_2 = L.MLPConvolution2D(self.hidden_channels,
                                                 [self.out_channel] * 3,
                                                 self.kernel_size,
                                                 stride=1,
                                                 pad=pad,
                                                 conv_init=self.initialW,
                                                 bias_init=self.initial_bias)

            if self.batch_norm:
                self.bn_conv_1 = L.BatchNormalization(self.hidden_channels)
                self.bn_conv_2 = L.BatchNormalization(self.out_channel)
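A quick instantiation sketch for the 'mlp' variant of this block, which stacks two MLPConvolution2D links of depth 3 each. The excerpt omits the class statement, so the name ConvBlock and the argument values below are hypothetical:

    # Hypothetical usage, assuming the enclosing class is named ConvBlock.
    block = ConvBlock(n_dims=2, in_channels=16, hidden_channels=32,
                      out_channel=16, kernel_size=3, block_type='mlp')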
Example #16
    def setUp(self):
        args, kwargs = self.mlpconv_args
        self.mlp = links.MLPConvolution2D(*args, **kwargs)
        self.x = numpy.zeros((10, 3, 20, 20), dtype=numpy.float32)
Example #17
    def setUp(self):
        self.mlp = links.MLPConvolution2D(3, (96, 96, 96), 11,
                                          activation=functions.sigmoid,
                                          use_cudnn=self.use_cudnn)
        self.x = numpy.zeros((10, 3, 20, 20), dtype=numpy.float32)
Example #18
# Assumed context for this notebook-style snippet: `image` is an RGB array of
# shape (H, W, 3) loaded in an earlier cell, and `plot` is a helper defined
# earlier that tiles and displays feature maps.
import math

import matplotlib.pyplot as plt
import numpy as np
import chainer.functions as F
import chainer.links as L

plt.imshow(image)

# Scratch arithmetic, apparently mapping coordinates between the full-size
# image and a 1/4-scale version.
print(int(400 / 4))
print(int(300 / 4))

print(98 * 4)
print(73 * 4)

#image = image[:,:,:,np.newaxis]
image = np.transpose(image, (2, 0, 1))
image = image[np.newaxis, :, :, :]
print("input image:", image.shape)

#conv1=L.Convolution2D(3,  96, 11)#, stride=)
w = math.sqrt(2)  # MSRA scaling
conv1 = L.MLPConvolution2D(3, (96, 96, 96), 11, stride=4, wscale=w)

mlpconv1 = conv1(image)
print("conv result1:", mlpconv1.data.shape)

#rev_image = np.transpose(cv1.data,(1,2,3,0))

#plt.imshow(rev_image[0])
print(type(mlpconv1))

print("conv result1 image shape:", mlpconv1.data[0].shape)
print("mlpconv1 is")
plot(mlpconv1.data, mlpconv1.data.shape[1])

relu1 = F.relu(mlpconv1)
print("relu result1:", relu1.data.shape)