Example #1
    def __call__(self, x):
        h = F.relu(self.l0(x))
        h = F.prelu(self.l1(h))
        h = F.relu(self.l2(h))
        h = F.prelu(self.l3(h))

        return h
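
For reference, functions.prelu(x, W) computes max(0, x) + W * min(0, x) elementwise, with the slope W broadcast over every axis of x except the channel axis. A minimal NumPy-only sketch of that formula (the shapes below are illustrative assumptions, not taken from the example above):

    import numpy as np

    def prelu_reference(x, W):
        # W matches x's channel axis; reshape it so it broadcasts over the
        # batch axis and any trailing spatial axes.
        shape = (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
        Wb = W.reshape(shape)
        return np.maximum(x, 0) + Wb * np.minimum(x, 0)

    x = np.random.randn(2, 3, 4).astype(np.float32)
    W = np.full((3,), 0.25, dtype=np.float32)
    y = prelu_reference(x, W)  # negative entries are scaled by 0.25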
Example #2
    def check_forward(self, x_data, W_data):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        y = functions.prelu(x, W)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.copy()
        masked = numpy.ma.masked_greater_equal(y_expect, 0, copy=False)
        shape = (1, ) + W.shape + (1, ) * (x.ndim - W.ndim - 1)
        masked *= self.W.reshape(shape)
        testing.assert_allclose(y_expect, y.data)
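
The expected output is built in place here: numpy.ma.masked_greater_equal with copy=False shares y_expect's buffer and masks the non-negative entries, so the in-place multiply scales only the negative entries by the slope. A small standalone illustration of that NumPy idiom (the values are made up):

    import numpy

    a = numpy.array([-2.0, -0.5, 0.0, 1.5], dtype=numpy.float32)
    masked = numpy.ma.masked_greater_equal(a, 0, copy=False)
    masked *= 0.25
    # Only the unmasked (negative) entries changed:
    # a is now [-0.5, -0.125, 0.0, 1.5]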
Example #3
    def check_forward(self, x_data, W_data):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        y = functions.prelu(x, W)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.copy()
        masked = numpy.ma.masked_greater_equal(y_expect, 0, copy=False)
        shape = (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
        masked *= self.W.reshape(shape)
        testing.assert_allclose(y_expect, y.data)
Example #4
    def check_forward(self, x_data, W_data):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        y = functions.prelu(x, W)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.copy()
        for i in numpy.ndindex(self.x.shape):
            if self.x[i] < 0:
                y_expect[i] *= self.W

        testing.assert_allclose(y_expect, y.data)
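
The loop above only works because the slope here is a single shared coefficient: a zero-dimensional W applies one value to every negative element. A small hedged usage sketch of that case (the array values are made up):

    import numpy
    from chainer import functions

    x = numpy.array([[-1.0, 2.0], [0.5, -3.0]], dtype=numpy.float32)
    W = numpy.array(0.25, dtype=numpy.float32)  # 0-d slope shared by all elements
    y = functions.prelu(x, W)
    # y.array == [[-0.25, 2.0], [0.5, -0.75]]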
Example #5
    def check_forward(self, x_data, W_data):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        y = functions.prelu(x, W)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.copy()
        for i in numpy.ndindex(self.x.shape):
            if self.x[i] < 0:
                y_expect[i] *= self.W

        testing.assert_allclose(
            y_expect, y.data)
Example #6
    def test_forward(self, backend_config):
        # TODO(niboshi): Support it
        if backend_config.use_chainerx and self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')

        x_data, W_data = backend_config.get_array((self.x, self.W))
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        y = functions.prelu(x, W)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.copy()
        masked = numpy.ma.masked_greater_equal(y_expect, 0, copy=False)
        shape = (1, ) + W.shape + (1, ) * (x.ndim - W.ndim - 1)
        masked *= self.W.reshape(shape)
        testing.assert_allclose(y_expect, y.data)
Example #7
    def test_forward(self, backend_config):
        # TODO(niboshi): Support it
        if backend_config.use_chainerx and self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')

        x_data, W_data = backend_config.get_array((self.x, self.W))
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        y = functions.prelu(x, W)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = self.x.copy()
        masked = numpy.ma.masked_greater_equal(y_expect, 0, copy=False)
        shape = (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
        masked *= self.W.reshape(shape)
        testing.assert_allclose(y_expect, y.data)
Example #8
 def f(x, W):
     y = functions.prelu(x, W)
     return y * y
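
A wrapper like this, which squares the PReLU output so the result is non-linear in its inputs, is the usual shape of a function handed to Chainer's gradient checker. A hedged sketch of how such an f might be driven with gradient_check.check_double_backward (the shapes, tolerances, and input nudging are assumptions):

    import numpy
    from chainer import functions, gradient_check

    def f(x, W):
        y = functions.prelu(x, W)
        return y * y

    x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
    # Keep inputs away from 0, where PReLU is not differentiable.
    x[(-0.05 < x) & (x < 0.05)] = 0.5
    W = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
    gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
    ggx = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
    ggW = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)

    gradient_check.check_double_backward(
        f, (x, W), gy, (ggx, ggW), atol=1e-3, rtol=1e-3)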
Example #9
 def forward(self, inputs, device):
     x, W = inputs
     y = functions.prelu(x, W)
     return y,
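
A forward with this (inputs, device) signature normally sits inside a chainer.testing.FunctionTestCase, which also expects generate_inputs and a NumPy-only forward_expected. A hedged sketch of that surrounding class (the shapes are assumptions, and the real suite additionally parameterizes such cases over backends):

    import numpy
    from chainer import functions, testing

    class TestPReLU(testing.FunctionTestCase):

        def generate_inputs(self):
            x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
            # Keep inputs away from 0, where PReLU is not differentiable.
            x[(-0.05 < x) & (x < 0.05)] = 0.5
            W = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
            return x, W

        def forward(self, inputs, device):
            x, W = inputs
            y = functions.prelu(x, W)
            return y,

        def forward_expected(self, inputs):
            x, W = inputs
            shape = (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
            y = numpy.where(x >= 0, x, x * W.reshape(shape))
            return y.astype(x.dtype),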
Example #10
 def forward(self, inputs, device):
     x, W = inputs
     y = functions.prelu(x, W)
     return y,
    def __call__(self, x, t, train):
        """
		h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
		h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
		h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
		h = self.mlpconv4(F.dropout(h, train=self.train))
		y = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000))
		"""
        """
		imitating NIN (Network in Network)
		h = F.max_pooling_2d(F.relu(self.conv1(x)), 3, stride=2)
		h = F.max_pooling_2d(F.relu(self.conv2(h)), 3, stride=2)
		h = F.max_pooling_2d(F.relu(self.conv3(h)), 3, stride=2)
		h = self.conv4(F.dropout(h, train=self.train))
		y = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000))
		"""

        #---------------------------------------------60model
        #h = self.conv1(x)
        #h = F.relu(h)
        #h = F.max_pooling_2d(h, 3, stride=2)

        #h = self.conv2(h)
        #h = F.relu(h)
        #h = F.average_pooling_2d(h, 3, stride=2)

        #h = self.conv3(h)
        #h = F.relu(h)
        #h = F.average_pooling_2d(h, 3, stride=2)

        #h = self.conv4(h)
        #h = F.relu(h)
        #h = F.average_pooling_2d(h, 3, stride=2)

        #y = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0],48))

        #----------------------------------------vasily
        #h = F.relu(self.bn1(self.conv1(x)))
        #h = F.relu(self.bn2(self.conv2(h)))
        #h = F.relu(self.bn3(self.conv3(h)))
        #h = F.relu(self.bn4(self.conv4(h)))
        #y = self.fl(h)

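        # NOTE: `xp` in the calls below is the array module (numpy or cuda.cupy),
        # presumably set up elsewhere in the original file.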
        h = self.conv1(x)
        #h = F.relu(h)
        h = F.prelu(h, xp.ones((8, 221, 221), dtype=xp.float32) * 0.25)
        h = F.max_pooling_2d(h, 3, stride=2)

        h = self.conv2(h)
        #h = F.relu(h)
        h = F.prelu(h, xp.ones((16, 106, 106), dtype=xp.float32) * 0.15)
        h = F.average_pooling_2d(h, 3, stride=2)

        h = self.conv3(h)
        #h = F.relu(h)
        #h = F.relu(F.dropout(h, ratio=0.3,train=train))
        h = F.prelu(h, xp.ones((32, 50, 50), dtype=xp.float32) * 0.05)
        h = F.average_pooling_2d(h, 3, stride=2)

        h = self.conv4(h)
        h = F.prelu(F.dropout(h, ratio=0.4, train=train),
                    xp.ones((48, 22, 22), dtype=xp.float32) * 0.01)
        #h = F.prelu(h,xp.ones((48,22,22),dtype=xp.float32)*0.01)
        h = F.average_pooling_2d(h, 3, stride=2)

        #y=self.mo(F.dropout(h,ratio=0.5,train=train))
        y = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 48))
        """
		#print x.data.ndim
		#convolution -> bn -> relu is said to be more stable...
		h = self.conv1(x)
		#h = self.bn1(h)
		h = F.relu(h)
		#h = self.bn1(h)
		#n = normalization window size
		#h = F.local_response_normalization(h,n=3)
		#max_pooling_2d(x, windowsize, ...)
		h = F.max_pooling_2d(h, 3, stride=3)

		#print h.data.shape
		h = self.conv2(h)
		#h = self.bn2(h)
		h = F.relu(h)
		#h = F.max_pooling_2d(h, 3, stride=2)
		h = F.average_pooling_2d(h, 3, stride=3)

		h = self.conv3(h)
		#h = self.bn3(h)
		h = F.relu(h)
		#h = self.bn3(h)
		#x, poolingWindowSize, stride=, pad=
		#h = F.max_pooling_2d(h, 3, stride=2)
		h = F.average_pooling_2d(h, 3, stride=3)
		
		h=self.l1(h)
		h=F.relu(h)
		h = F.dropout(h, ratio=0.5, train=train)#0.3
		y = self.l2(h)		

		#h = self.bn4(h)
		#h = F.relu(h)
		
		##h=self.l2(h)
		#h = self.bn4(h)
		##h = F.relu(h)
		#h=F.relu(self.l2(h))
		#y = F.dropout(self.l3(h),ratio=0.3,train=train)#0.2
		#h = self.bn5(h)
		#y = self.l3(h)

		#h = F.relu(self.conv1(x))
		#h = self.bn4(h)
		#h = F.average_pooling_2d(h, 2)
		#h = F.relu(self.conv2(h))
		#h = self.bn5(h)
		#h = F.average_pooling_2d(h, 2)
		#h = F.relu(self.conv3(h))
		#h = self.bn6(h)	
		#h = F.average_pooling_2d(h, 2)
		#h = F.relu(self.l1(x))
		#h = F.dropout(F.relu(self.l1(h)), train=train)
		#y = self.l2(h)
		"""
        #http://qiita.com/supersaiakujin/items/ccdb41c1f33ad5d27fdf
        #The softmax activation, softmax = exp(a)/sum(exp(a)), turns arbitrary input values into probabilities
        if train:
            #cross_entropy is the loss function: take -sum(log(softmax(y)) * label) over all labels, and minimize it
            return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
        else:
            return F.accuracy(y, t)
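
Building a fresh xp.ones(...) slope array on every call, as above, keeps the PReLU coefficients fixed at their initial values. If the slopes are meant to be learned, the chainer.links.PReLU link holds W as a parameter of the chain instead. A hedged sketch of that alternative (the layer sizes are assumptions loosely based on the code above):

    import chainer
    import chainer.functions as F
    import chainer.links as L

    class ConvBlock(chainer.Chain):

        def __init__(self):
            super(ConvBlock, self).__init__()
            with self.init_scope():
                self.conv1 = L.Convolution2D(None, 8, ksize=3)
                # One learned slope per output channel, initialized to 0.25.
                self.prelu1 = L.PReLU(shape=(8,), init=0.25)

        def __call__(self, x):
            h = self.prelu1(self.conv1(x))
            return F.max_pooling_2d(h, 3, stride=2)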