Example #1
File: test_rrelu.py  Project: hvy/chainer
def forward(self, inputs, device):
    x, = inputs
    # Cast the pre-generated slope array to the input dtype and move it
    # to the target device before calling rrelu with a fixed r.
    r = self.r.astype(x.dtype)
    r = device.send(r)
    with chainer.using_config('train', self.train):
        y = functions.rrelu(x, l=self.l, u=self.u, r=r)
    return y,
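
For orientation, here is a minimal, self-contained sketch of the two modes these tests exercise (the array values are illustrative, not from the original tests): in training mode rrelu scales negative entries by a slope r drawn uniformly from [l, u], while in test mode it uses the fixed average slope (l + u) / 2.

import numpy as np
import chainer
from chainer import functions

x = np.array([[-1.0, 0.5], [2.0, -0.25]], dtype=np.float32)

# Training mode: each negative entry is scaled by its own r ~ U(l, u).
with chainer.using_config('train', True):
    y_train = functions.rrelu(x, l=1. / 8, u=1. / 3)

# Test mode: the deterministic average slope (l + u) / 2 is used.
with chainer.using_config('train', False):
    y_test = functions.rrelu(x, l=1. / 8, u=1. / 3)

print(y_test.array)  # negatives scaled by (1/8 + 1/3) / 2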
Example #2
def f(x):
    return functions.rrelu(
        x,
        self.l,
        self.u,
        r=r.astype(x.dtype)  # check_backward casts only x
    )
Example #3
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    xp = backend.get_array_module(x)
    with chainer.using_config('train', self.train):
        y, r = functions.rrelu(x, l=self.l, u=self.u, return_r=True)
    self.assertEqual(y.data.dtype, self.dtype)
    # RReLU leaves non-negative entries unchanged and scales negatives by r.
    expected = xp.where(x_data >= 0, x_data, x_data * r)
    testing.assert_allclose(expected, y.data, **self.check_forward_options)
Example #4
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    # chainer.cuda.get_array_module is the older spelling of
    # chainer.backend.get_array_module used in Example #3.
    xp = cuda.get_array_module(x)
    with chainer.using_config('train', self.train):
        y, r = functions.rrelu(x, l=self.l, u=self.u, return_r=True)
    self.assertEqual(y.data.dtype, self.dtype)
    expected = xp.where(x_data >= 0, x_data, x_data * r)
    testing.assert_allclose(
        expected, y.data, **self.check_forward_options)
Example #5
def _check(self):
    r = self.r if self.specify_r else None
    with chainer.using_config('train', self.train):
        out, out_r = functions.rrelu(
            self.x, self.l, self.u, r=r, return_r=True)

    assert isinstance(out_r, type(out.array))
    if r is None:
        assert out_r.shape == out.array.shape
    else:
        if chainer.config.train:
            assert out_r is r
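
A minimal sketch of what _check verifies, under the assumption that rrelu's return_r=True simply hands back the slope array it used: when r is supplied in training mode, the very same array is reused, so the output can be reproduced with a plain where():

import numpy as np
import chainer
from chainer import functions

x = np.random.randn(3, 4).astype(np.float32)
r = np.full(x.shape, 0.2, dtype=np.float32)  # fixed slope in [l, u]

with chainer.using_config('train', True):
    y, out_r = functions.rrelu(x, l=1. / 8, u=1. / 3, r=r, return_r=True)

assert out_r is r  # the supplied slope array is passed through unchanged
np.testing.assert_allclose(y.array, np.where(x >= 0, x, x * r))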
Example #6
def prediction(self, x):
    x = Variable(x)
    ecfp = self.build_ecfp(x)
    fcfp = self.build_fcfp(x)

    ecfp_fcfp = F.concat((ecfp, fcfp), axis=1)
    h1 = self.attention_layer1(ecfp_fcfp)
    # Two rrelu-activated attention maps over the shared hidden layer.
    attentions_1 = F.rrelu(self.attention_layer2(h1))
    attentions_2 = F.rrelu(self.attention_layer3(h1))
    # Normalize both maps jointly, then split back into per-map weights.
    attentions = F.concat((attentions_1, attentions_2), axis=1)
    attentions = F.softmax(attentions)
    attentions = F.split_axis(attentions, 2, 1)
    attention_ecfp = attentions[0]
    attention_fcfp = attentions[1]
    attentioned_ecfc = F.concat(
        (attention_ecfp * ecfp, attention_fcfp * fcfp), axis=1)

    pred = self.dnn(attentioned_ecfc)
    return pred, attention_ecfp, attention_fcfp
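
As a side note, the softmax-then-split pattern above normalizes the two rrelu-activated attention maps jointly along the feature axis. A toy sketch with assumed shapes (the names and sizes are illustrative, not from the original project):

import numpy as np
import chainer.functions as F

a1 = np.random.randn(5, 16).astype(np.float32)  # stand-in for attentions_1
a2 = np.random.randn(5, 16).astype(np.float32)  # stand-in for attentions_2

joint = F.softmax(F.concat((a1, a2), axis=1))  # normalize across both maps
w1, w2 = F.split_axis(joint, 2, axis=1)        # recover per-map weights

assert w1.shape == a1.shape and w2.shape == a2.shape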
Example #7
def f(x):
    return functions.rrelu(x, self.l, self.u, r=r)