def _calc(self, x):
    """U-Net style forward pass with two pooling levels.

    The encoder keeps two skip tensors (syn0, syn1) that are
    concatenated back in during decoding.  Intermediate activations are
    del'ed aggressively to lower peak memory.  Returns the raw output
    of the final deconvolution dc6 (no activation applied here).
    """
    e0 = F.relu(self.bnc0(self.c0(x)))
    syn0 = F.relu(self.bnc1(self.c1(e0)))  # skip connection, full resolution
    del e0
    e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)  # downsample 1
    e2 = F.relu(self.bnc2(self.c2(e1)))
    syn1 = F.relu(self.bnc3(self.c3(e2)))  # skip connection, level 1
    del e1, e2
    e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)  # downsample 2
    e4 = F.relu(self.bnc4(self.c4(e3)))
    e5 = F.relu(self.bnc5(self.c5(e4)))  # bottleneck
    del e3, e4
    d0 = F.concat([self.dc0(e5), syn1])  # upsample + skip 1
    del e5, syn1
    d1 = F.relu(self.bndc1(self.dc1(d0)))
    d2 = F.relu(self.bndc2(self.dc2(d1)))
    del d0, d1
    d3 = F.concat([self.dc3(d2), syn0])  # upsample + skip 0
    del d2, syn0
    d4 = F.relu(self.bndc4(self.dc4(d3)))
    d5 = F.relu(self.bndc5(self.dc5(d4)))
    del d3, d4
    d6 = self.dc6(d5)  # final projection, no activation
    del d5
    return d6
def _calc(self, x):
    """U-Net style forward pass with three pooling levels.

    Skip tensors syn0-syn2 are concatenated back in during decoding;
    intermediates are del'ed to lower peak memory.  Returns the raw
    output of dc9 (no activation applied here).
    """
    e0 = F.relu(self.bnc0(self.c0(x)))
    syn0 = F.relu(self.bnc1(self.c1(e0)))  # skip, full resolution
    del e0
    e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
    e2 = F.relu(self.bnc2(self.c2(e1)))
    syn1 = F.relu(self.bnc3(self.c3(e2)))  # skip, level 1
    del e1, e2
    e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
    e4 = F.relu(self.bnc4(self.c4(e3)))
    syn2 = F.relu(self.bnc5(self.c5(e4)))  # skip, level 2
    del e3, e4
    e5 = F.max_pooling_nd(syn2, self.pool_size, self.pool_size)
    e6 = F.relu(self.bnc6(self.c6(e5)))
    e7 = F.relu(self.bnc7(self.c7(e6)))  # bottleneck
    del e5, e6
    d0 = F.concat([self.dc0(e7), syn2])  # upsample + skip 2
    del e7, syn2
    d1 = F.relu(self.bndc1(self.dc1(d0)))
    d2 = F.relu(self.bndc2(self.dc2(d1)))
    del d0, d1
    d3 = F.concat([self.dc3(d2), syn1])  # upsample + skip 1
    del d2, syn1
    d4 = F.relu(self.bndc4(self.dc4(d3)))
    d5 = F.relu(self.bndc5(self.dc5(d4)))
    del d3, d4
    d6 = F.concat([self.dc6(d5), syn0])  # upsample + skip 0
    del d5, syn0
    d7 = F.relu(self.bndc7(self.dc7(d6)))
    d8 = F.relu(self.bndc8(self.dc8(d7)))
    del d6, d7
    d9 = self.dc9(d8)  # final projection, no activation
    del d8
    return d9
def pre(self, x):
    """Shared conv trunk followed by three heads.

    Returns ``(c_result, r_result)``: a class softmax and the
    concatenation (axis 1) of a sigmoid position head and a tanh
    orientation head.
    """
    trunk = F.max_pooling_nd(self.conv1(x), 3, stride=2)
    trunk = F.max_pooling_nd(self.conv2(trunk), 3, stride=2)
    trunk = F.max_pooling_nd(self.conv3(trunk), 3, stride=2)
    # classification head
    cls = F.relu(self.l_cl1(trunk))
    cls = F.relu(self.l_cl2(cls))
    c_result = F.softmax(self.l_cl(cls))
    # position head (sigmoid output)
    pos = F.relu(self.l_pos1(trunk))
    pos = F.relu(self.l_pos2(pos))
    pos = F.relu(self.l_pos3(pos))
    pos = F.sigmoid(self.l_pos(pos))
    # orientation head (tanh output)
    ori = F.relu(self.l_ori1(trunk))
    ori = F.relu(self.l_ori2(ori))
    ori = F.relu(self.l_ori3(ori))
    ori = F.tanh(self.l_ori(ori))
    return c_result, F.concat((pos, ori), axis=1)
def test_forward_cpu_wide(self):
    """Regression test for issue #120: wide input with a large kernel."""
    ndim = self.ndim
    shape = (2, 3) + (15,) * ndim
    arr = numpy.random.rand(*shape).astype(self.dtype)
    k = int(math.ceil(pow(32, 1.0 / ndim)))
    # ksize == stride == k; should not raise.
    functions.max_pooling_nd(chainer.Variable(arr), k, stride=k, pad=0)
def test_forward_cpu_wide(self):
    # Regression check for issue #120: a wide input and a large kernel.
    n = self.ndim
    data = numpy.random.rand(2, 3, *([15] * n)).astype(self.dtype)
    side = int(math.ceil(32 ** (1.0 / n)))
    x = chainer.Variable(data)
    # Smoke test: the forward call itself must succeed.
    functions.max_pooling_nd(x, side, stride=side, pad=0)
def __call__(self, x):
    """C3D-style forward: five conv stages with 3-D max pooling
    (first pool is spatial-only), then fc6-fc8 with ReLU + dropout.
    fc5 remains disabled, as in the original implementation.
    """
    out = self.conv1a(x, self.bn1a)
    out = F.max_pooling_nd(out, ksize=(1, 2, 2), stride=(1, 2, 2))
    out = self.conv2a(out, self.bn2a)
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv3a(out, self.bn3a)
    out = self.conv3b(out, self.bn3b)
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv4a(out, self.bn4a)
    out = self.conv4b(out, self.bn4b)
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv5a(out, self.bn5a)
    out = self.conv5b(out, self.bn5b)
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = F.dropout(F.relu(self.fc6(out)), ratio=0.5)
    out = F.dropout(F.relu(self.fc7(out)), ratio=0.5)
    return self.fc8(out)
def __call__(self, x):
    """U-Net forward pass with cropping skip connections.

    Returns a per-pixel/voxel softmax over channel axis 1.

    Bug fix: the final ``del`` statement referenced an undefined name
    ``g16`` (typo for ``h16``), raising NameError at the end of every
    call.
    """
    h1 = F.relu(self.bnc0(self.conv1(x)))
    h2 = F.relu(self.bnc1(self.conv2(h1)))  # skip source, full resolution
    h3 = F.max_pooling_nd(h2, ksize=2, stride=2)
    del h1
    h4 = F.relu(self.bnc2(self.conv3(h3)))
    h5 = F.relu(self.bnc3(self.conv4(h4)))  # skip source, 1/2 resolution
    h6 = F.max_pooling_nd(h5, ksize=2, stride=2)
    del h3, h4
    h7 = F.relu(self.bnc4(self.conv5(h6)))
    h8 = F.relu(self.bnc5(self.conv6(h7)))  # bottleneck
    h9 = self.dconv1(h8)
    del h6, h7, h8
    h10 = F.concat([h9, self.cropping(h5, h9)])  # crop skip to match size
    h11 = F.relu(self.bnd4(self.conv7(h10)))
    h12 = F.relu(self.bnd3(self.conv8(h11)))
    h13 = self.dconv2(h12)
    del h9, h10, h11, h12
    h14 = F.concat([h13, self.cropping(h2, h13)])
    h15 = F.relu(self.bnd2(self.conv9(h14)))
    h16 = F.relu(self.bnd1(self.conv10(h15)))
    lcl = F.softmax(self.lcl(h16), axis=1)
    del h2, h14, h15, h16  # was "del ... g16": NameError
    return lcl
def __call__(self, x):
    """Three-level U-Net forward pass with cropping skip connections.

    Skip tensors h2/h5/h8 are cropped (via ``self.cropping``) to match
    the upsampled maps before concatenation.  Returns the raw conv15
    output (no activation applied here).
    """
    test = not self.train  # NOTE(review): unused in this method
    h1 = F.relu(self.bnc0(self.conv1(x)))
    h2 = F.relu(self.bnc1(self.conv2(h1)))  # skip source, full resolution
    #cover_all please check map size
    h3 = F.max_pooling_nd(h2, ksize=2, stride=2)
    #del h1
    h4 = F.relu(self.bnc2(self.conv3(h3)))
    #del h3
    h5 = F.relu(self.bnc3(self.conv4(h4)))  # skip source, 1/2 resolution
    #del h4
    h6 = F.max_pooling_nd(h5, ksize=2, stride=2)
    h7 = F.relu(self.bnc4(self.conv5(h6)))
    #del h6
    h8 = F.relu(self.bnc5(self.conv6(h7)))  # skip source, 1/4 resolution
    #del h7
    h9 = F.max_pooling_nd(h8, ksize=2, stride=2)
    h10 = F.relu(self.bnc6(self.conv7(h9)))
    #del h9
    h11 = F.relu(self.bnc7(self.conv8(h10)))  # bottleneck
    #del h10
    h12 = self.dconv1(h11)
    #del h11
    h13 = F.concat([h12, self.cropping(h8, h12)], axis=1)
    #del h8, h12
    h14 = F.relu(self.bnd8(self.conv9(h13)))
    #del h13
    h15 = F.relu(self.bnd7(self.conv10(h14)))
    #del h14
    h16 = self.dconv2(h15)
    #del h15
    h17 = F.concat([h16, self.cropping(h5, h16)])
    #del h5, h16
    h18 = F.relu(self.bnd5(self.conv11(h17)))
    #del h17
    h19 = F.relu(self.bnd4(self.conv12(h18)))
    #del h18
    h20 = self.dconv3(h19)
    #del h19
    h21 = F.concat([h20, self.cropping(h2, h20)])
    #del h2, h20
    h22 = F.relu(self.bnd2(self.conv13(h21)))
    #del h21
    h23 = F.relu(self.bnd1(self.conv14(h22)))
    #del h22
    h24 = self.conv15(h23)
    #del h23
    return h24
def fwd(self, x):
    """Conv trunk, then two heads: position (sigmoid, with dropout)
    and orientation (tanh); returns their concatenation along axis 1.
    """
    h = self.conv1(x)
    h = self.conv2(h)
    h = F.max_pooling_nd(h, 3, stride=2)
    h = self.conv3(h)
    h = self.conv4(h)
    h = F.max_pooling_nd(h, 3, stride=2)
    h = self.conv5(h)
    h = F.max_pooling_nd(h, 3, stride=2)
    # position head
    h1 = F.relu(self.l_pos1(h))
    h1 = F.dropout(h1, ratio=0.5)
    ## h1 = F.relu(self.l_pos2(h1))
    ## h1 = self.l_pos3(h1)
    h1 = F.sigmoid(self.l_pos(h1))
    # orientation head
    h2 = F.relu(self.l_ori1(h))
    ## h2 = F.dropout(h2, ratio = 0.5)
    h2 = F.relu(self.l_ori2(h2))
    h2 = self.l_ori3(h2)
    h2 = F.tanh(self.l_ori(h2))
    y = F.concat((h1, h2), axis=1)
    return y
def __call__(self, x):
    """Small encoder-decoder with two skip connections.

    Returns a per-voxel softmax over axis 1, shaped
    (batchsize, ch, z, y, x).
    """
    # encoder
    enc0 = F.relu(self.bne0(self.ce0(x)))
    skip_hi = F.relu(self.bne1(self.ce1(enc0)))  # high-res skip
    del enc0
    enc2 = F.relu(self.bne2(self.ce2(
        F.max_pooling_nd(skip_hi, ksize=2, stride=2))))
    skip_lo = F.relu(self.bne3(self.ce3(enc2)))  # low-res skip
    del enc2
    bottom = F.relu(self.bne4(self.ce4(
        F.max_pooling_nd(skip_lo, ksize=2, stride=2))))
    # decoder
    dec4 = F.relu(self.bnd4(self.cd4(bottom)))
    del bottom
    dec3 = F.relu(self.bnd3(self.cd3(
        F.concat([self.deconv2(dec4), skip_lo]))))
    del dec4, skip_lo
    dec2 = F.relu(self.bnd2(self.cd2(dec3)))
    del dec3
    dec1 = F.relu(self.bnd1(self.cd1(
        F.concat([self.deconv1(dec2), skip_hi]))))
    del dec2, skip_hi
    dec0 = F.relu(self.bnd0(self.cd0(dec1)))
    del dec1
    return F.softmax(self.lcl(dec0), axis=1)
def fwd(self, x):
    """Chain the seven sub-models; the first two stages are followed
    by max pooling, the rest run back to back.
    """
    out = F.max_pooling_nd(self.model1.fwd(x), self.pooling_size_1st)
    out = F.max_pooling_nd(self.model2.fwd(out), self.pooling_size_2nd)
    for stage in (self.model3, self.model4, self.model5,
                  self.model6, self.model7):
        out = stage.fwd(out)
    return out
def __call__(self, x):
    """Conv/pool stack followed by one dropout-regularized FC layer.

    NOTE(review): ``conv5a`` is applied twice in a row (no ``conv5b``
    call, unlike sibling models in this file) — possibly a typo;
    behavior preserved as-is.
    """
    out = F.max_pooling_nd(F.relu(self.conv1(x)), 1, 1)
    out = F.max_pooling_nd(F.relu(self.conv2(out)), 2, 2)
    out = F.relu(self.conv3a(out))
    out = F.max_pooling_nd(F.relu(self.conv3b(out)), 2, 2)
    out = F.relu(self.conv4a(out))
    out = F.max_pooling_nd(F.relu(self.conv4b(out)), 2, 2)
    out = F.relu(self.conv5a(out))
    out = F.max_pooling_nd(F.relu(self.conv5a(out)), 2, 2)
    return F.dropout(F.relu(self.fc(out)), ratio=self.dropout)
def fwd(self, x):
    """Two conv + LRN + pool stages, then fc3 (with dropout) and fc4.

    NOTE(review): ``F.dropout(..., train=...)`` is the old (pre-v2)
    Chainer API; newer Chainer controls train mode via
    ``chainer.using_config('train', ...)``.
    """
    h = F.max_pooling_nd(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_nd(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.dropout(F.relu(self.fc3(h)), train=self.train)
    h = self.fc4(h)
    return h
def __call__(self, x):
    """Run the three stacked sub-models and return their losses.

    This is not a Q-function for reinforcement learning; it simply
    returns the per-model losses as a list.

    Bug fix: ``debug_info`` previously stored ``model3_out`` twice and
    dropped ``model2_out``.
    """
    model1_out = self.model1.fwd_loss(x)
    h1 = F.max_pooling_nd(model1_out[0], self.pooling_size_1st)
    model2_out = self.model2.fwd_loss(h1)
    h2 = F.max_pooling_nd(model2_out[0], self.pooling_size_2nd)
    model3_out = self.model3.fwd_loss(h2)
    self.debug_info = (model1_out, model2_out, model3_out)
    return [model1_out[1], model2_out[1], model3_out[1]]
def keic(self, model, y_local, alpha_r):
    """Knowledge Enriched Inference Composition pooling.

    Runs the RNN ``model`` over dropout-regularized local vectors, then
    combines mean pooling, max pooling and a learned weighted sum of
    the hidden states into one feature vector (concatenated on axis 1).
    """
    # Knowledge Enchied Inference Composition
    hx = None
    cx = None
    xs_f = []
    for i, x in enumerate(y_local):
        x = F.dropout(x, ratio=self.dropout)
        xs_f.append(x)
    _, _, y_hidden = model(hx, cx, xs_f)
    y_hidden = F.stack(y_hidden)
    # pooling over the time axis (swap to (batch, embed, time) first)
    batchsize, maxlen, embedsize = y_hidden.shape
    y_mean = F.average_pooling_nd(
        F.swapaxes(y_hidden, axis1=1, axis2=2), ksize=maxlen).reshape(
        batchsize, embedsize)
    y_max = F.max_pooling_nd(
        F.swapaxes(y_hidden, axis1=1, axis2=2), ksize=maxlen).reshape(
        batchsize, embedsize)
    # attention-style weights from alpha_r, softmax over the time axis
    weight = F.softmax(F.relu(
        self.keic_feedforward(alpha_r.reshape(batchsize * maxlen, -1))).reshape(
        batchsize, maxlen, -1), axis=1)
    y_weight = F.sum(F.broadcast_to(weight, y_hidden.shape) * y_hidden, axis=1)
    y_pooling = F.concat((y_mean, y_max, y_weight), axis=1)
    return y_pooling
def intermidiate_feature_map(self, x):
    """Return the pooled conv5 feature map (no FC layers applied).

    The first three pools use a temporal stride of 1, so early layers
    downsample spatially only.
    """
    fmap = self.conv1a(x)
    fmap = F.max_pooling_nd(fmap, ksize=(1, 2, 2), stride=(1, 2, 2))
    fmap = self.conv2a(fmap)
    fmap = F.max_pooling_nd(fmap, ksize=(2, 2, 2), stride=(1, 2, 2))
    fmap = self.conv3a(fmap)
    fmap = self.conv3b(fmap)
    fmap = F.max_pooling_nd(fmap, ksize=(2, 2, 2), stride=(1, 2, 2))
    fmap = self.conv4a(fmap)
    fmap = self.conv4b(fmap)
    fmap = F.max_pooling_nd(fmap, ksize=(2, 2, 2), stride=(2, 2, 2))
    fmap = self.conv5a(fmap)
    fmap = self.conv5b(fmap)
    return F.max_pooling_nd(fmap, ksize=(2, 2, 2), stride=(2, 2, 2))
def check_backward_consistency_regression(self, x_data, gy_data,
                                          use_cudnn='always'):
    """Check that N-D max pooling backprop matches max_pooling_2d.

    Only runs when the test configuration is two-dimensional; the
    gradients produced by the N-D and 2-D implementations must agree.
    """
    # Regression test to two-dimensional max pooling layer.
    if len(self.dims) != 2:
        return

    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    xp = cuda.get_array_module(x_data)

    # Backward computation for N-dimensional max pooling layer.
    x_nd = chainer.Variable(xp.array(x_data))
    with chainer.using_config('use_cudnn', use_cudnn):
        y_nd = functions.max_pooling_nd(
            x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_nd.grad = gy_data
        y_nd.backward()

    # Backward computation for two-dimensional max pooling layer.
    x_2d = chainer.Variable(xp.array(x_data))
    with chainer.using_config('use_cudnn', use_cudnn):
        y_2d = functions.max_pooling_2d(
            x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_2d.grad = gy_data
        y_2d.backward()

    # Test that the two result gradients are close enough.
    testing.assert_allclose(x_nd.grad, x_2d.grad)
def f(x):
    # Pool then square, so the second derivative is non-trivial.
    pooled = functions.max_pooling_nd(
        x, self.ksize, stride=self.stride, pad=self.pad,
        cover_all=self.cover_all)
    return pooled * pooled
def forward(self):
    """Wrap self.x in a Variable and apply N-D max pooling."""
    var = chainer.Variable(self.x)
    return functions.max_pooling_nd(
        var, self.ksize, self.stride, self.pad, cover_all=False)
def _check(self, x):
    """Verify that return_indices=True yields the argmax index of each
    2x2 pooling window (input assumed (2, 3, 4, 4) by the loop bounds).
    """
    out, indices = functions.max_pooling_nd(
        x, 2, cover_all=False, return_indices=True)
    assert isinstance(out, chainer.Variable)
    assert isinstance(out.array, type(x))
    assert isinstance(indices, type(x))
    assert indices.shape == out.array.shape

    # Build the expected indices with plain numpy argmax per window.
    expect = numpy.zeros(indices.shape, dtype=indices.dtype)
    for b in six.moves.range(2):
        for ch in six.moves.range(3):
            patch = x[b, ch]
            rows = []
            for r0 in (0, 2):
                rows.append([patch[r0:r0 + 2, c0:c0 + 2].ravel().argmax()
                             for c0 in (0, 2)])
            expect[b, ch] = numpy.array(rows)
    if out.xp is cuda.cupy:
        expect = cuda.to_gpu(expect)
    assert (expect == indices).all()
def __call__(self, x):
    """Character-level conv net: embed, conv blocks with strided max
    pooling, k-max pooling, then three fully connected layers."""
    out = self.embed(x).transpose(0, 2, 1)  # (batch, embed, length)
    out = self.conv1(out)
    for block in (self.cb2, self.cb3, self.cb4):
        out = F.max_pooling_nd(block(out), 3, 2, 1, cover_all=False)
    out = self.cb5(out)
    out = k_max_pooling_1d(out, 8)
    out = F.relu(self.fc6(out))
    out = F.relu(self.fc7(out))
    return self.fc8(out)
def network(self, x):
    """VGG-like 3-D conv stack (first pool is spatial-only), followed
    by two fully connected layers l6 and l7."""
    out = self.conv1_1(x)
    out = F.max_pooling_nd(out, ksize=(1, 2, 2), stride=(1, 2, 2))
    out = self.conv2_1(out)
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv3_2(self.conv3_1(out))
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv4_2(self.conv4_1(out))
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv5_2(self.conv5_1(out))
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    return self.l7(self.l6(out))
def __call__(self, x, use_cudnn=False):
    """Three-level U-Net forward pass (old pre-v2 Chainer API).

    NOTE(review): ``test=test`` on batch normalization and the extra
    positional ``use_cudnn`` argument to F.relu are the old Chainer
    API; newer versions configure both via ``chainer.using_config``.
    Returns the raw dc0 output (no activation applied here).
    """
    test = not self.train
    e0 = F.relu(self.bnc0(self.c0(x), test=test), use_cudnn)
    syn0 = F.relu(self.bnc1(self.c1(e0), test=test), use_cudnn)  # skip 0
    del e0
    e1 = F.max_pooling_nd(syn0, 2, 2)
    e2 = F.relu(self.bnc2(self.c2(e1), test=test), use_cudnn)
    syn1 = F.relu(self.bnc3(self.c3(e2), test=test), use_cudnn)  # skip 1
    del e1, e2
    e3 = F.max_pooling_nd(syn1, 2, 2)
    e4 = F.relu(self.bnc4(self.c4(e3), test=test), use_cudnn)
    syn2 = F.relu(self.bnc5(self.c5(e4), test=test), use_cudnn)  # skip 2
    del e3, e4
    e5 = F.max_pooling_nd(syn2, 2, 2)
    e6 = F.relu(self.bnc6(self.c6(e5), test=test), use_cudnn)
    e7 = F.relu(self.bnc7(self.c7(e6), test=test), use_cudnn)  # bottleneck
    del e5, e6
    # e8 = F.relu(self.bnc8(self.c8(e7), test=test), use_cudnn)
    d9 = F.concat([self.dc9(e7), syn2])
    del e7, syn2
    d8 = F.relu(self.bnd8(self.dc8(d9), test=test), use_cudnn)
    d7 = F.relu(self.bnd7(self.dc7(d8), test=test), use_cudnn)
    del d9, d8
    d6 = F.concat([self.dc6(d7), syn1])
    del d7, syn1
    d5 = F.relu(self.bnd5(self.dc5(d6), test=test), use_cudnn)
    d4 = F.relu(self.bnd4(self.dc4(d5), test=test), use_cudnn)
    del d6, d5
    d3 = F.concat([self.dc3(d4), syn0])
    del d4, syn0
    d2 = F.relu(self.bnd2(self.dc2(d3), test=test), use_cudnn)
    d1 = F.relu(self.bnd1(self.dc1(d2), test=test), use_cudnn)
    del d3, d2
    d0 = self.dc0(d1)
    return d0
def forward(self, x):
    """Stem conv + BN + pool, four residual stages, then the final FC
    classifier fc6."""
    out = F.relu(self.bn1(self.conv1(x)))
    out = F.max_pooling_nd(out, 8, 2, 3)  # ksize=8, stride=2, pad=3
    for stage in (self.res2, self.res3, self.res4, self.res5):
        out = stage(out)
    return self.fc6(out)
def forward(self, inputs, device):
    """Apply max_pooling_nd with the test-case parameters.

    Returns a one-element tuple, as the function-test protocol expects.
    """
    x, = inputs
    pooled = functions.max_pooling_nd(
        x, self.ksize, stride=self.stride, pad=self.pad,
        cover_all=self.cover_all)
    return pooled,
def _calc(self, x):
    """U-Net style forward pass with four pooling levels.

    Skip tensors syn0-syn3 are concatenated back in during decoding;
    intermediates are del'ed to lower peak memory.  Returns the raw
    output of dc12 (no activation applied here).
    """
    e0 = F.relu(self.bnc0(self.c0(x)))
    syn0 = F.relu(self.bnc1(self.c1(e0)))  # skip, full resolution
    del e0
    e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
    e2 = F.relu(self.bnc2(self.c2(e1)))
    syn1 = F.relu(self.bnc3(self.c3(e2)))  # skip, level 1
    del e1, e2
    e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
    e4 = F.relu(self.bnc4(self.c4(e3)))
    syn2 = F.relu(self.bnc5(self.c5(e4)))  # skip, level 2
    del e3, e4
    e5 = F.max_pooling_nd(syn2, self.pool_size, self.pool_size)
    e6 = F.relu(self.bnc6(self.c6(e5)))
    syn3 = F.relu(self.bnc7(self.c7(e6)))  # skip, level 3
    del e5, e6
    e7 = F.max_pooling_nd(syn3, self.pool_size, self.pool_size)
    e8 = F.relu(self.bnc8(self.c8(e7)))
    e9 = F.relu(self.bnc9(self.c9(e8)))  # bottleneck
    del e7, e8
    d0 = F.concat([self.dc0(e9), syn3])  # upsample + skip 3
    del e9, syn3
    d1 = F.relu(self.bndc1(self.dc1(d0)))
    d2 = F.relu(self.bndc2(self.dc2(d1)))
    del d0, d1
    d3 = F.concat([self.dc3(d2), syn2])  # upsample + skip 2
    del d2, syn2
    d4 = F.relu(self.bndc4(self.dc4(d3)))
    d5 = F.relu(self.bndc5(self.dc5(d4)))
    del d3, d4
    d6 = F.concat([self.dc6(d5), syn1])  # upsample + skip 1
    del d5, syn1
    d7 = F.relu(self.bndc7(self.dc7(d6)))
    d8 = F.relu(self.bndc8(self.dc8(d7)))
    del d6, d7
    d9 = F.concat([self.dc9(d8), syn0])  # upsample + skip 0
    del d8, syn0
    d10 = F.relu(self.bndc10(self.dc10(d9)))
    d11 = F.relu(self.bndc11(self.dc11(d10)))
    del d9, d10
    d12 = self.dc12(d11)  # final projection, no activation
    del d11
    return d12
def __call__(self, x):
    """Character-level conv net with three FC output heads; the head
    outputs are concatenated along axis 1."""
    out = self.embed(x).transpose(0, 2, 1)  # (batch, embed, length)
    out = self.conv1(out)
    for block in (self.cb2, self.cb3, self.cb4):
        out = F.max_pooling_nd(block(out), 3, 2, 1, cover_all=False)
    out = self.cb5(out)
    out = k_max_pooling_1d(out, 8)
    out = F.relu(self.fc6(out))
    out = F.relu(self.fc7(out))
    heads = [self.fc8_1(out), self.fc8_2(out), self.fc8_3(out)]
    return F.concat(heads, axis=1)
def __call__(self, x):
    """VGG-style forward pass: five conv block groups with interleaved
    dropout and 2x2 max pooling, then a batch-normalized FC head.
    Returns the raw fc2 output (no activation applied here).
    """
    # 64 channel blocks:
    h = self.block1_1(x)
    h = F.dropout(h, ratio=0.3)
    h = self.block1_2(h)
    h = F.max_pooling_nd(h, ksize=2, stride=2)

    # 128 channel blocks:
    h = self.block2_1(h)
    h = F.dropout(h, ratio=0.4)
    h = self.block2_2(h)
    h = F.max_pooling_nd(h, ksize=2, stride=2)

    # 256 channel blocks:
    h = self.block3_1(h)
    h = F.dropout(h, ratio=0.4)
    h = self.block3_2(h)
    h = F.dropout(h, ratio=0.4)
    h = self.block3_3(h)
    h = F.max_pooling_nd(h, ksize=2, stride=2)

    # 512 channel blocks:
    h = self.block4_1(h)
    h = F.dropout(h, ratio=0.4)
    h = self.block4_2(h)
    h = F.dropout(h, ratio=0.4)
    h = self.block4_3(h)
    h = F.max_pooling_nd(h, ksize=2, stride=2)

    # 512 channel blocks:
    h = self.block5_1(h)
    h = F.dropout(h, ratio=0.4)
    h = self.block5_2(h)
    h = F.dropout(h, ratio=0.4)
    h = self.block5_3(h)
    h = F.max_pooling_nd(h, ksize=2, stride=2)

    # fully connected head
    h = F.dropout(h, ratio=0.5)
    h = self.fc1(h)
    h = self.bn_fc1(h)
    h = F.relu(h)
    h = F.dropout(h, ratio=0.5)
    return self.fc2(h)
def pre(self, x):
    """Conv trunk, then position (sigmoid) and orientation (tanh)
    heads, concatenated along axis 1."""
    trunk = F.max_pooling_nd(self.conv1(x), 3, stride=2)
    trunk = F.max_pooling_nd(self.conv2(trunk), 3, stride=2)
    trunk = F.max_pooling_nd(self.conv3(trunk), 3, stride=2)
    # position head (sigmoid output)
    pos = F.relu(self.l_pos1(trunk))
    pos = F.relu(self.l_pos2(pos))
    pos = F.relu(self.l_pos3(pos))
    pos = F.sigmoid(self.l_pos(pos))
    # orientation head (tanh output)
    ori = F.relu(self.l_ori1(trunk))
    ori = F.relu(self.l_ori2(ori))
    ori = F.relu(self.l_ori3(ori))
    ori = F.tanh(self.l_ori(ori))
    return F.concat((pos, ori), axis=1)
def __call__(self, x):
    """C3D-style forward pass: five conv stages with 3-D max pooling
    (first pool is spatial-only), then fc6-fc8 with ReLU + dropout."""
    out = F.max_pooling_nd(self.conv1a(x),
                           ksize=(1, 2, 2), stride=(1, 2, 2))
    out = F.max_pooling_nd(self.conv2a(out),
                           ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv3b(self.conv3a(out))
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv4b(self.conv4a(out))
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = self.conv5b(self.conv5a(out))
    out = F.max_pooling_nd(out, ksize=(2, 2, 2), stride=(2, 2, 2))
    out = F.dropout(F.relu(self.fc6(out)), ratio=0.5)
    out = F.dropout(F.relu(self.fc7(out)), ratio=0.5)
    return self.fc8(out)
def __call__(self, state):
    '''
    state: state vector

    (Translated from Japanese:) If the reward scheme allows negative Q
    values, F.relu is unsuitable for learning because it cannot output
    negative values.  The activation must be able to take negative
    values as well, e.g. F.leaky_relu, which is also a good choice
    with respect to the vanishing-gradient problem.

    return:
        type: Variable of Chainer
        Q values of all actions
    '''
    state32 = state.astype(np.float32)
    nrow, ncol = state32.shape  # NOTE(review): ncol is unused below
    #print('nrow, ncol =', nrow, ncol)
    # Split the flat state vector into TWN status / ray scan / EB status.
    twn_status = chainer.Variable(
        state32[:, 0:self.n_size_twn_status].astype(np.float32))
    x_ray = state32[:, self.n_size_twn_status:self.n_size_twn_status +
                    self.num_ray]
    eb_status = chainer.Variable(
        state32[:, self.n_size_twn_status + self.num_ray:self.n_size_twn_status +
                self.num_ray + self.n_size_eb_status].astype(np.float32))
    x = x_ray.reshape(nrow, self.in_channel_1st, self.num_ray)
    # Original comment said "2x2 max pooling, activation ReLU", but the
    # code actually uses leaky_relu and self.pooling_size_1st/2nd.
    h1 = F.max_pooling_nd(
        F.leaky_relu(self.conv1(x)), self.pooling_size_1st)
    h2 = F.max_pooling_nd(F.leaky_relu(self.conv2(h1)),
                          self.pooling_size_2nd)
    h3 = self.l3(h2)
    # Concatenate conv features with the two status vectors.
    h3_c = F.concat((twn_status, h3, eb_status), axis=1)
    h4 = F.leaky_relu(self.l4(h3_c))
    h7 = self.ml5(h4)
    self.debug_info = (h7, h3, h1, h2, h3_c, h4)
    return h7
def check_forward_consistency_regression(self, x_data, use_cudnn='always'):
    """Forward outputs of max_pooling_nd and max_pooling_2d must agree
    when the test case is two-dimensional."""
    if len(self.dims) != 2:
        return  # only meaningful for the 2-D configuration

    with chainer.using_config('use_cudnn', use_cudnn):
        y_nd = functions.max_pooling_nd(
            self.x, self.ksize, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all)
        y_2d = functions.max_pooling_2d(
            self.x, self.ksize, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all)
    testing.assert_allclose(y_nd.data, y_2d.data)
def check_forward_consistency_regression(self, x_data, use_cudnn=True):
    """Forward consistency of max_pooling_nd vs. max_pooling_2d.

    NOTE(review): passes ``use_cudnn`` as a function argument — the
    old (pre-v2) Chainer API; newer code uses chainer.using_config.
    """
    # Regression test to max_pooling_2d.
    if len(self.dims) != 2:
        return

    ksize = self.ksize
    stride = self.stride
    pad = self.pad

    y_nd = functions.max_pooling_nd(x_data, ksize, stride=stride, pad=pad,
                                    use_cudnn=use_cudnn,
                                    cover_all=self.cover_all)
    y_2d = functions.max_pooling_2d(x_data, ksize, stride=stride, pad=pad,
                                    use_cudnn=use_cudnn,
                                    cover_all=self.cover_all)
    testing.assert_allclose(y_nd.data, y_2d.data)
def check_forward(self, x_data, use_cudnn=True):
    """Compare max_pooling_nd output against a per-patch numpy max.

    NOTE(review): uses the old-style ``use_cudnn`` argument (pre-v2
    Chainer).  The loop below rebinds ``x`` (the Variable) to a numpy
    slice; the Variable is no longer needed at that point, but the
    name reuse is confusing.
    """
    dims = self.dims
    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    x = chainer.Variable(x_data)
    y = functions.max_pooling_nd(x, ksize, stride=stride, pad=pad,
                                 cover_all=self.cover_all,
                                 use_cudnn=use_cudnn)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)

    self.assertEqual(self.gy.shape, y_data.shape)
    # Expected output: max over every pooling patch, per sample/channel.
    patches = pooling_nd_helper.pooling_patches(
        dims, ksize, stride, pad, self.cover_all)
    for k in six.moves.range(2):
        for c in six.moves.range(3):
            x = self.x[k, c]
            expect = numpy.array([x[idx].max() for idx in patches])
            expect = expect.reshape(y_data.shape[2:])
            testing.assert_allclose(expect, y_data[k, c])
def _check(self, x):
    """Check that return_indices=True returns the per-window argmax
    index for 2x2 pooling (input assumed (2, 3, 4, 4) — implied by the
    loop bounds and slices below).
    """
    out, indices = functions.max_pooling_nd(
        x, 2, cover_all=False, return_indices=True)
    assert isinstance(out, chainer.Variable)
    assert isinstance(out.array, type(x))
    assert isinstance(indices, type(x))
    assert indices.shape == out.array.shape

    # Calculate expected indices.
    expect = numpy.zeros(indices.shape, dtype=indices.dtype)
    for i in six.moves.range(2):
        for c in six.moves.range(3):
            xx = x[i, c]
            expect[i, c] = numpy.array([
                [xx[0:2, 0:2].ravel().argmax(),
                 xx[0:2, 2:4].ravel().argmax()],
                [xx[2:4, 0:2].ravel().argmax(),
                 xx[2:4, 2:4].ravel().argmax()],
            ])
    if out.xp is not numpy:
        expect = cuda.to_gpu(expect)
    assert (expect == indices).all()
def forward(self):
    """Pool self.x with the configured kernel/stride/pad.

    Uses the old-style ``use_cudnn`` keyword stored on the test case.
    """
    var = chainer.Variable(self.x)
    return functions.max_pooling_nd(var, self.ksize, self.stride,
                                    self.pad, cover_all=False,
                                    use_cudnn=self.use_cudnn)
def f(x):
    # Square the pooled output so second-order gradients are non-trivial.
    pooled = functions.max_pooling_nd(x, self.ksize, stride=self.stride,
                                      pad=self.pad,
                                      cover_all=self.cover_all)
    return pooled * pooled
def test_max_pooling_3d(self):
    """max_pooling_nd on 3-D data must match max_pooling_3d exactly."""
    x, ksize = self._get_data(3)
    nd_result = functions.max_pooling_nd(x, ksize).data
    result_3d = functions.max_pooling_3d(x, ksize).data
    testing.assert_allclose(nd_result, result_3d)
def __call__(self, x):
    """Apply N-D max pooling with the stored ksize/stride/pad."""
    return functions.max_pooling_nd(x, self.ksize, self.stride, self.pad)