def check_ambiguous_poolings(self):
    """Ambiguous pooling specifications must raise ValueError."""
    # No pooling specifier at all is ambiguous.
    with self.assertRaises(ValueError):
        functions.spatial_pyramid_pooling_2d(self.v, 3)
    # Supplying both the deprecated pooling_class and the new pooling
    # argument is also ambiguous (the deprecation warning fires first).
    with testing.assert_warns(DeprecationWarning):
        with self.assertRaises(ValueError):
            functions.spatial_pyramid_pooling_2d(
                self.v, 3, pooling_class=functions.MaxPooling2D,
                pooling='max')
def check_invalid_poolings(self):
    """Unsupported pooling specifications must raise ValueError."""
    # Unknown pooling name.
    with self.assertRaises(ValueError):
        functions.spatial_pyramid_pooling_2d(self.v, 3, pooling='avg')
    # Unsupported pooling class via the deprecated keyword.
    with testing.assert_warns(DeprecationWarning):
        with self.assertRaises(ValueError):
            functions.spatial_pyramid_pooling_2d(
                self.v, 3, pooling_class=functions.AveragePooling2D)
def func(self, x):
    """Run SPP using whichever pooling spec this test case defines.

    Uses the new ``pooling`` keyword when present, otherwise the
    deprecated ``pooling_class`` (expecting a DeprecationWarning).
    """
    if hasattr(self, 'pooling'):
        out = functions.spatial_pyramid_pooling_2d(
            x, self.pyramid_height, pooling=self.pooling)
    elif hasattr(self, 'pooling_class'):
        with testing.assert_warns(DeprecationWarning):
            out = functions.spatial_pyramid_pooling_2d(
                x, self.pyramid_height, self.pooling_class)
    else:
        assert False
    return out
def calc_loss(self, x, t, layer, train=True):
    """Compute the mean-squared-error loss for one of six sub-network layouts.

    ``layer`` (0-5) selects which pooling/conv/linear pipeline is run.
    NOTE(review): this reads the module-level global ``model`` rather than
    ``self`` — confirm that is intended.
    NOTE(review): if ``layer`` is outside 0-5 no branch assigns ``h`` and the
    final loss line raises NameError.
    """
    self.clear()
    if (layer == 0):
        # Max pooling front end.
        h = F.max_pooling_2d(F.relu(model.conv1(x)), 4)
        h = self.norm2(h, test=not train)
        h = F.relu(model.l1(h))
        h = F.relu(model.l2(h))
    elif layer == 1:
        # Spatial pyramid pooling front end.
        h = F.spatial_pyramid_pooling_2d(F.relu(model.conv1(x)), 4,
                                         F.MaxPooling2D)
        h = self.norm5(h, test=not train)
        h = F.relu(model.l4(h))
        h = F.relu(model.l2(h))
    elif layer == 2:
        # Average pooling front end.
        h = F.average_pooling_2d(F.relu(model.conv1(x)), 4)
        h = self.norm2(h, test=not train)
        h = F.relu(model.l1(h))
        h = F.relu(model.l2(h))
    elif layer == 3:
        # Two-stage max pooling.
        h = F.max_pooling_2d(F.relu(model.conv1(x)), 2)
        h = self.norm2(h, test=not train)
        h = F.max_pooling_2d(F.relu(model.conv2(h)), 2)
        h = self.norm1(h, test=not train)
        h = F.relu(model.l3(h))
        h = F.relu(model.l2(h))
    elif layer == 4:
        # Max pooling followed by SPP on a second conv stack.
        h = F.max_pooling_2d(F.relu(model.conv4(x)), 2)
        h = self.norm3(h, test=not train)
        h = F.spatial_pyramid_pooling_2d(F.relu(model.conv3(h)), 2,
                                         F.MaxPooling2D)
        h = self.norm6(h, test=not train)
        h = F.relu(model.l5(h))
        h = F.relu(model.l2(h))
    elif layer == 5:
        # Two-stage average pooling.
        h = F.average_pooling_2d(F.relu(model.conv1(x)), 2)
        h = self.norm2(h, test=not train)
        h = F.average_pooling_2d(F.relu(model.conv2(h)), 2)
        h = self.norm1(h, test=not train)
        h = F.relu(model.l3(h))
        h = F.relu(model.l2(h))
    # Leftover debug block (a no-op string expression, kept as-is).
    '''
    print (h.data.shape)
    #t=t.T.data
    print (t.data)
    print (h.data.shape)
    #print (h.data.T)
    print (t.data.shape)
    '''
    loss = F.mean_squared_error(h, t)
    return loss
def __call__(self, x):
    """Forward pass: three conv/BN stages, SPP, then two linear/BN heads.

    Layers are fetched from ``self.__dict__`` by name (P*_* conv links,
    BN*_* batch-norm links, L1/L2 linear links).
    """
    h = x
    h = self.__dict__["P1_1"](F.leaky_relu(h))
    h = self.__dict__["BN1_1"](h)
    h = self.__dict__["P1_2"](F.leaky_relu(h))
    h = self.__dict__["BN1_2"](h)
    h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
    h = self.__dict__["P2_1"](h)
    h = self.__dict__["BN2_1"](h)
    h = self.__dict__["P2_2"](F.leaky_relu(h))
    h = self.__dict__["BN2_2"](h)
    # BUG FIX: the original applied "P2_2" a second time here even though the
    # following batch norm is BN2_3; the third conv of stage 2 is clearly
    # meant (assumes a P2_3 link is registered — TODO confirm).
    h = self.__dict__["P2_3"](F.leaky_relu(h))
    h = self.__dict__["BN2_3"](h)
    h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
    h = self.__dict__["P3_1"](h)
    h = self.__dict__["BN3_1"](h)
    h = self.__dict__["P3_2"](F.leaky_relu(h))
    h = self.__dict__["BN3_2"](h)
    h = self.__dict__["P3_3"](F.leaky_relu(h))
    h = self.__dict__["BN3_3"](h)
    # Spatial pyramid pooling replaces the fixed-size average pooling so the
    # classifier tolerates variable input resolutions.
    h = F.spatial_pyramid_pooling_2d(F.leaky_relu(h), 3, F.MaxPooling2D)
    h = self.__dict__["BNL0"](h)
    h = self.__dict__["L1"](F.leaky_relu(h))
    h = self.__dict__["BNL1"](h)
    y = self.__dict__["L2"](F.leaky_relu(h))
    return y
def check_forward(self, x_data, use_cudnn=True):
    """Forward SPP and verify the output dtype and shape."""
    v = chainer.Variable(x_data)
    out = functions.spatial_pyramid_pooling_2d(
        v, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
    self.assertEqual(out.data.dtype, self.dtype)
    host_out = cuda.to_cpu(out.data)
    self.assertEqual(self.gy.shape, host_out.shape)
def check_forward_ones(self, x_data, use_cudnn=True):
    """An all-ones input must pool to an all-ones (n, output_dim, 1, 1) map."""
    v = chainer.Variable(x_data)
    out = functions.spatial_pyramid_pooling_2d(
        v, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
    host_out = cuda.to_cpu(out.data)
    self.assertEqual((self.n, self.output_dim, 1, 1), host_out.shape)
    gradient_check.assert_allclose(host_out, numpy.ones_like(host_out))
def __call__(self, x):
    """Three conv stages, spatial pyramid pooling, then two linear layers."""
    out = F.relu(self.conv1(x))
    out = F.max_pooling_2d(out, 3, stride=2)
    out = F.relu(self.conv2(out))
    out = F.max_pooling_2d(out, 3, stride=2)
    out = F.relu(self.conv3(out))
    out = F.spatial_pyramid_pooling_2d(out, 3, F.MaxPooling2D)
    out = F.relu(self.fc4(out))
    return self.fc5(out)
def feat(self, h):
    """Elementwise product of two conv maps, SPP, optional L2 normalization."""
    left = F.convolution_2d(h, self.W1)
    right = F.convolution_2d(h, self.W2)
    pooled = F.spatial_pyramid_pooling_2d(left * right, self.p, F.MaxPooling2D)
    if not self.l2normalize:
        return pooled
    flat = F.reshape(pooled, (pooled.data.shape[0], -1))
    return F.normalize(ssq(flat))
def check_forward_ones(self, x_data, use_cudnn=True):
    """Pooling a constant-ones input must yield exactly ones."""
    pooled = functions.spatial_pyramid_pooling_2d(
        chainer.Variable(x_data), self.pyramid_height, self.pooling_class,
        use_cudnn=use_cudnn)
    arr = cuda.to_cpu(pooled.data)
    self.assertEqual((self.n, self.output_dim, 1, 1), arr.shape)
    gradient_check.assert_allclose(arr, numpy.ones_like(arr))
def __call__(self, x):
    """Conv stack with SPP and a dropout-regularized classifier head."""
    z = F.max_pooling_2d(F.relu(self.conv1(x)), 3, stride=2)
    z = F.max_pooling_2d(F.relu(self.conv2(z)), 3, stride=2)
    z = F.spatial_pyramid_pooling_2d(F.relu(self.conv3(z)), 3, F.MaxPooling2D)
    z = F.dropout(F.relu(self.fc4(z)), ratio=0.5)
    return self.fc5(z)
def check_forward(self, x_data, use_cudnn=True):
    """Forward SPP; output must be float32 and match the gradient's shape."""
    pooled = functions.spatial_pyramid_pooling_2d(
        chainer.Variable(x_data), self.pyramid_height, self.pooling_class,
        use_cudnn=use_cudnn)
    self.assertEqual(pooled.data.dtype, numpy.float32)
    self.assertEqual(self.gy.shape, cuda.to_cpu(pooled.data).shape)
def __call__(self, x, turn, train=True):
    """Forward pass; the batch-norm layer is selected per ``turn``."""
    act = self.act
    z = F.max_pooling_2d(act(self.conv1(x)), 3, stride=2)
    z = F.max_pooling_2d(act(self.conv2(z)), 3, stride=2)
    z = F.spatial_pyramid_pooling_2d(act(self.conv3(z)), 3, F.MaxPooling2D)
    z = self.fc5(act(self.fc4(z)))
    return self.bn_list[turn](z, test=not train)
def check_forward(self, x_data, use_cudnn='always'):
    """Forward SPP under a use_cudnn config; check dtype and shape."""
    v = chainer.Variable(x_data)
    with chainer.using_config('use_cudnn', use_cudnn):
        pooled = functions.spatial_pyramid_pooling_2d(
            v, self.pyramid_height, self.pooling_class)
    self.assertEqual(pooled.data.dtype, self.dtype)
    self.assertEqual(self.gy.shape, cuda.to_cpu(pooled.data).shape)
def check_forward_ones(self, x_data, use_cudnn='always'):
    """Ones in, ones out: shape, dtype and values are all checked."""
    v = chainer.Variable(x_data)
    with chainer.using_config('use_cudnn', use_cudnn):
        pooled = functions.spatial_pyramid_pooling_2d(
            v, self.pyramid_height, self.pooling_class)
    host = cuda.to_cpu(pooled.data)
    self.assertEqual(host.shape, (self.n, self.output_dim, 1, 1))
    self.assertEqual(host.dtype, self.dtype)
    testing.assert_allclose(host, numpy.ones_like(host))
def check_backward(self, x_data, y_grad, use_cudnn=True):
    """Compare the analytic SPP gradient against a numerical gradient."""
    x = chainer.Variable(x_data)
    y = functions.spatial_pyramid_pooling_2d(
        x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
    y.grad = y_grad
    y.backward()
    func = y.creator

    # PEP 8 (E731): use a named def rather than a lambda bound to a name.
    def forward_once():
        return func.forward((x.data,))

    gx, = gradient_check.numerical_grad(forward_once, (x.data,), (y.grad,))
    gradient_check.assert_allclose(
        cuda.to_cpu(gx), cuda.to_cpu(x.grad), atol=1e-04)
def __call__(self, x):
    """Add a batch axis, run the conv stack, SPP, then two linear heads."""
    # Simplified from `tuple((1, )) + x.shape`: `(1,)` is already a tuple.
    h = x.reshape((1,) + x.shape)
    h = self.conv0(h)
    h = F.leaky_relu(h)
    for i in range(1, self.n):
        h = self[f"conv{i}"](h)
    # NOTE(review): no pooling argument is given; recent Chainer releases
    # require `pooling=` here — confirm the pinned Chainer version accepts it.
    h = F.spatial_pyramid_pooling_2d(h, 5)
    h = self.l0(h)
    h = F.leaky_relu(h)
    h = self.l1(h)
    return h
def check_backward(self, x_data, y_grad, use_cudnn=True):
    """Check the SPP backward pass against a numerical gradient."""
    x = chainer.Variable(x_data)
    y = functions.spatial_pyramid_pooling_2d(
        x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
    y.grad = y_grad
    y.backward()
    func = y.creator

    # PEP 8 (E731): use a named def rather than a lambda bound to a name.
    def forward_once():
        return func.forward((x.data,))

    gx, = gradient_check.numerical_grad(forward_once, (x.data,), (y.grad,))
    gradient_check.assert_allclose(cuda.to_cpu(gx), cuda.to_cpu(x.grad))
def __call__(self, x):
    """Forward pass: NPLayers conv/pool stages, SPP, then two linear layers.

    Conv links are fetched from ``self.__dict__`` by the name "P<i>".
    """
    h = x
    for iL in range(self.NPLayers):
        h = self.__dict__["P%d"%iL](h)
        # Local response normalization only after the first conv stage.
        if iL==0:
            h = F.local_response_normalization(h)
        # Kernel size for the pooling of stage iL comes from NKsize[iL+1].
        h = F.max_pooling_2d(F.elu(h), ksize=self.NKsize[iL+1],
                             cover_all=True)
    # SPP makes the classifier input size independent of image resolution.
    h = F.spatial_pyramid_pooling_2d(F.elu(h), 3, F.MaxPooling2D)
    h = F.dropout(F.elu(self.L1(h)),ratio=self.L1_dropout,train=self.IsTrain)
    h = F.elu(self.L2(h))
    y = h
    return y
def forward(self, x, layer, train=True):
    """Run one of six sub-network layouts selected by ``layer`` (0-5).

    Same dispatch as ``calc_loss`` but returns the hidden activation
    instead of a loss.
    NOTE(review): reads the module-level global ``model`` — confirm intended.
    NOTE(review): an out-of-range ``layer`` leaves ``h`` unbound and the
    return raises NameError.
    """
    self.clear()
    if (layer == 0):
        # Max pooling front end.
        h = F.max_pooling_2d(F.relu(model.conv1(x)), 4)
        h = self.norm2(h, test=not train)
        h = F.relu(model.l1(h))
        h = F.relu(model.l2(h))
    elif layer == 1:
        # Spatial pyramid pooling front end.
        h = F.spatial_pyramid_pooling_2d(F.relu(model.conv1(x)), 4,
                                         F.MaxPooling2D)
        h = self.norm5(h, test=not train)
        h = F.relu(model.l4(h))
        h = F.relu(model.l2(h))
    elif layer == 2:
        # Average pooling front end.
        h = F.average_pooling_2d(F.relu(model.conv1(x)), 4)
        h = self.norm2(h, test=not train)
        h = F.relu(model.l1(h))
        h = F.relu(model.l2(h))
    elif layer == 3:
        # Two-stage max pooling.
        h = F.max_pooling_2d(F.relu(model.conv1(x)), 2)
        h = self.norm2(h, test=not train)
        h = F.max_pooling_2d(F.relu(model.conv2(h)), 2)
        h = self.norm1(h, test=not train)
        h = F.relu(model.l3(h))
        h = F.relu(model.l2(h))
    elif layer == 4:
        # Max pooling followed by SPP on a second conv stack.
        h = F.max_pooling_2d(F.relu(model.conv4(x)), 2)
        h = self.norm3(h, test=not train)
        h = F.spatial_pyramid_pooling_2d(F.relu(model.conv3(h)), 2,
                                         F.MaxPooling2D)
        h = self.norm6(h, test=not train)
        h = F.relu(model.l5(h))
        h = F.relu(model.l2(h))
    elif layer == 5:
        # Two-stage average pooling.
        h = F.average_pooling_2d(F.relu(model.conv1(x)), 2)
        h = self.norm2(h, test=not train)
        h = F.average_pooling_2d(F.relu(model.conv2(h)), 2)
        h = self.norm1(h, test=not train)
        h = F.relu(model.l3(h))
        h = F.relu(model.l2(h))
    return h
def forward(self, x_data, y_data, train=True, gpu=-1):
    """Return (softmax cross-entropy loss, accuracy) for one minibatch.

    Arrays are moved to the GPU first when ``gpu`` is non-negative.
    """
    if gpu >= 0:
        x_data = cuda.to_gpu(x_data)
        y_data = cuda.to_gpu(y_data)
    x = Variable(x_data)
    t = Variable(y_data)
    # Note: the first conv uses a sigmoid activation, the rest use relu.
    h = F.max_pooling_2d(F.sigmoid(self.conv1(x)), 3, stride=2)
    h = F.max_pooling_2d(F.relu(self.conv2(h)), 3, stride=2)
    h = F.spatial_pyramid_pooling_2d(F.relu(self.conv3(h)), 3, F.MaxPooling2D)
    h = F.dropout(F.relu(self.fc4(h)), ratio=0.4, train=train)
    logits = self.fc5(h)
    return F.softmax_cross_entropy(logits, t), F.accuracy(logits, t)
def __call__(self, x, t):
    """Return the loss in training mode, the softmax prediction otherwise."""
    z = F.max_pooling_2d(F.relu(self.conv1(x)), 3, stride=2)
    z = F.max_pooling_2d(F.relu(self.conv2(z)), 3, stride=2)
    z = F.spatial_pyramid_pooling_2d(F.relu(self.conv3(z)), 3, F.MaxPooling2D)
    z = F.dropout(F.relu(self.fc4(z)), ratio=0.5, train=self.train)
    logits = self.fc5(z)
    if not self.train:
        self.pred = F.softmax(logits)
        return self.pred
    self.loss = F.softmax_cross_entropy(logits, t)
    self.acc = F.accuracy(logits, t)
    return self.loss
def __call__(self, x):
    """ResNet-style trunk, SPP head, and per-class conv logits."""
    out = F.max_pooling_2d(F.elu(self.conv1(x)), 3, stride=2)
    for res_block in (self.res2, self.res3, self.res4, self.res5):
        out = res_block(out, self.train)
    out = F.spatial_pyramid_pooling_2d(out, 3, F.MaxPooling2D)
    out = F.dropout(F.elu(self.conv2(out)), ratio=0.5)
    out = self.conv3(out)
    return F.reshape(out, (-1, self.num_class))
def __call__(self, x, t):
    """Compute loss/accuracy (always) and return loss or prediction."""
    z = F.max_pooling_2d(F.relu(self.conv1(x)), 3, stride=2)
    z = F.max_pooling_2d(F.relu(self.conv2(z)), 3, stride=2)
    z = F.spatial_pyramid_pooling_2d(F.relu(self.conv3(z)), 3, F.MaxPooling2D)
    z = F.dropout(F.relu(self.fc4(z)), ratio=0.5, train=self.train)
    logits = self.fc5(z)
    # Loss and accuracy are computed even at test time.
    self.loss = F.softmax_cross_entropy(logits, t)
    self.accuracy = F.accuracy(logits, t)
    if self.train:
        return self.loss
    self.pred = F.softmax(logits)
    return self.pred
def __call__(self, x):
    """Forward pass: NPLayers conv/pool stages, SPP, then two linear layers.

    Duplicate of the unformatted version above; conv links are fetched
    from ``self.__dict__`` by the name "P<i>".
    """
    h = x
    for iL in range(self.NPLayers):
        h = self.__dict__["P%d" % iL](h)
        # Local response normalization only after the first conv stage.
        if iL == 0:
            h = F.local_response_normalization(h)
        # Pooling kernel size for stage iL comes from NKsize[iL + 1].
        h = F.max_pooling_2d(F.elu(h), ksize=self.NKsize[iL + 1],
                             cover_all=True)
    # SPP makes the classifier input size independent of image resolution.
    h = F.spatial_pyramid_pooling_2d(F.elu(h), 3, F.MaxPooling2D)
    h = F.dropout(F.elu(self.L1(h)), ratio=self.L1_dropout,
                  train=self.IsTrain)
    h = F.elu(self.L2(h))
    y = h
    return y
def __call__(self, x, t=None):
    """SqueezeNet-style forward with an external-memory weighting step.

    Returns the softmax cross-entropy loss; stores ``h``, ``prob``,
    ``loss`` and ``accuracy`` on ``self`` and reports them to chainer.
    NOTE(review): ``t`` defaults to None but ``t.volatile`` is set
    unconditionally below — calling with t=None raises AttributeError.
    """
    self.clear()
    x.volatile = not self.train
    t.volatile = 'AUTO'
    h = F.elu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.fire2(h)
    h = self.fire3(h)
    h = self.fire4(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.fire5(h)
    h = self.fire6(h)
    h = self.fire7(h)
    h = self.fire8(h)
    h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
    h = F.elu(self.conv9(h))
    # Detached copy of the features: the memory lookup must not backprop
    # into the conv trunk.
    memory_h = chainer.Variable(h.data, volatile='AUTO')
    with chainer.no_backprop_mode():
        weight, self.memory = \
            self.apply_memory(memory_h, t, self.update_weight, self.train)
    if self.train:
        # Persist the updated memory contents during training.
        self.apply_memory.memory.data = self.memory.data
    h = F.dropout(h, ratio=0.5, train=self.train)
    h = self.conv_infer(h)
    h = F.reshape(h, (-1, self.n_class))
    # Scale the logits by the memory-derived weight.
    h = h*weight
    self.h = h
    self.prob = F.softmax(h)
    if self.active_learn:
        # Mask labels that the active-learning policy chooses to ignore.
        t = mask_gt_for_active_learning(self.prob, t, self.xp, self.n_class)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    chainer.report({'loss': self.loss, 'accuracy': self.accuracy}, self)
    return self.loss
def __call__(self, x, t):
    """Forward pass that freezes the conv trunk via the volatile flag.

    Marking ``x`` volatile and then clearing the flag after conv3 stops
    backprop from reaching conv1-conv3; only fc4/fc5 are trained.
    """
    x.volatile = True
    h = F.max_pooling_2d(F.elu(self.conv1(x)), 3, stride=2)
    h = F.max_pooling_2d(F.elu(self.conv2(h)), 3, stride=2)
    h = F.elu(self.conv3(h))
    # Re-enable graph construction from this point onward.
    h.volatile = False
    h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
    h = F.dropout(F.elu(self.fc4(h)), ratio=0.5, train=self.train)
    h = self.fc5(h)
    self.prob = F.softmax(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    chainer.report({'loss': self.loss, 'accuracy': self.accuracy}, self)
    return self.loss
def __call__(self, x, t):
    """Iterated recurrent forward; accumulates a loss per iteration.

    Every even iteration (after the first) re-feeds the hidden state from
    two iterations earlier into ``self.rec``. Returns the final hidden
    state list's last element; the accumulated loss is kept on ``self``.
    """
    h_lists = []
    self.loss = 0
    for i in range(self.iter_time):
        # State from two iterations ago, only on even iterations i >= 2.
        h_list_p2 = h_lists[i-2] if i > 0 and i % 2 == 0 else None
        h_list = self.rec(x, h=h_list_p2)
        h_lists.append(h_list)
        h = F.spatial_pyramid_pooling_2d(h_list[-1], 3, F.MaxPooling2D)
        h = F.elu(h)
        h = self.conv_infer(h)
        h = F.reshape(h, (-1, self.n_class))
        # Sum the per-iteration classification losses.
        self.loss += F.softmax_cross_entropy(h, t)
        # x = F.elu(h_list[-1])
    self.h = h
    return h_list[-1]
def __call__(self, x):
    """Conv trunk of residual-style blocks, SPP, then two linear layers."""
    h = self.conv1(x)
    # NOTE(review): the original had a bare `F.max_pooling_2d(h, 2)` here
    # whose result was discarded — a no-op. Removed as dead code; if pooling
    # was actually intended it should read `h = F.max_pooling_2d(h, 2)` —
    # confirm against the training configuration.
    h = self.block1_1(h, 0.2)
    h = self.block1_2(h, 0.2)
    h = self.block1_3(h, 0.2)
    h = self.block2_1(h, 0.3)
    h = self.block2_2(h, 0.3)
    h = self.block2_3(h, 0.3)
    h = self.block3_1(h, 0.4)
    h = self.block3_2(h, 0.4)
    h = self.block3_3(h, 0.4)
    h = F.spatial_pyramid_pooling_2d(h, 4, F.MaxPooling2D)
    h = self.l1(h)
    return F.tanh(self.l2(h))
def __call__(self, x):
    """Encode x; stores the fire8 output ("H" in the paper) and the
    context vector as attributes. Returns None."""
    z = F.max_pooling_2d(F.elu(self.conv1(x)), 3, stride=2)
    z = self.fire2(z)
    z = self.fire3(z)
    z = self.fire4(z)
    z = F.max_pooling_2d(z, 3, stride=2)
    z = self.fire5(z)
    z = self.fire6(z)
    z = self.fire7(z)
    self.sorce_hidden_state = self.fire8(z)  # H in the paper.
    z = F.spatial_pyramid_pooling_2d(self.sorce_hidden_state, 3,
                                     F.MaxPooling2D)
    z = F.elu(self.conv9(z))
    z = F.dropout(z, ratio=0.5, train=self.train)
    self.context_vec = self.conv10(z)
def __call__(self, x):
    """Short SqueezeNet encoder (fire2-4 only) producing a flat query."""
    z = F.max_pooling_2d(F.elu(self.conv1(x)), 3, stride=2)
    z = self.fire2(z)
    z = self.fire3(z)
    z = self.fire4(z)
    z = F.spatial_pyramid_pooling_2d(z, 3, F.MaxPooling2D)
    z = F.elu(self.conv9(z))
    # Drop the trailing 1x1 spatial axes: (n, c, 1, 1) -> (n, c).
    return F.reshape(z, z.data.shape[:2])
def f(x):
    """Squared SPP output, used for second-order gradient checks."""
    pooled = functions.spatial_pyramid_pooling_2d(
        x, self.pyramid_height, self.pooling_class)
    return pooled * pooled
def forward(self):
    """Run SPP with max pooling over the stored input array."""
    return functions.spatial_pyramid_pooling_2d(
        chainer.Variable(self.x), 3, functions.MaxPooling2D)
def check_invalid_dtype(self):
    """Forward with the invalid-dtype input; the caller asserts the error."""
    pyramid_height = 3
    functions.spatial_pyramid_pooling_2d(
        self.v, pyramid_height, functions.MaxPooling2D)
def check_invalid_dtype(self):
    """Forward with the invalid-dtype input; the caller asserts the error."""
    pyramid_height = 3
    functions.spatial_pyramid_pooling_2d(
        self.v, pyramid_height, pooling='max')
def __call__(self, x):
    """Single conv, SPP, then a dropout-regularized two-layer head."""
    pooled = F.spatial_pyramid_pooling_2d(
        F.relu(self.conv1(x)), 2, F.MaxPooling2D)
    hidden = F.dropout(F.relu(self.l1(pooled)))
    return self.l2(hidden)
def check_ambiguous_poolings(self):
    """Omitting the pooling specifier entirely must raise ValueError."""
    pyramid_height = 3
    with self.assertRaises(ValueError):
        functions.spatial_pyramid_pooling_2d(self.v, pyramid_height)
def check_invalid_poolings(self):
    """An unsupported pooling name must raise ValueError."""
    pyramid_height = 3
    with self.assertRaises(ValueError):
        functions.spatial_pyramid_pooling_2d(
            self.v, pyramid_height, pooling='avg')
def func(self, x):
    """SPP forward used by the gradient checks."""
    pooled = functions.spatial_pyramid_pooling_2d(
        x, self.pyramid_height, pooling=self.pooling)
    return pooled
def check_invalid_dtype(self):
    """Forward with the invalid-dtype input; the caller asserts the error."""
    height = 3
    functions.spatial_pyramid_pooling_2d(self.v, height, pooling='max')
def __call__(self, x):
    """
    Forward propagation.

    Dispatches on ``self.model_arch`` to one of many experimental
    conv + spatial-pyramid-pooling front ends, then feeds the result
    through one, two or three stacked LSTM links.

    Args:
        x (a Variable of feature array): a feature array

    Returns:
        h (a Variable of hidden state array): a hidden state array
    """
    dropout_ratio = 0.2
    if self.model_arch == 'MP-C-SPP-FC-LSTM':  # 69.565
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.tanh(self.input_conv(z))  # 512, ksize=3, stride=1, pad=1
        #z = F.spatial_pyramid_pooling_2d(z, 3, pooling_class=F.MaxPooling2D)
        z = F.spatial_pyramid_pooling_2d(z, 3, pooling="max")
        z = F.tanh(self.input(z))
    elif self.model_arch == 'MP-C-SPP-FC-DO-LSTM':  # 68.944
        z = F.max_pooling_2d(x, 2)
        z = F.tanh(self.input_conv(z))
        #z = F.spatial_pyramid_pooling_2d(z, 3, pooling_class=F.MaxPooling2D)
        z = F.spatial_pyramid_pooling_2d(z, 3, pooling="max")
        z = F.tanh(self.input(z))
        z = F.dropout(z, ratio=dropout_ratio)
    # 12182019
    elif self.model_arch == 'MP-C-SPP4-FC-LSTM':
        # revert stride=2 as previously doesn't work - 12192019
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.tanh(self.input_conv(z))  # 512, ksize=3, stride=1, pad=1
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")
        z = F.tanh(self.input(z))
    elif self.model_arch == 'MP-C-SPP4-FC-DO-LSTM':  # 68.944
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.tanh(self.input_conv(z))
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")  # pyramid_height=4
        z = F.tanh(self.input(z))
        z = F.dropout(z, ratio=dropout_ratio)
    elif self.model_arch == 'MP-C-SPP4-RL-LSTM':
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.tanh(self.input_conv(z))
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")  # pyramid_height=4
        z = F.relu(self.input(z))
    elif self.model_arch == 'MP-C-SPP4-RL-DO-LSTM':
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.tanh(self.input_conv(z))
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")  # pyramid_height=4
        z = F.relu(self.input(z))
        z = F.dropout(z, ratio=dropout_ratio)
    elif self.model_arch == 'MP-C-SPP4-RL2-LSTM':
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.relu(self.input_conv(z))
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")  # pyramid_height=4
        z = F.relu(self.input(z))
    elif self.model_arch == 'MP-C-SPP4-RL2-DO-LSTM':
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.relu(self.input_conv(z))
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")  # pyramid_height=4
        z = F.relu(self.input(z))
        z = F.dropout(z, ratio=dropout_ratio)
    # end 12182019
    # 12242019
    elif self.model_arch == 'MP-C-RL-SPP4-LSTM':
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.relu(self.input_conv(z))
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")  # pyramid_height=4
        z = F.tanh(self.input(z))
    elif self.model_arch == 'MP-C-RL-SPP4-DO-LSTM':
        z = F.max_pooling_2d(x, 2)  # ksize=2, stride=2
        z = F.relu(self.input_conv(z))
        z = F.spatial_pyramid_pooling_2d(z, 4, pooling="max")  # pyramid_height=4
        z = F.tanh(self.input(z))
        z = F.dropout(z, ratio=dropout_ratio)
    # end 12242019
    elif self.model_arch == 'DO-MP-C-SPP-FC-LSTM':
        z = F.dropout(x, ratio=dropout_ratio)
        z = F.max_pooling_2d(z, 2)
        z = F.tanh(self.input_conv(z))
        #z = F.spatial_pyramid_pooling_2d(z, 3, pooling_class=F.MaxPooling2D)
        z = F.spatial_pyramid_pooling_2d(z, 3, pooling="max")
        z = F.tanh(self.input(z))
    elif self.model_arch == 'MP-C-SPP-FC-DO-LSTM2' or self.model_arch == 'MP-C-SPP-FC-DO-LSTM3':
        # 10252019, 10282019
        #z = F.max_pooling_2d(x, 2) #11012019
        z = F.tanh(self.input_conv(x))
        z = F.dropout(z, ratio=dropout_ratio)  # 11012019
        #z = F.max_pooling_2d(z, 2) #11012019
        z = F.tanh(self.input_middle_conv(z))
        #z = F.max_pooling_2d(z, 2) # 10272019, 11012019
        z = F.dropout(z, ratio=dropout_ratio)  # 11012019
        z = F.tanh(self.input_sec_middle_conv(z))  # 10272019
        #z = F.max_pooling_2d(z, 2) # 10292019, 11012019
        z = F.dropout(z, ratio=dropout_ratio)  # 11012019
        z = F.tanh(self.input_third_middle_conv(z))  # 10292019
        #z = F.spatial_pyramid_pooling_2d(z, 3, pooling_class=F.MaxPooling2D)
        z = F.spatial_pyramid_pooling_2d(z, 3, pooling="max")
        z = F.tanh(self.input(z))
        z = F.dropout(z, ratio=dropout_ratio)
    elif self.model_arch == 'MP-C-SPP-FC-LSTM2' or self.model_arch == 'MP-C-SPP-FC-LSTM3':
        # 10252019, 10282019
        #z = F.max_pooling_2d(x, 2) # ksize=2, stride=2
        z = F.tanh(self.input_conv(x))  # 512, ksize=3, stride=1, pad=1
        z = F.dropout(z, ratio=dropout_ratio)  # 11012019
        #z = F.max_pooling_2d(x, 2) #11012019
        z = F.tanh(self.input_middle_conv(z))
        #z = F.max_pooling_2d(z, 2) # 10272019
        z = F.dropout(z, ratio=dropout_ratio)  # 11012019
        z = F.tanh(self.input_sec_middle_conv(z))  # 10272019
        #z = F.max_pooling_2d(z, 2) # 10292019
        z = F.dropout(z, ratio=dropout_ratio)  # 11012019
        z = F.tanh(self.input_third_middle_conv(z))  # 10292019
        #z = F.spatial_pyramid_pooling_2d(z, 3, pooling_class=F.MaxPooling2D)
        z = F.spatial_pyramid_pooling_2d(z, 3, pooling="max")
        z = F.tanh(self.input(z))
    # NOTE(review): if model_arch matches none of the branches above, ``z``
    # is unbound and the LSTM calls below raise NameError — confirm the set
    # of supported architecture names.
    if self.model_arch == 'MP-C-SPP-FC-DO-LSTM2' or self.model_arch == 'MP-C-SPP-FC-LSTM2':
        # 10252019: two stacked LSTMs.
        return self.lstm2(self.lstm(z))
    elif self.model_arch == 'MP-C-SPP-FC-DO-LSTM3' or self.model_arch == 'MP-C-SPP-FC-LSTM3':
        # 10282019: three stacked LSTMs.
        return self.lstm3(self.lstm2(self.lstm(z)))
    else:
        return self.lstm(z)
def __call__(self, x):
    """Apply SPP with this link's configured height, pooling and cudnn flag."""
    return F.spatial_pyramid_pooling_2d(
        x, self.pyramid_height, self.pooling_class, self.use_cudnn)
def forward(self, x, t, train=True):
    """Return (softmax cross-entropy loss, accuracy) for one minibatch."""
    pooled = F.spatial_pyramid_pooling_2d(
        F.relu(self.conv1(x)), 2, F.MaxPooling2D)
    hidden = F.dropout(F.relu(self.l1(pooled)), train=train)
    logits = self.l2(hidden)
    return F.softmax_cross_entropy(logits, t), F.accuracy(logits, t)
def f(x_data):
    """SPP forward used by the gradient checks."""
    pooled = functions.spatial_pyramid_pooling_2d(
        x_data, self.pyramid_height, self.pooling_class)
    return pooled
def forward(self):
    """SPP forward with the configured use_cudnn setting."""
    return functions.spatial_pyramid_pooling_2d(
        chainer.Variable(self.x), 3, functions.MaxPooling2D,
        use_cudnn=self.use_cudnn)
def forward(self):
    """SPP forward using the string pooling specifier."""
    return functions.spatial_pyramid_pooling_2d(
        chainer.Variable(self.x), 3, pooling='max')
def __call__(self, x):
    """Run SPP with the configured pyramid height and pooling class."""
    pooled = functions.spatial_pyramid_pooling_2d(
        x, self.pyramid_height, self.pooling_class)
    return pooled