def predict(self, x, train=False):
    h1 = F.leaky_relu(self.l1(x))
    h2 = F.leaky_relu(self.l2(h1))
    h3 = F.leaky_relu(self.l3(h2))
    h4 = F.leaky_relu(self.l4(h3))
    y = self.q_value(h4)
    return y
def __call__(self, x):
    h = x
    h = self.__dict__["P1_1"](F.leaky_relu(h))
    h = self.__dict__["BN1_1"](h)
    h = self.__dict__["P1_2"](F.leaky_relu(h))
    h = self.__dict__["BN1_2"](h)
    h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
    h = self.__dict__["P2_1"](h)
    h = self.__dict__["BN2_1"](h)
    h = self.__dict__["P2_2"](F.leaky_relu(h))
    h = self.__dict__["BN2_2"](h)
    h = self.__dict__["P2_3"](F.leaky_relu(h))  # original repeated P2_2 here; P2_3 matches the BN2_3 that follows
    h = self.__dict__["BN2_3"](h)
    h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
    h = self.__dict__["P3_1"](h)
    h = self.__dict__["BN3_1"](h)
    h = self.__dict__["P3_2"](F.leaky_relu(h))
    h = self.__dict__["BN3_2"](h)
    h = self.__dict__["P3_3"](F.leaky_relu(h))
    h = F.average_pooling_2d(F.leaky_relu(h), ksize=6)
    h = self.__dict__["L1"](F.leaky_relu(h))
    y = self.__dict__["L2"](h)
    return y
def __call__(self, x, train=True):
    h1 = F.leaky_relu(self.norm1(self.dc1(x), test=not train))
    h2 = F.leaky_relu(self.norm2(self.dc2(h1), test=not train))
    h3 = F.leaky_relu(self.norm3(self.dc3(h2), test=not train))
    h4 = F.leaky_relu(self.norm4(self.dc4(h3), test=not train))
    h5 = F.leaky_relu(self.norm5(self.dc5(h4), test=not train))
    return self.dc6(h5)
def __call__(self, x, test=False):
    # no batch norm on the first layer: images from the generator may be biased
    h = F.leaky_relu(self.c0(x))
    h = F.leaky_relu(self.bn1(self.c1(h), test=test))
    h = F.leaky_relu(self.bn2(self.c2(h), test=test))
    h = F.leaky_relu(self.bn3(self.c3(h), test=test))
    l = self.l4l(h)
    return l
def forward_but_one(self, x_data, train=True):
    x = chainer.Variable(x_data, volatile=not train)
    h = F.max_pooling_2d(F.relu(self.bn1(self.conv1(x))), 5, stride=2)
    h = F.max_pooling_2d(F.relu(self.bn2(self.conv2(h))), 5, stride=2)
    h = F.max_pooling_2d(F.relu(self.conv3(h)), 3, stride=2)
    h = F.leaky_relu(self.conv4(h), slope=0.2)
    h = F.dropout(F.leaky_relu(self.fc5(h), slope=0.2), train=train)
    return self.fc6(h)
def predict(self, x, train=False, ratio=0.5):
    h1 = F.dropout(F.leaky_relu(self.l1(x)), train=train, ratio=ratio)
    h2 = F.dropout(F.leaky_relu(self.l2(h1)), train=train, ratio=ratio)
    h3 = F.dropout(F.leaky_relu(self.l3(h2)), train=train, ratio=ratio)
    y = self.l4(h3)
    return y
def generate_image(self, z):
    with chainer.no_backprop_mode(), chainer.using_config('train', False):
        h = F.reshape(F.relu(self.bn1(self.l0(z))),
                      (len(z), self.ch, self.bottom_width, self.bottom_width))
        h = F.leaky_relu(self.bn2(self.dc1(h)))
        h = F.leaky_relu(self.bn3(self.dc2(h)))
        h = F.leaky_relu(self.bn4(self.dc3(h)))
        x = F.tanh(self.dc4(h))
    return x
def __call__(self, x, train=True):
    x_batch1, x_batch2 = x
    initial_V_concat_1 = self.l_polarity(x_batch1)
    initial_V_concat_2 = self.l_polarity(x_batch2)
    h_concat_1 = F.dropout(F.leaky_relu(initial_V_concat_1), train=False)
    h_concat_2 = F.dropout(F.leaky_relu(initial_V_concat_2), train=False)
    h_hidden_1 = F.dropout(F.tanh(self.l_hidden1(h_concat_1)), train=train)
    h_hidden_2 = F.dropout(F.tanh(self.l_hidden2(h_concat_2)), train=train)
    y = self.l_output(h_hidden_1 + h_hidden_2)
    return y, (initial_V_concat_1 + initial_V_concat_2)
def __call__(self, x, train=True):
    xp = cuda.get_array_module(x.data)
    h1 = F.leaky_relu(self.dc1(x))
    h2 = F.leaky_relu(self.norm2(self.dc2(h1), test=not train))
    h3 = F.leaky_relu(self.norm3(self.dc3(h2), test=not train))
    h4 = F.leaky_relu(self.norm4(self.dc4(h3), test=not train))
    mean = self.mean(h4)
    var = self.var(h4)
    rand = xp.random.normal(0, 1, var.data.shape).astype(np.float32)
    z = mean + F.exp(var) * Variable(rand, volatile=not train)
    return (z, mean, var)
def __call__(self, x, t=None):
    self.clear()
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.clipped_relu(self.conv3(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
def predict(self, x, train=False, ratio=0.5):
    with chainer.using_config('train', train):
        h = F.dropout(F.leaky_relu(self.l1(x)), ratio=0.2)
        h = F.dropout(F.leaky_relu(self.l2(h)), ratio=0.5)
        h = F.dropout(F.leaky_relu(self.l3(h)), ratio=0.5)
        y = self.l_out(h)
        return y
def predict(self, x, train=False, ratio=0.5):
    h = F.leaky_relu(self.l1(x))
    h = F.leaky_relu(self.l2(h))
    h = F.leaky_relu(self.l3(h))
    y = self.l_out(h)
    return y
def forward_one_step(h1, h2, cur_word, next_word, volatile=False):
    word = V(cur_word, volatile=volatile)
    x = F.leaky_relu(model.embed(word))
    tmp_x = model.Wx1(x)
    tmp_h1 = model.Wh1(h1)
    h1 = F.leaky_relu(tmp_x + tmp_h1)
    tmp_x2 = model.Wx2(h1)
    tmp_h2 = model.Wh2(h2)
    h2 = F.leaky_relu(tmp_x2 + tmp_h2)
    y = model.Wy(h2)
    t = V(next_word, volatile=volatile)
    loss = F.softmax_cross_entropy(y, t)
    pred = F.softmax(y)
    return h1, h2, loss, np.argmax(cuda.to_cpu_async(pred.data))
def forward(x):
    h = x
    for i in range(1, steps):
        key = 'conv{}'.format(i)
        h = F.leaky_relu(getattr(model, key)(h), 0.1)
    key = 'conv{}'.format(steps)
    y = getattr(model, key)(h)
    return y
def forward(self, x, state, train=True, dropout_ratio=0.5):
    h0 = self.embed(x)
    if dropout_ratio > 0.1:
        h0 = F.dropout(h0, ratio=dropout_ratio, train=train)
    h1 = F.leaky_relu(self.l1_x(h0) + self.l1_h(state['h1']))
    if dropout_ratio > 0.1:
        h1 = F.dropout(h1, ratio=dropout_ratio, train=train)
    h2 = F.leaky_relu(self.l2_x(h1) + self.l2_h(state['h2']))
    if dropout_ratio > 0.1:
        h2 = F.dropout(h2, ratio=dropout_ratio, train=train)
    y = self.l3(h2)
    return {'h1': h1, 'h2': h2}, y
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.leaky_relu(x, slope=self.slope)
    self.assertEqual(y.data.dtype, self.dtype)
    expected = numpy.where(self.x >= 0, self.x, self.x * self.slope)
    testing.assert_allclose(
        expected, y.data, **self.check_forward_options)
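# A minimal, self-contained sketch (not part of the test above) of the
# elementwise rule the check verifies: leaky_relu(x) equals x where x >= 0
# and slope * x elsewhere. Plain NumPy only; the names here are illustrative.
import numpy as np

def leaky_relu_ref(x, slope=0.2):
    # elementwise reference implementation of the leaky ReLU
    return np.where(x >= 0, x, slope * x)

x = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
print(leaky_relu_ref(x))  # [-0.4 -0.1  0.   1.5]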
def __call__(self, x):
    h1 = F.leaky_relu(self.conv1(x))
    h2 = F.leaky_relu(self.conv2(h1))
    h3 = F.leaky_relu(self.conv3(h2))
    h4 = F.leaky_relu(self.conv4(h3))
    h5 = self.l1(h4)
    h6 = self.l2(h5)
    # debug: print intermediate shapes
    print(x.data.shape)
    print(h1.data.shape)
    print(h2.data.shape)
    print(h3.data.shape)
    print(h4.data.shape)
    print(h5.data.shape)
    print(h6.data.shape)
    return h6
def _setup_relu(self, layer):
    slope = layer.relu_param.negative_slope
    if slope != 0:
        fw = lambda x: functions.leaky_relu(x, slope=slope)
    else:
        fw = functions.relu
    self.forwards[layer.name] = fw
    self._add_layer(layer)
def check_forward(self, x_data):
    x = Variable(x_data)
    y = leaky_relu(x, slope=self.slope)
    expected = self.x.copy()
    for i in numpy.ndindex(self.x.shape):
        if self.x[i] < 0:
            expected[i] *= self.slope
    assert_allclose(expected, y.data)
def __call__(self, x, t):
    h = F.max_pooling_2d(F.leaky_relu(self.conv1(x), slope=0.1), (1, 4), stride=2)
    h = F.max_pooling_2d(F.leaky_relu(self.conv2(h), slope=0.1), (1, 4), stride=2)
    h = F.dropout(F.relu(self.fc4(h)), train=self.train, ratio=0.7)
    h = self.fc_last(h)
    self.pre = F.softmax(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def __call__(self, x, t):
    h = F.dropout(F.leaky_relu(self.fc1(x), slope=0.03), train=self.train)
    h = F.dropout(F.relu(self.fc3(h)), train=self.train, ratio=0.7)
    h = self.fc_last(h)
    self.pre = F.softmax(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.leaky_relu(x, slope=self.slope)
    expected = self.x.copy()
    for i in numpy.ndindex(self.x.shape):
        if self.x[i] < 0:
            expected[i] *= self.slope
    gradient_check.assert_allclose(expected, y.data)
def __call__(self, x):
    h = F.leaky_relu(self.c0_0(x))
    h = F.dropout(F.leaky_relu(self.bn0_1(self.c0_1(h))), ratio=0.2)
    h = F.dropout(F.leaky_relu(self.bn1_0(self.c1_0(h))), ratio=0.2)
    h = F.dropout(F.leaky_relu(self.bn1_1(self.c1_1(h))), ratio=0.2)
    h = F.dropout(F.leaky_relu(self.bn2_0(self.c2_0(h))), ratio=0.2)
    h = F.dropout(F.leaky_relu(self.bn2_1(self.c2_1(h))), ratio=0.2)
    h = F.dropout(F.leaky_relu(self.bn3_0(self.c3_0(h))), ratio=0.2)
    return self.l4(h)
def __call__(self, x):
    h = F.leaky_relu(self.l1(x))
    h = F.leaky_relu(self.l2(h))
    h = F.leaky_relu(self.l3(h))
    h = F.leaky_relu(self.l4(h))
    h = F.leaky_relu(self.l5(h))
    h = F.leaky_relu(self.l6(h))
    h = F.leaky_relu(self.l7(h))
    return F.exp(self.l9(h) - 13.0)
def check_backward(self, x_data, y_grad):
    x = Variable(x_data)
    y = leaky_relu(x, slope=self.slope)
    y.grad = y_grad
    y.backward()
    func = y.creator
    f = lambda: func.forward((x.data,))
    gx, = numerical_grad(f, (x.data,), (y.grad,))
    assert_allclose(gx, x.grad)
def __call__(self, x, test=False):
    h1 = F.leaky_relu(self.c1(x))
    h2 = F.leaky_relu(self.bn1(self.c2(h1), test=test))
    h3 = F.leaky_relu(self.bn2(self.c3(h2), test=test))
    h4 = F.leaky_relu(self.bn3(self.c4(h3), test=test))
    h5 = self.l1(h4)
    # debug: print intermediate shapes
    print(x.data.shape)
    print(h1.data.shape)
    print(h2.data.shape)
    print(h3.data.shape)
    print(h4.data.shape)
    print(h5.data.shape)
    return h5
def check_backward(self, x_data, y_grad):
    x = chainer.Variable(x_data)
    y = functions.leaky_relu(x, slope=self.slope)
    y.grad = y_grad
    y.backward()
    func = y.creator
    f = lambda: func.forward((x.data,))
    gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
    gradient_check.assert_allclose(gx, x.grad)
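# A minimal sketch of the central-difference check the two backward tests
# above rely on, in plain NumPy (illustrative only; not Chainer's
# gradient_check). For leaky ReLU, d/dx is 1 where x > 0 and slope where x < 0.
import numpy as np

def numerical_grad_ref(f, x, gy, eps=1e-3):
    # central differences: (f(x + eps) - f(x - eps)) / (2 * eps), weighted by gy
    gx = np.zeros_like(x)
    for i in np.ndindex(x.shape):
        orig = x[i]
        x[i] = orig + eps
        y1 = f(x).copy()
        x[i] = orig - eps
        y2 = f(x).copy()
        x[i] = orig
        gx[i] = ((y1 - y2) * gy).sum() / (2 * eps)
    return gx

slope = 0.2
f = lambda x: np.where(x >= 0, x, slope * x)
x = np.array([-1.0, 2.0], dtype=np.float64)
gy = np.ones_like(x)
print(numerical_grad_ref(f, x, gy))  # approx [0.2, 1.0]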
def __call__(self, x):
    h = add_noise(x)
    h = F.leaky_relu(add_noise(self.c0_0(h)))
    h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h))))
    h = F.leaky_relu(add_noise(self.bn1_0(self.c1_0(h))))
    h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h))))
    h = F.leaky_relu(add_noise(self.bn2_0(self.c2_0(h))))
    h = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h))))
    h = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h))))
    return self.l4(h)
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.leaky_relu(x, slope=self.slope)
    self.assertEqual(y.data.dtype, self.dtype)
    expected = self.x.copy()
    for i in numpy.ndindex(self.x.shape):
        if self.x[i] < 0:
            expected[i] *= self.slope
    testing.assert_allclose(
        expected, y.data, **self.check_forward_options)
def check_forward(self, x_data, backend_config):
    if backend_config.use_cuda:
        x_data = cuda.to_gpu(x_data)
    x = chainer.Variable(x_data)
    with backend_config:
        y = functions.leaky_relu(x, slope=self.slope)
    self.assertEqual(y.data.dtype, self.dtype)
    expected = numpy.where(self.x >= 0, self.x, self.x * self.slope)
    testing.assert_allclose(
        expected, y.data, **self.check_forward_options)
def __call__(self, x):
    h = F.leaky_relu(self.l0(x))
    logits = self.l1(h)
    return logits
def __call__(self, x):
    h = F.leaky_relu(self.c0(x))
    h = F.leaky_relu(self.c1(h))
    h = self.pooling_comp * F.average_pooling_2d(h, 2, 2, 0)
    return h
def __call__(self, x):
    return leaky_relu(x, self.a)
def __call__(self, x):
    h = F.leaky_relu(self.bias1(
        self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias2(
        self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias3(
        self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(
        self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(
        self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias6(
        self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(
        self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(
        self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias9(
        self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(
        self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(
        self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(
        self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(
        self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    high_resolution_feature = h
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(
        self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(
        self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(
        self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(
        self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(
        self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias19(
        self.bn19(self.conv19(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias20(
        self.bn20(self.conv20(h), finetune=self.finetune)), slope=0.1)
    h2 = high_resolution_feature
    h2 = F.leaky_relu(self.bias21(
        self.bn21(self.conv21(h2), finetune=self.finetune)), slope=0.1)
    h2 = reorg(h2)
    h = F.concat((h2, h), axis=1)
    h = F.leaky_relu(self.bias22(
        self.bn22(self.conv22(h), finetune=self.finetune)), slope=0.1)
    h = self.bias23(self.conv23(h))
    return h
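# `reorg` above is not defined in this file; in YOLOv2-style models it is a
# space-to-depth reshuffle that trades spatial resolution for channels so a
# high-resolution feature map can be concatenated with a coarser one. A minimal
# NumPy sketch of that idea, assuming stride 2 (illustrative only; darknet's
# exact channel ordering may differ):
import numpy as np

def reorg_ref(x, stride=2):
    # x: (batch, channels, height, width)
    # -> (batch, channels * stride**2, height // stride, width // stride)
    b, c, h, w = x.shape
    x = x.reshape(b, c, h // stride, stride, w // stride, stride)
    x = x.transpose(0, 3, 5, 1, 2, 4)
    return x.reshape(b, c * stride * stride, h // stride, w // stride)

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
print(reorg_ref(x).shape)  # (1, 4, 2, 2)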
def forward(self, x):
    y1 = F.leaky_relu(x)
    return y1
def __call__(self, x):
    ##### common layers
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    high_resolution_feature = reorg(h)  # downsample the high-resolution feature map with reorg and keep it for later
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    ###### new layers
    h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.concat((high_resolution_feature, h), axis=1)  # output concatenation
    h = F.leaky_relu(self.bias21(self.bn21(self.conv21(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = self.bias22(self.conv22(h))
    return h
def predict(self, x):
    # calculate the network output
    h1 = F.leaky_relu(self.l1(x))
    h2 = F.leaky_relu(self.l2(h1))
    h3 = F.leaky_relu(self.l3(h2))
    h4 = F.leaky_relu(self.l4(h3))
    return h4
def __call__(self, x):
    x = F.log(x) + 13.0
    h = F.leaky_relu(self.l1(x))
    h = F.leaky_relu(self.l2(h))
    h = F.leaky_relu(self.l3(h))
    return F.exp(self.l9(h) - 13.0)
def __call__(self, x, test=False):
    h = F.leaky_relu(self.l0(x))
    h = F.leaky_relu(self.l1(h))
    h = F.leaky_relu(self.l2(h))
    return chainerrl.action_value.DiscreteActionValue(self.l3(h))
def f(x):
    return functions.leaky_relu(x, self.slope)
def __call__(self, x, t):
    h = F.max_pooling_2d(F.leaky_relu(self.conv1(x)), 2, 2)
    h = F.max_pooling_2d(F.leaky_relu(self.conv2(h)), 2, 2)
    h = F.leaky_relu(self.conv3(h))
    h = F.leaky_relu(self.conv4(h))
    h = F.leaky_relu(self.conv5(h))
    h = F.max_pooling_2d(F.leaky_relu(self.conv6(h)), 2, 2)
    h = F.leaky_relu(self.conv7(h))
    h = F.leaky_relu(self.conv8(h))
    h = F.leaky_relu(self.conv9(h))
    h = F.leaky_relu(self.conv10(h))
    h = F.leaky_relu(self.conv11(h))
    h = F.leaky_relu(self.conv12(h))
    h = F.leaky_relu(self.conv13(h))
    h = F.leaky_relu(self.conv14(h))
    h = F.leaky_relu(self.conv15(h))
    h = F.max_pooling_2d(F.leaky_relu(self.conv16(h)), 2, 2)
    h = F.leaky_relu(self.conv17(h))
    h = F.leaky_relu(self.conv18(h))
    h = F.leaky_relu(self.conv19(h))
    if self.pre_train:
        h = F.average_pooling_2d(h, 2, 2)
        h = self.fc_pre(h)
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
    else:
        h = F.leaky_relu(self.conv20(h))
        h = F.leaky_relu(self.conv21(h))
        h = F.leaky_relu(self.conv22(h))
        h = F.leaky_relu(self.conv23(h))
        h = F.leaky_relu(self.conv24(h))
        self.h = h
        h = F.leaky_relu(self.fc25(h))
        h = F.relu(self.fc26(h))
        self.img = (x, h)
def __call__(self, x, last=False):
    l1 = F.leaky_relu(self.c1(x))
    l2 = F.leaky_relu(self.c2(l1))
    if last:
        return self.toRGB(l2)
    return l2
def Q_func(self, x):
    h1 = F.leaky_relu(self.L1(x))
    h2 = F.leaky_relu(self.L2(h1))
    h3 = F.leaky_relu(self.L3(h2))
    return F.identity(self.Q_value(h3))
def __call__(self, x):
    batch_size = x.data.shape[0]
    ##### common layers
    h = F.leaky_relu(self.bias1(
        self.bn1(self.conv1(x), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias2(
        self.bn2(self.conv2(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias3(
        self.bn3(self.conv3(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(
        self.bn4(self.conv4(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(
        self.bn5(self.conv5(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias6(
        self.bn6(self.conv6(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(
        self.bn7(self.conv7(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(
        self.bn8(self.conv8(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias9(
        self.bn9(self.conv9(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(
        self.bn10(self.conv10(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(
        self.bn11(self.conv11(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(
        self.bn12(self.conv12(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(
        self.bn13(self.conv13(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(
        self.bn14(self.conv14(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(
        self.bn15(self.conv15(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(
        self.bn16(self.conv16(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(
        self.bn17(self.conv17(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(
        self.bn18(self.conv18(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    ###### new layers
    h = self.conv19(h)
    h = F.average_pooling_2d(h, h.data.shape[-1], stride=1, pad=0)
    # reshape to (batch, features)
    y = F.reshape(h, (batch_size, -1))
    return y
def __call__(self, x):
    h = x
    h = self.__dict__["P1_1"](F.leaky_relu(h))
    h = self.__dict__["BN1_1"](h)
    h = self.__dict__["P1_2"](F.leaky_relu(h))
    h = self.__dict__["BN1_2"](h)
    h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
    h = self.__dict__["P2_1"](h)
    h = self.__dict__["BN2_1"](h)
    h = self.__dict__["P2_2"](F.leaky_relu(h))
    h = self.__dict__["BN2_2"](h)
    h = self.__dict__["P2_3"](F.leaky_relu(h))  # original repeated P2_2 here; P2_3 matches the BN2_3 that follows
    h = self.__dict__["BN2_3"](h)
    h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
    h = self.__dict__["P3_1"](h)
    h = self.__dict__["BN3_1"](h)
    h = self.__dict__["P3_2"](F.leaky_relu(h))
    h = self.__dict__["BN3_2"](h)
    h = self.__dict__["P3_3"](F.leaky_relu(h))
    h = self.__dict__["BN3_3"](h)
    h = F.spatial_pyramid_pooling_2d(F.leaky_relu(h), 3, F.MaxPooling2D)
    h = self.__dict__["BNL0"](h)
    h = self.__dict__["L1"](F.leaky_relu(h))
    h = self.__dict__["BNL1"](h)
    y = self.__dict__["L2"](F.leaky_relu(h))
    return y
def __call__(self, x):
    hs = [F.leaky_relu(self.c0(x))]
    for i in range(1, 5):
        hs.append(self['c%d' % i](hs[i - 1]))
    return hs
def __call__(self, x, SE_PRE):
    h = F.leaky_relu(self.bn1(self.res1(x)))
    h = F.leaky_relu(self.bn2(self.res2(h)))
    h = self.bn3(self.res3(h))
    return (h + self.bn4(self.res4(SE_PRE))) if self.use_conv else (h + SE_PRE)
#!/usr/bin/env python
import chainer.functions as F
import numpy as np

dtypes = {'fp16': np.float16, 'fp32': np.float32}

activation = {
    'relu': F.relu,
    'lrelu': lambda x: F.leaky_relu(x, slope=0.2),
    'tanh': F.tanh,
    'none': None,
}
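# A hedged usage sketch of the registry above: look an activation up by name
# and apply it, treating 'none' as the identity. The names `apply_act` and
# `act_name` are illustrative, not from the original source.
def apply_act(act_name, x):
    fn = activation[act_name]
    return x if fn is None else fn(x)

# e.g. apply_act('lrelu', h) applies leaky ReLU with slope 0.2 to h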
def forward(self, x):
    y1 = F.leaky_relu(x, slope=0.1)
    return y1
def __call__(self, x):
    return F.leaky_relu(x, self.slope)
def __call__(self, x):
    h = F.leaky_relu(self.bias1(
        self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias2(
        self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias3(
        self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(
        self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(
        self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias6(
        self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(
        self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(
        self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias9(
        self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(
        self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(
        self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(
        self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(
        self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias14(
        self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(
        self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(
        self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(
        self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(
        self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.average_pooling_2d(h, h.shape[-2:])
    h = self.fc19(h)
    return h
def __call__(self, x, with_dict=False):
    output_dict = dict()
    output_dict['input'] = x
    # 512 -> 4x4
    x = _pixel_norm(chainer.Variable(x), eps=1e-8)
    x = F.reshape(self.fc7_1(x), (-1, 512, 4, 4))
    x = _pixel_norm(F.leaky_relu(self.b7_1(x)), eps=1e-8)
    x = _pixel_norm(F.leaky_relu(self.conv7_2(x)), eps=1e-8)
    output_dict['x2'] = x
    # 4x4 -> 8x8
    x = F.unpooling_2d(x, ksize=2, cover_all=False)
    x = _pixel_norm(F.leaky_relu(self.conv6_1(x)), eps=1e-8)
    x = _pixel_norm(F.leaky_relu(self.conv6_2(x)), eps=1e-8)
    output_dict['x3'] = x
    # 8x8 -> 16x16
    x = F.unpooling_2d(x, ksize=2, cover_all=False)
    x = _pixel_norm(F.leaky_relu(self.conv5_1(x)), eps=1e-8)
    x = _pixel_norm(F.leaky_relu(self.conv5_2(x)), eps=1e-8)
    # 16x16 -> 32x32
    x = F.unpooling_2d(x, ksize=2, cover_all=False)
    x = _pixel_norm(F.leaky_relu(self.conv4_1(x)), eps=1e-8)
    x = _pixel_norm(F.leaky_relu(self.conv4_2(x)), eps=1e-8)
    # 32x32 -> 64x64
    x = F.unpooling_2d(x, ksize=2, cover_all=False)
    x = _pixel_norm(F.leaky_relu(self.conv3_1(x)), eps=1e-8)
    x = _pixel_norm(F.leaky_relu(self.conv3_2(x)), eps=1e-8)
    # 256x64x64 -> 128x128x128
    x = F.unpooling_2d(x, ksize=2, cover_all=False)
    x = _pixel_norm(F.leaky_relu(self.conv2_1(x)), eps=1e-8)
    x = _pixel_norm(F.leaky_relu(self.conv2_2(x)), eps=1e-8)
    # 128x128x128 -> 64x256x256
    x = F.unpooling_2d(x, ksize=2, cover_all=False)
    x = _pixel_norm(F.leaky_relu(self.conv1_1(x)), eps=1e-8)
    x = _pixel_norm(F.leaky_relu(self.conv1_2(x)), eps=1e-8)
    # 64x256x256 -> 1x256x256 (ToRGB_lod0)
    img = self.conv0_0(x)
    images_out = img
    return images_out
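# `_pixel_norm` above is not defined in this file; it appears to be the
# ProgressiveGAN "pixelwise feature vector normalization", which divides each
# spatial position's feature vector by its RMS across channels. A NumPy sketch
# under that assumption (illustrative only):
import numpy as np

def pixel_norm_ref(x, eps=1e-8):
    # x: (batch, channels, height, width); normalize across the channel axis
    return x / np.sqrt(np.mean(x ** 2, axis=1, keepdims=True) + eps)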
def encode(self, x):
    h1 = F.leaky_relu(F.dropout(self.bn1(self.fc1(x)), train=False))
    h2 = F.leaky_relu(F.dropout(self.bn2(self.fc2(h1)), train=False))
    h3 = F.leaky_relu(F.dropout(self.bn3(self.fc3(h2)), train=False))
    h4 = F.leaky_relu(F.dropout(self.bn4(self.fc4(h3)), train=False))
    return h4
def regression2_forward(self, x):
    # Regression 2
    h1 = F.leaky_relu(self.conv(x))  # was lowercase f.leaky_relu; F matches the rest of the corpus
    h2 = self.fc(h1)
    return h2
def forward(self, x):
    return F.leaky_relu(x)
def __call__(self, x_ld, x_gd):
    # local discriminator
    hl = F.leaky_relu(self.ld_bn0(self.ld_c0(x_ld)))
    hl = F.leaky_relu(self.ld_bn1(self.ld_c1(hl)))
    hl = F.leaky_relu(self.ld_bn2(self.ld_c2(hl)))
    hl = F.leaky_relu(self.ld_bn3(self.ld_c3(hl)))
    hl = F.leaky_relu(self.ld_bn4(self.ld_c4(hl)))
    hl = F.leaky_relu(self.ld_fc(hl))
    # global discriminator
    hg = F.leaky_relu(self.gd_bn0(self.gd_c0(x_gd)))
    hg = F.leaky_relu(self.gd_bn1(self.gd_c1(hg)))
    hg = F.leaky_relu(self.gd_bn2(self.gd_c2(hg)))
    hg = F.leaky_relu(self.gd_bn3(self.gd_c3(hg)))
    hg = F.leaky_relu(self.gd_bn4(self.gd_c4(hg)))
    hg = F.leaky_relu(self.gd_bn5(self.gd_c5(hg)))
    hg = F.leaky_relu(self.gd_fc(hg))
    # concatenate the two branches
    out = F.concat((hl, hg), axis=1)
    out = self.concl(out)
    return out
def __call__(self, x):
    ##### common layers
    h = F.leaky_relu(self.bias1(
        self.bn1(self.conv1(x), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias2(
        self.bn2(self.conv2(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias3(
        self.bn3(self.conv3(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(
        self.bn4(self.conv4(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(
        self.bn5(self.conv5(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias6(
        self.bn6(self.conv6(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(
        self.bn7(self.conv7(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(
        self.bn8(self.conv8(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias9(
        self.bn9(self.conv9(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(
        self.bn10(self.conv10(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(
        self.bn11(self.conv11(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(
        self.bn12(self.conv12(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(
        self.bn13(self.conv13(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    high_resolution_feature = reorg(h)  # downsample the high-resolution feature map with reorg and keep it for later
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(
        self.bn14(self.conv14(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(
        self.bn15(self.conv15(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(
        self.bn16(self.conv16(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(
        self.bn17(self.conv17(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(
        self.bn18(self.conv18(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    ###### new layers
    h = F.leaky_relu(self.bias19(
        self.bn19(self.conv19(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias20(
        self.bn20(self.conv20(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = F.concat((h, high_resolution_feature), axis=1)  # output concatenation
    h = F.leaky_relu(self.bias21(
        self.bn21(self.conv21(h), test=not self.train, finetune=self.finetune)), slope=0.1)
    h = self.conv22(h)
    return h
def __call__(self, x, train=True):
    with chainer.using_config('train', train):
        h1 = F.leaky_relu(self.norm1(self.dc1(x)))
        h2 = F.leaky_relu(self.norm2(self.dc2(h1)))
        h3 = F.leaky_relu(self.norm3(self.dc3(h2)))
        return self.dc4(h3)
def _leaky_relu(x):
    return F.leaky_relu(x, slope=0.1)
def forward(self, x):
    h = F.leaky_relu(self.c0(x))
    h = F.leaky_relu(self.bn1(self.c1(h)))
    h = F.leaky_relu(self.bn2(self.c2(h)))
    h = F.leaky_relu(self.bn3(self.c3(h)))
    return self.c4(h)