Example #1
    def predict(self, x_data):
        x = chainer.Variable(x_data, volatile=True)
        self.train = False
        test = True

        h = F.max_pooling_2d(
            F.relu(self.norm1(self.conv1(x), test=test)),  3, stride=2, pad=1)
        h = F.max_pooling_2d(
            F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)

        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.conva(a), test=test))
        a = F.relu(self.norma2(self.lina(a), test=test))
        a = self.outa(a)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.convb(b), test=test))
        b = F.relu(self.normb2(self.linb(b), test=test))
        b = self.outb(b)

        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.out(h)

        return F.softmax(h)
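A note on the API seen in Example #1 and several others below: Variable(..., volatile=...) and the test=/train= keyword arguments are pre-v2 Chainer idioms. A minimal sketch of the v2+ equivalent, shown here only for orientation:

import chainer

# Since Chainer v2, inference mode is selected with configuration flags
# instead of volatile variables and test=/train= keyword arguments:
with chainer.using_config('train', False), chainer.no_backprop_mode():
    pass  # e.g. y = model.predict(x_data)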
Example #2
    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data, volatile=not train)
        t = chainer.Variable(y_data, volatile=not train)

        h = F.max_pooling_2d(
            F.relu(self.norm1(self.conv1(x))),  3, stride=2, pad=1)
        h = F.max_pooling_2d(
            F.relu(self.norm2(self.conv2(h))), 3, stride=2, pad=1)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)

        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.conva(a)))
        a = F.relu(self.norma2(self.lina(a)))
        a = self.outa(a)
        a = F.softmax_cross_entropy(a, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.convb(b)))
        b = F.relu(self.normb2(self.linb(b)))
        b = self.outb(b)
        b = F.softmax_cross_entropy(b, t)

        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.out(h)
        return 0.3 * (a + b) + F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #3
 def __call__(self, x):
     h2 = F.average_pooling_2d(x[0], 56, stride=1)
     h3 = F.average_pooling_2d(x[1], 28, stride=1)
     h4 = F.average_pooling_2d(x[2], 14, stride=1)
     h5 = F.average_pooling_2d(x[3], 7, stride=1)
     h = F.concat((h2, h3, h4, h5), axis=1)
     return self.fc(h)
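Example #3 above global-average-pools four feature maps whose spatial sizes are presumably 56, 28, 14 and 7 (typical ResNet stage outputs) and concatenates the results. The identity it relies on, sketched below: average_pooling_2d with a kernel covering the whole map equals a mean over the spatial axes.

import numpy as np
import chainer.functions as F

x = np.random.rand(2, 8, 7, 7).astype(np.float32)  # (N, C, H, W)
gap = F.average_pooling_2d(x, 7, stride=1)          # -> (2, 8, 1, 1)
assert np.allclose(gap.data[:, :, 0, 0], x.mean(axis=(2, 3)), atol=1e-6)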
Example #4
    def forward(self, img, doc, train=True, regression=False, useImage=True, useDoc=True):
        test = not train

        if useImage:
            h = F.max_pooling_2d(
                F.relu(self.norm1(self.conv1(img), test=test)),  3, stride=2, pad=1)
            h = F.max_pooling_2d(
                F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1)

            h = self.inc3a(h)
            h = self.inc3b(h)
            h = self.inc3c(h)
            h = self.inc4a(h)

            if train:
                a = F.average_pooling_2d(h, 5, stride=3)
                a = F.relu(self.norma(self.conva(a), test=test))
                a = F.relu(self.norma2(self.lina(a), test=test))
                a = self.outa(a)

            h = self.inc4b(h)
            h = self.inc4c(h)
            h = self.inc4d(h)

            if train:
                b = F.average_pooling_2d(h, 5, stride=3)
                b = F.relu(self.normb(self.convb(b), test=test))
                b = F.relu(self.normb2(self.linb(b), test=test))
                b = self.outb(b)

            h = self.inc4e(h)
            h = self.inc5a(h)
            h = F.average_pooling_2d(self.inc5b(h), 7)
            h = F.relu(self.linz(h))

        if useDoc:
            h2 = F.relu(self.doc_fc1(F.dropout(doc, train=train)))
            h2 = F.relu(self.doc_fc2(h2))

        if useDoc and useImage:
            bi = F.relu(self.bi1(h, h2))  # F.concat((h, h2), axis=1)
            h = self.out(bi)
        elif useImage:
            h = self.outimg(h)
        else:
            h = self.outdoc(h2)

        if train:
            if useImage:
                return {
                    "a": a,
                    "b": b,
                    "h": h
                }
            else:
                return h

        else:
            return h
Example #5
 def __call__(self, x):
     h1 = F.sigmoid(F.average_pooling_2d(self.conv1(x), 2))
     h2 = F.sigmoid(F.average_pooling_2d(self.conv2(h1),2))
     h3 = self.conv3(h2)
     h4 = F.tanh(self.l1(h3))
     p = self.l2(h4)
 
     return p
Example #6
 def forward(self, x):
     y0 = F.relu(self.model.conv1(x))
     y1 = self.model.cccp2(F.relu(self.model.cccp1(y0)))
     x1 = F.relu(self.model.conv2(F.average_pooling_2d(F.relu(y1), 3, stride=2)))
     y2 = self.model.cccp4(F.relu(self.model.cccp3(x1)))
     x2 = F.relu(self.model.conv3(F.average_pooling_2d(F.relu(y2), 3, stride=2)))
     y3 = self.model.cccp6(F.relu(self.model.cccp5(x2)))
     x3 = F.relu(getattr(self.model,"conv4-1024")(F.dropout(F.average_pooling_2d(F.relu(y3), 3, stride=2), train=False)))
     return [y0,x1,x2,x3]
Example #7
def forward(model, x_data, y_data, train=True):
    x, t = Variable(x_data, volatile=not train), Variable(y_data, volatile=not train)
    h = F.relu(F.max_pooling_2d(model.conv1(x), 3, stride=2))
    h = F.relu(F.average_pooling_2d(model.conv2(h), 3, stride=2))
    h = F.relu(F.average_pooling_2d(model.conv3(h), 3, stride=2))
    h = model.fl4(h)  # follows cifar10_quick.prototxt in Caffe, instead of the line below
    # h = F.relu(model.fl4(h))
    y = model.fl5(h)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
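For pooling chains like the one above, the spatial output size follows the usual convolution arithmetic. A small helper, assuming Chainer's default cover_all=False for average pooling (max_pooling_2d defaults to cover_all=True and rounds up instead):

def pooled_size(in_size, ksize, stride, pad=0):
    # spatial output size of average_pooling_2d with cover_all=False
    return (in_size + 2 * pad - ksize) // stride + 1

# e.g. three successive 3x3/stride-2 poolings on a 32x32 map:
assert pooled_size(32, 3, 2) == 15
assert pooled_size(15, 3, 2) == 7
assert pooled_size(7, 3, 2) == 3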
Example #8
    def __call__(self, x, t):
        self.clear()
        test = not self.train

        h = F.max_pooling_2d(
            F.relu(self.norm1(self.conv1(x), test=test)),  3, stride=2, pad=1)
        h = F.max_pooling_2d(
            F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)

        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.conva(a), test=test))
        a = F.relu(self.norma2(self.lina(a), test=test))
        a = self.outa(a)
        self.loss1 = F.softmax_cross_entropy(a, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.convb(b), test=test))
        b = F.relu(self.normb2(self.linb(b), test=test))
        b = self.outb(b)
        self.loss2 = F.softmax_cross_entropy(b, t)

        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.out(h)
        self.loss3 = F.softmax_cross_entropy(h, t)

        self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
        self.accuracy = F.accuracy(h, t)
        return F.softmax(h)
Example #9
    def forward(self, x_img, x_doc, y_data, train=True):
        x_img = cuda.cupy.asarray(x_img)
        x_doc = cuda.cupy.asarray(x_doc)
        y_data = cuda.cupy.asarray(y_data)

        img, doc, t = Variable(x_img), Variable(x_doc), Variable(y_data)

        h = F.relu(self.conv1(img))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        self.loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        self.loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc1(F.dropout(h, 0.4, train=train))

        h2 = F.relu(self.doc_fc1(F.dropout(doc, train=train)))
        h2 = F.relu(self.doc_fc2(h2))
        b = F.relu(self.bi1(h, h2))
        h = self.loss3_fc2(b)

        self.loss3 = F.softmax_cross_entropy(h, t)

        if train:
            return 0.3 * (self.loss1 + self.loss2) + self.loss3
        else:
            return F.accuracy(h, t)
Example #10
    def forward_super(self, x, train=True):
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv5(h))

        h1 = F.average_pooling_2d(h, self.p_w, stride=self.p_w)
        h1 = F.average_pooling_2d(h1, self.p_w, stride=self.p_w)

        return h, h1
Example #11
 def forward(self, x):
     y1 = self.model.conv1_2(F.relu(self.model.conv1_1(x)))
     x1 = F.average_pooling_2d(F.relu(y1), 2, stride=2)
     y2 = self.model.conv2_2(F.relu(self.model.conv2_1(x1)))
     x2 = F.average_pooling_2d(F.relu(y2), 2, stride=2)
     y3 = self.model.conv3_3(F.relu(self.model.conv3_2(F.relu(self.model.conv3_1(x2)))))
     x3 = F.average_pooling_2d(F.relu(y3), 2, stride=2)
     y4 = self.model.conv4_3(F.relu(self.model.conv4_2(F.relu(self.model.conv4_1(x3)))))
 #    x4 = F.average_pooling_2d(F.relu(y4), 2, stride=2)
 #    y5 = model.conv5_3(F.relu(model.conv5_2(F.relu(model.conv5_1(x4)))))
     return [y1,y2,y3,y4]
Example #12
    def __call__(self, x_recon, x):
        bs = x.shape[0]
        d = np.prod(x.shape[1:])

        if x.shape[1] == 3:
            # compare 2x-downsampled reconstructions for 3-channel inputs
            h_recon = F.average_pooling_2d(x_recon, (2, 2))
            h = F.average_pooling_2d(x, (2, 2))
            self.loss = F.mean_squared_error(h_recon, h) / d
        else:
            self.loss = F.mean_squared_error(x_recon, x) / d

        return self.loss
Example #13
 def __call__(self, x, h_feat, test=False):
     bs = x.shape[0]
     h = self.convunit0(x, test)
     h = self.convunit1(h, test)
     h_feat = F.average_pooling_2d(h_feat, (7, 7))
     h_feat = F.reshape(h_feat, (bs, 128))
     h = F.average_pooling_2d(h, (7, 7))
     h = F.reshape(h, (bs, 128))
     h = F.concat((h, h_feat))
     h = self.linear(h)
     #h = F.sigmoid(h)
     return h
Example #14
	def calc_loss(self, x, t, layer, train=True):
		self.clear()
		if layer == 0:
			h = F.max_pooling_2d(F.relu(model.conv1(x)), 4)
			h = self.norm2(h, test=not train)
			h = F.relu(model.l1(h))
			h = F.relu(model.l2(h))
		elif layer == 1:
			h = F.spatial_pyramid_pooling_2d(F.relu(model.conv1(x)), 4, F.MaxPooling2D)
			h = self.norm5(h, test=not train)
			h = F.relu(model.l4(h))
			h = F.relu(model.l2(h))
		elif layer == 2:
			h = F.average_pooling_2d(F.relu(model.conv1(x)), 4)
			h = self.norm2(h, test=not train)
			h = F.relu(model.l1(h))
			h = F.relu(model.l2(h))
		elif layer == 3:
			h = F.max_pooling_2d(F.relu(model.conv1(x)), 2)
			h = self.norm2(h, test=not train)
			h = F.max_pooling_2d(F.relu(model.conv2(h)), 2)
			h = self.norm1(h, test=not train)
			h = F.relu(model.l3(h))
			h = F.relu(model.l2(h))
		elif layer == 4:
			h = F.max_pooling_2d(F.relu(model.conv4(x)), 2)
			h = self.norm3(h, test=not train)
			h = F.spatial_pyramid_pooling_2d(F.relu(model.conv3(h)), 2, F.MaxPooling2D)
			h = self.norm6(h, test=not train)
			h = F.relu(model.l5(h))
			h = F.relu(model.l2(h))
		elif layer == 5:
			h = F.average_pooling_2d(F.relu(model.conv1(x)), 2)
			h = self.norm2(h, test=not train)
			h = F.average_pooling_2d(F.relu(model.conv2(h)), 2)
			h = self.norm1(h, test=not train)
			h = F.relu(model.l3(h))
			h = F.relu(model.l2(h))
			
		loss = F.mean_squared_error(h, t)
		return loss
Example #15
    def __call__(self, x):
        h = F.relu(self.mlpconv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.dropout(h, ratio=0.5)

        h = F.relu(self.mlpconv2(h))
        h = F.average_pooling_2d(h, 3, stride=2)
        h = F.dropout(h, ratio=0.5)

        h = self.mlpconv3(h)
        h = F.average_pooling_2d(h, h.shape[2])
        y = F.reshape(h, (x.shape[0], 10))
        return y
Example #16
def forward(model, x_data, ratio=0.1, train=True):
    """
    @summary: run a feed-forward pass
    """

    x = chainer.Variable(x_data, volatile=False)
    h = F.max_pooling_2d(F.relu(model.conv1(x)), 3, stride=2)
    h = F.average_pooling_2d(F.relu(model.conv2(h)), 3, stride=2)
    h = F.average_pooling_2d(F.relu(model.conv3(h)), 3, stride=2)
    h = F.dropout(F.relu(model.fl5(h)), train=train, ratio=ratio)
    y = model.fl6(h)

    return y
Example #17
    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data, volatile=not train)
        t = chainer.Variable(y_data, volatile=not train)

        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)

        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        if train:
            loss1 = F.average_pooling_2d(h, 5, stride=3)
            loss1 = F.relu(self.loss1_conv(loss1))
            loss1 = F.relu(self.loss1_fc1(loss1))
            loss1 = self.loss1_fc2(loss1)
            loss1 = F.softmax_cross_entropy(loss1, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        if train:
            loss2 = F.average_pooling_2d(h, 5, stride=3)
            loss2 = F.relu(self.loss2_conv(loss2))
            loss2 = F.relu(self.loss2_fc1(loss2))
            loss2 = self.loss2_fc2(loss2)
            loss2 = F.softmax_cross_entropy(loss2, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.dropout(F.average_pooling_2d(h, 7, stride=1), 0.4, train=train)
        h = self.loss3_fc(h)
        loss3 = F.softmax_cross_entropy(h, t)

        if train:
            loss = 0.3 * (loss1 + loss2) + loss3
        else:
            loss = loss3
        accuracy = F.accuracy(h, t)
        return loss, accuracy
Example #18
    def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc(F.dropout(h, 0.4))
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)

        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy
        }, self)
        return loss
Example #19
    def __call__(self, x, t):
        h = F.relu(self['conv1/7x7_s2'](x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5, alpha=(1e-4)/5, k=1)
        h = F.relu(self['conv2/3x3_reduce'](h))
        h = F.relu(self['conv2/3x3'](h))
        h = F.max_pooling_2d(F.local_response_normalization(
            h, n=5, alpha=(1e-4)/5, k=1), 3, stride=2)

        h = self.call_inception(h, 'inception_3a')
        h = self.call_inception(h, 'inception_3b')
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.call_inception(h, 'inception_4a')

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self['loss1/conv'](l))
        l = F.dropout(F.relu(self['loss1/fc'](l)), 0.7, train=self.train)
        l = self['loss1/classifier'](l)
        loss1 = F.softmax_cross_entropy(l, t)

        h = self.call_inception(h, 'inception_4b')
        h = self.call_inception(h, 'inception_4c')
        h = self.call_inception(h, 'inception_4d')

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self['loss2/conv'](l))
        l = F.dropout(F.relu(self['loss2/fc'](l)), 0.7, train=self.train)
        l = self['loss2/classifier'](l)
        loss2 = F.softmax_cross_entropy(l, t)

        h = self.call_inception(h, 'inception_4e')
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.call_inception(h, 'inception_5a')
        h = self.call_inception(h, 'inception_5b')

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self['loss3/classifier'](F.dropout(h, 0.4, train=self.train))
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)
        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy
        }, self)
        return loss
Example #20
    def __call__(self, x, t):
        test = not self.train
        finetune = self.finetune

        h = self.call_conv_bn_sc(x, 'conv1/7x7_s2', test=test, finetune=finetune)
        h = F.max_pooling_2d(h, 3, stride=2, pad=1)
        h = self.call_conv_bn_sc(h, 'conv2/3x3_reduce', test=test, finetune=finetune)
        h = self.call_conv_bn_sc(h, 'conv2/3x3', test=test)
        h = F.max_pooling_2d(h, 3, stride=2, pad=1)
        h = self.call_inception_bn(h, 'inception_3a', test=test, finetune=finetune)
        h = self.call_inception_bn(h, 'inception_3b', test=test, finetune=finetune)
        h = self.call_inception_bn(h, 'inception_3c', test=test, finetune=finetune)

        a = F.average_pooling_2d(h, 5, stride=3)
        a = self.call_conv_bn_sc(a, 'loss1/conv', test=test, finetune=finetune)
        a = self.call_fc_bn_sc(a, 'loss1/fc', test=test, finetune=finetune)
        a = self['loss1/classifier'](a)
        loss1 = F.softmax_cross_entropy(a, t)

        h = self.call_inception_bn(h, 'inception_4a', test=test, finetune=finetune)
        h = self.call_inception_bn(h, 'inception_4b', test=test, finetune=finetune)
        h = self.call_inception_bn(h, 'inception_4c', test=test, finetune=finetune)
        h = self.call_inception_bn(h, 'inception_4d', test=test, finetune=finetune)
        h = self.call_inception_bn(h, 'inception_4e', test=test, finetune=finetune)

        b = F.average_pooling_2d(h, 5, stride=3)
        b = self.call_conv_bn_sc(b, 'loss2/conv', test=test, finetune=finetune)
        b = self.call_fc_bn_sc(b, 'loss2/fc', test=test, finetune=finetune)
        b = self['loss2/classifier'](b)
        loss2 = F.softmax_cross_entropy(b, t)

        h = self.call_inception_bn(h, 'inception_5a', test=test, finetune=finetune)
        h = self.call_inception_bn(h, 'inception_5b', test=test, finetune=finetune)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self['loss3/classifier'](h)
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)
        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy
        }, self)
        return loss
Example #21
	def forward_one_step(self, x, test):
		f = activations[self.activation_function]
		chain = [x]

		# Hidden convolutional layers
		for i in range(self.n_hidden_layers):
			u = getattr(self, "layer_%i" % i)(chain[-1])
			if self.apply_batchnorm:
				if i == 0 and self.apply_batchnorm_to_input is False:
					pass
				else:
					u = getattr(self, "batchnorm_%i" % i)(u, test=test)
			chain.append(f(u))

		if self.projection_type == "fully_connection":
			u = self.projection_layer(chain[-1])
			if self.apply_batchnorm:
				u = self.projection_batchnorm(u, test=test)
			chain.append(f(u))

		elif self.projection_type == "global_average_pooling":
			batch_size = chain[-1].data.shape[0]
			n_maps = chain[-1].data[0].shape[0]
			chain.append(F.average_pooling_2d(chain[-1], self.top_filter_size))
			chain.append(F.reshape(chain[-1], (batch_size, n_maps)))
			u = self.projection_layer(chain[-1])
			if self.apply_batchnorm:
				u = self.projection_batchnorm(u, test=test)
			chain.append(f(u))

		else:
			raise NotImplementedError()

		return chain[-1]
Example #22
    def __call__(self, x, test=False):
        # add gaussian noise
        #xp = cuda.get_array_module(x.data)
        #with cuda.get_device_from_id(self.device):
        #    noise = xp.random.randn(*x.shape) * 0.0015
        #    x.data += noise

        h = self.conv00(x, test)
        h = self.conv01(h, test)
        h = self.conv02(h, test)
        h = F.max_pooling_2d(h, (2, 2))  # 32 -> 16
        h = self.bn0(h, test)
        h = F.dropout(h, train=not test)
    
        h = self.conv10(h, test)
        h = self.conv11(h, test)
        h = self.conv12(h, test)
        h = F.max_pooling_2d(h, (2, 2))  # 16 -> 8
        h = self.bn1(h, test)
        h = F.dropout(h, train=not test)
    
        h = self.conv20(h, test)  # 8 -> 6
        h = self.conv21(h, test)
        h = self.conv22(h, test)
        h = self.conv23(h, test)
        h = F.average_pooling_2d(h, (6, 6))  # 6 -> 1
        h = self.bn2(h, test)
        h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
        
        return h
Example #23
    def check_backward_consistency_regression(self, x_data, gy_data,
                                              use_cudnn='always'):
        # Regression test to two-dimensional average pooling layer.

        if len(self.dims) != 2:
            return

        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        xp = cuda.get_array_module(x_data)

        # Backward computation for N-dimensional average pooling layer.
        x_nd = chainer.Variable(xp.array(x_data))
        with chainer.using_config('use_cudnn', use_cudnn):
            y_nd = functions.average_pooling_nd(
                x_nd, ksize, stride=stride, pad=pad)

        y_nd.grad = gy_data
        y_nd.backward()

        # Backward computation for two-dimensional average pooling layer.
        x_2d = chainer.Variable(xp.array(x_data))
        with chainer.using_config('use_cudnn', use_cudnn):
            y_2d = functions.average_pooling_2d(
                x_2d, ksize, stride=stride, pad=pad)

        y_2d.grad = gy_data
        y_2d.backward()

        # Test that the two result gradients are close enough.
        testing.assert_allclose(x_nd.grad, x_2d.grad)
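The regression test above compares gradients; the forward-side fact it builds on, that average_pooling_nd over two dimensions matches average_pooling_2d, can be checked in isolation (a CPU-only sketch):

import numpy as np
from chainer import functions

x = np.random.rand(1, 2, 6, 6).astype(np.float32)
y_nd = functions.average_pooling_nd(x, 2, stride=2)
y_2d = functions.average_pooling_2d(x, 2, stride=2)
assert np.allclose(y_nd.data, y_2d.data)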
Example #24
 def __call__(self, x, test=False):
     h = self.convunit0(x, test)
     h = self.convunit1(h, test)
     h = F.average_pooling_2d(h, (7, 7))
     h = self.linear(h)
     #h = F.sigmoid(h)
     return h
Example #25
 def __call__(self, x):
     h = x
     h = self.__dict__["P1_1"](F.leaky_relu(h))
     h = self.__dict__["BN1_1"](h)
     h = self.__dict__["P1_2"](F.leaky_relu(h))
     h = self.__dict__["BN1_2"](h)
     h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
     h = self.__dict__["P2_1"](h)
     h = self.__dict__["BN2_1"](h)
     h = self.__dict__["P2_2"](F.leaky_relu(h))
     h = self.__dict__["BN2_2"](h)
     h = self.__dict__["P2_3"](F.leaky_relu(h))
     h = self.__dict__["BN2_3"](h)
     h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
     h = self.__dict__["P3_1"](h)
     h = self.__dict__["BN3_1"](h)
     h = self.__dict__["P3_2"](F.leaky_relu(h))
     h = self.__dict__["BN3_2"](h)
     h = self.__dict__["P3_3"](F.leaky_relu(h))
     #h = self.__dict__["BN3_3"](h)
     h = F.average_pooling_2d(F.leaky_relu(h), ksize=6)
     #h = self.__dict__["BN3_3"](h)
     h = self.__dict__["L1"](F.leaky_relu(h))
     h = self.__dict__["L2"](h)
     y = h
     #h = F.spatial_pyramid_pooling_2d(F.leaky_relu(h), 3)
     #y = F.reshape(h,(len(h.data),self.F_unit))
     return y
Example #26
def forward(x_data, y_data, print_conf_matrix=False):
    '''
    Neural net architecture
    :param x_data:
    :param y_data:
    :return:
    '''
    x, t = Variable(x_data), Variable(y_data)

    h1 = F.relu(model.l1(x))
    h1 = F.max_pooling_2d(h1,max_pool_window_1,stride=max_pool_stride_1)

    h2 = F.dropout(F.relu(model.l2(h1)))
    h2 = F.average_pooling_2d(h2, avg_pool_window_2, stride=avg_pool_stride_2)
    h2 = F.max_pooling_2d(h2,max_pool_window_2,stride=max_pool_stride_2)

    y = model.l3(h2)

    # display confusion matrix
    if print_conf_matrix:
        print(confusion_matrix(cuda.to_cpu(t.data), cuda.to_cpu(y.data).argmax(axis=1)))

    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #27
def forward_single(x_data, _size, train=False):
    datum = x_data[0].transpose([1, 2, 0]) / 255.0
    datum = datum.transpose([2, 0, 1])
    c, h, w = datum.shape
    datum = datum.reshape([1, c, h, w])
    x = Variable(datum)

    
    h = model.conv1(x)
    h = model.norm1(h)
    h = F.relu(h)
    h = F.max_pooling_2d(h, 3, stride=2)

    h = model.conv2(h)
    h = model.norm2(h)
    h = F.relu(h)
    h = F.max_pooling_2d(h, 3, stride=2)

    h = model.conv3(h)
    h = model.norm3(h)
    h = F.relu(h)
    h = F.average_pooling_2d(h, 3, stride=2)
    
    h = model.conv4(h)

    h = F.softmax(h)
    y = h.data

    """ positive region """
    fmap = resize(y[0][1], _size).astype(np.float32)
    return fmap
Example #28
    def __call__(self, x, test=False):
        # add gaussian noise
        xp = cuda.get_array_module(x.data)
        with cuda.get_device(self.device):
            noise = xp.random.randn(*x.shape) * 0.15
            x.data += noise
        
        # (conv -> act -> bn) x 3 -> maxpool -> dropout
        h = self.bn_conv0(self.act(self.conv0(x), 0.1), test)
        h = self.bn_conv1(self.act(self.conv1(h), 0.1), test)
        h = self.bn_conv2(self.act(self.conv2(h), 0.1), test)
        h = F.max_pooling_2d(h, (2, 2))  # 32 -> 16
        h = F.dropout(h, 0.5, not test)
        
        # (conv -> act -> bn) x 3 -> maxpool -> dropout
        h = self.bn_conv3(self.act(self.conv3(h), 0.1), test)
        h = self.bn_conv4(self.act(self.conv4(h), 0.1), test)
        h = self.bn_conv5(self.act(self.conv5(h), 0.1), test)
        h = F.max_pooling_2d(h, (2, 2))  # 16 -> 8
        h = F.dropout(h, 0.5, not test)
        
        # conv -> act -> bn -> (nin -> act -> bn) x 2
        h = self.bn_conv6(self.act(self.conv6(h), 0.1), test) # 8 -> 6
        h = self.bn_conv7(self.act(self.conv7(h), 0.1), test)
        h = self.bn_conv8(self.act(self.conv8(h), 0.1), test)

        h = F.average_pooling_2d(h, (6, 6))
        h = self.linear(h)
        
        return h
Example #29
    def compute(s):

        datum = x_data[0].transpose([1, 2, 0]) / 255.0
        datum = rescale(datum, s).astype(np.float32)
        datum = datum.transpose([2, 0, 1])
        
        c, h, w = datum.shape
        datum = datum.reshape([1, c, h, w])

        x = Variable(datum)

        h = model.conv1(x)
        h = model.norm1(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, 3, stride=2)

        h = model.conv2(h)
        h = model.norm2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, 3, stride=2)

        h = model.conv3(h)
        h = model.norm3(h)
        h = F.relu(h)
        h = F.average_pooling_2d(h, 3, stride=2)
    
        h = model.conv4(h)
    
        h = F.softmax(h)
        y = h.data

        """ positive region """
        fmap = resize(y[0][1], _size).astype(np.float32)
        global_output.append(fmap)
Example #30
    def check_backward_consistency_regression(
            self, x_data, gy_data, backend_config):
        # Regression test to two-dimensional average pooling layer.
        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        pad_value = self.pad_value

        # Backward computation for N-dimensional average pooling layer.
        x_nd = chainer.Variable(x_data)
        with backend_config:
            y_nd = functions.average_pooling_nd(
                x_nd, ksize, stride=stride, pad=pad, pad_value=pad_value)
        y_nd.grad = gy_data
        y_nd.backward()

        # Backward computation for two-dimensional average pooling layer.
        x_2d = chainer.Variable(x_data)
        with backend_config:
            y_2d = functions.average_pooling_2d(
                x_2d, ksize, stride=stride, pad=pad)
        y_2d.grad = gy_data
        y_2d.backward()

        # Test that the two result gradients are close enough.
        testing.assert_allclose(x_nd.grad, x_2d.grad, **self.tolerance)
Example #31
    def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h, self.train)
        h = self.res3(h, self.train)
        h = self.res4(h, self.train)
        h = self.res5(h, self.train)
        h = F.average_pooling_2d(h, 7, stride=1)
        if t == "feature":
            return h
        h = self.fc(h)

        if self.train:
            self.loss = F.softmax_cross_entropy(h, t)
            self.accuracy = F.accuracy(h, t)
            return self.loss
        else:
            return h
Example #32
    def __call__(self, x):
        h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x))), slope=0.1)
        h = F.max_pooling_2d(h, ksize=3, stride=2, pad=0)

        h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h))), slope=0.1)
        h = F.max_pooling_2d(h, ksize=3, stride=2, pad=0)

        h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h))), slope=0.1)
        h = F.max_pooling_2d(h, ksize=3, stride=2, pad=0)

        h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h))), slope=0.1)
        h = F.max_pooling_2d(h, ksize=3, stride=2, pad=0)

        h = self.conv5(h)
        h = F.average_pooling_2d(h, ksize=(h.shape[-2], h.shape[-1]))
        h = self.conv6(h)
        # h = F.dropout(h, ratio=0.4)
        # h = self.out(h)
        return h.reshape((h.shape[0], -1))
Example #33
    def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.average_pooling_2d(x,
                                         3,
                                         stride=2,
                                         pad=1,
                                         use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                x = self.x[k, c]
                expect = numpy.array([[x[0:2, 0:2].sum(), x[0:2, 1:3].sum()],
                                      [x[1:4, 0:2].sum(), x[1:4, 1:3].sum()]
                                      ]) / 9
                testing.assert_allclose(expect, y_data[k, c],
                                        **self.check_forward_options)
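The division by 9 in the expected values is the point of this test: average_pooling_2d always divides each window sum by ksize * ksize, so zero padding is counted in the average. A minimal sketch of that behavior:

import numpy as np
import chainer.functions as F

x = np.ones((1, 1, 3, 3), dtype=np.float32)
y = F.average_pooling_2d(x, 3, stride=2, pad=1)
# every 3x3 window here covers only four real pixels, the rest is padding,
# so each output is 4/9 rather than 1.0
assert np.allclose(y.data, 4.0 / 9.0)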
Example #34
    def __call__(self, x):
        """
        Compute the forward pass.

        Parameters
        ----------------
        x: Variable
           Input image data; its shape is (N, C, H, W).
        """
        h = F.leaky_relu(self.c0(x))
        h = F.leaky_relu(self.bn1(self.c1(h)))
        h = F.leaky_relu(self.bn2(self.c2(h)))
        h = F.leaky_relu(self.bn3(self.c3(h)))
        # mimicking global max pooling with average_pooling_2d
        h = self.c4(h)
        logits = F.average_pooling_2d(h, ksize=(4, 4))
        logits = F.squeeze(logits, axis=(1, 2, 3))  # to make it a scalar; maybe unnecessary?

        return logits
Example #35
    def first_forward(self, x, num_lm):
        self.rnn_1(Variable(
            xp.zeros((num_lm, self.n_unit)).astype(xp.float32)))
        h2 = F.relu(
            self.l_norm_cc1(
                self.context_cnn_1(F.average_pooling_2d(x, 4, stride=4))))
        h3 = F.relu(
            self.l_norm_cc2(
                self.context_cnn_2(F.max_pooling_2d(h2, 2, stride=2))))
        h4 = F.relu(
            self.l_norm_cc3(
                self.context_cnn_3(F.max_pooling_2d(h3, 2, stride=2))))
        h4r = F.relu(self.context_full(h4))
        h5 = F.relu(self.rnn_2(h4r))

        l = F.sigmoid(self.attention_loc(h5))
        s = F.sigmoid(self.attention_scale(h5))
        b = F.sigmoid(self.baseline(Variable(h5.data)))
        return l, s, b
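In first_forward above, F.average_pooling_2d(x, 4, stride=4) produces the coarse context input: with ksize equal to stride, each output is the mean of one non-overlapping 4x4 block, i.e. a 4x box-filter downscale. Sketched:

import numpy as np
import chainer.functions as F

x = np.random.rand(1, 1, 8, 8).astype(np.float32)
y = F.average_pooling_2d(x, 4, stride=4)  # -> (1, 1, 2, 2)
assert np.allclose(y.data[0, 0, 0, 0], x[0, 0, :4, :4].mean(), atol=1e-6)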
Example #36
    def __call__(self, x):
        ha = F.average_pooling_2d(x, ksize=3, stride=1, pad=1)
        ha = self.conv_a(ha)

        hb = self.conv_b(x)

        hc = self.conv_c1(x)
        hc = self.conv_c2(hc)
        hc = self.conv_c3(hc)

        hd = self.conv_d1(x)
        hd = self.conv_d2(hd)
        hd = self.conv_d3(hd)
        hd = self.conv_d4(hd)
        hd = self.conv_d5(hd)

        h = F.concat((ha, hb, hc, hd), axis=1)
        # output dimensions: size=(17, 17), channels=base_filter_num*32
        return h
Example #37
 def forward(self, x):
     h = self.conv_bn(x)
     h = self.conv_ds_2(h)
     h = self.conv_ds_3(h)
     h = self.conv_ds_4(h)
     h = self.conv_ds_5(h)
     h = self.conv_ds_6(h)
     h = self.conv_ds_7(h)
     h = self.conv_ds_8(h)
     h = self.conv_ds_9(h)
     h = self.conv_ds_10(h)
     h = self.conv_ds_11(h)
     h = self.conv_ds_12(h)
     h = self.conv_ds_13(h)
     h = self.conv_ds_14(h)
     h = F.average_pooling_2d(h, 7, stride=1)
     h = self.fc(h)
     #h = F.softmax(h)
     return h
Example #38
 def __call__(self, x):
     h = self.conv_bn(x)
     h = self.conv_ds_2(h)
     h = self.conv_ds_3(h)
     h = self.conv_ds_4(h)
     h = self.conv_ds_5(h)
     h = self.conv_ds_6(h)
     h = self.conv_ds_7(h)
     h = self.conv_ds_8(h)
     h = self.conv_ds_9(h)
     h = self.conv_ds_10(h)
     h = self.conv_ds_11(h)
     h = self.conv_ds_12(h)
     h = self.conv_ds_13(h)
     h = self.conv_ds_14(h)
     h = F.average_pooling_2d(h, 7, stride=1)
     # x = F.average(x, axis=(2, 3),keepdims=True)
     h = self.fc7(h)
     return h
Example #39
    def __call__(self, x):
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        if hasattr(self, 'res5'):
            h = self.res5(h)
        if hasattr(self, 'res6'):
            h = self.res6(h)
        if hasattr(self, 'res7'):
            h = self.res7(h)
        if self.class_labels is not None:
            _, _, height, width = h.shape
            h = F.average_pooling_2d(h, (height, width), stride=1)
            h = self.fc(h)

        return h
Example #40
    def __call__(self, x, t, train=True, finetune=False):

        h = x

        # First conv layer
        h = self[0](h)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #41
def rasterize(faces,
              textures,
              image_size=256,
              anti_aliasing=True,
              near=0.1,
              far=100,
              eps=1e-3,
              background_color=(0, 0, 0)):
    if anti_aliasing:
        images = Rasterize(image_size * 2, near, far, eps,
                           background_color)(faces, textures)
        images = images.transpose((0, 3, 1, 2))
        images = cf.average_pooling_2d(images, 2, 2)
    else:
        images = Rasterize(image_size, near, far, eps,
                           background_color)(faces, textures)
        images = images.transpose((0, 3, 1, 2))
    images = images[:, :, ::-1, :]
    return images
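The anti-aliasing branch above renders at twice the target resolution and box-filters back down with cf.average_pooling_2d (cf is presumably an alias for chainer.functions). The downsampling step in isolation:

import numpy as np
import chainer.functions as cf

hi_res = np.random.rand(1, 3, 512, 512).astype(np.float32)  # 2 * image_size
lo_res = cf.average_pooling_2d(hi_res, 2, 2)  # 2x2 means -> (1, 3, 256, 256)
assert lo_res.shape == (1, 3, 256, 256)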
Example #42
    def forward(self, x, t=None):
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        h = self.mlpconv4(F.dropout(h))
        h = F.reshape(F.average_pooling_2d(h, 6), (len(x), self.n_class))

        self.pred = F.softmax(h)

        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(h, t)
        self.acc = F.accuracy(self.pred, t)

        chainer.report({'loss': self.loss, 'accuracy': self.acc}, self)

        return self.loss
Example #43
    def compute_policy(self, h):
        h = F.average_pooling_2d(h, 3)
        h = h.reshape((len(h), 1, self.L_stages+12))
        h = self.f(self.dbn1(self.dc1(h)))
        h = self.f(self.dbn2(self.dc2(h)))
        h = self.f(self.dbn3(self.dc3(h)))
        h = self.f(self.dbn4(self.dc4(h)))
        h = self.f(self.dbn5(self.dc5(h)))
        h = self.dc6(h)

        probs = []
        acts = []

        for i in range(self.action_size):
            p = SoftmaxDistribution(h[:, i, :])
            a = p.sample()
            probs.append(p)
            acts.append(a)

        return probs, acts
Example #44
    def __call__(self, x_high, x_low):
        x_high = self.conv_high(x_high)
        x_low = self.conv_low(x_low)

        w_high = F.average_pooling_2d(x_high, ksize=x_high.shape[2:])
        w_high = self.conv_w_high(w_high)
        w_high = self.relu(w_high)
        w_high = self.sigmoid(w_high)

        w_low = x_low.mean(axis=1, keepdims=True)
        w_low = self.conv_w_low(w_low)
        w_low = self.sigmoid(w_low)

        x_high = self.up(x_high)

        x_high = x_high * w_low
        x_low = x_low * w_high

        out = x_high + x_low
        return out
Example #45
    def __call__(self, x):

        h = self.conv1(x)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.conv1_reduce(h)

        h = self.conv2(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.conv2_reduce(h)

        h = self.conv3(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.conv3_reduce(h)

        h = self.conv4(h)
        h = F.average_pooling_2d(h, 3, stride=1)

        h = self.fc1(h)

        return h
Example #46
    def check_forward_consistency_regression(self, x_data, use_cudnn='always'):
        # Regression test to average_pooling_2d.

        if len(self.dims) != 2:
            return

        ksize = self.ksize
        stride = self.stride
        pad = self.pad

        with chainer.using_config('use_cudnn', use_cudnn):
            y_nd = functions.average_pooling_nd(x_data,
                                                ksize,
                                                stride=stride,
                                                pad=pad)
            y_2d = functions.average_pooling_2d(x_data,
                                                ksize,
                                                stride=stride,
                                                pad=pad)
        testing.assert_allclose(y_nd.data, y_2d.data)
Example #47
    def forward(self, x):
        out = self.model.conv1(x)
        out = F.elu(out)
        out = self.model.conv2(out)

        out = F.max_pooling_2d(out, 2)
        out = F.elu(out)
        out = self.model.conv3(out)
        out = F.elu(out)
        out = self.model.conv4(out)
        out = F.elu(out)

        out = F.average_pooling_2d(out, 6)
        out = F.dropout(out)
        out = self.model.linear1(out)
        out = F.elu(out)
        out = F.dropout(out)
        out = self.model.linear2(out)

        return out
Example #48
    def check_forward(self, inputs, backend_config):
        # TODO(niboshi): Support it
        if backend_config.use_chainerx and self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')

        y_expect, = self.forward_cpu(inputs)

        inputs = backend_config.get_array(inputs)
        if not self.c_contiguous:
            with backend_config:
                inputs = _to_fcontiguous(inputs)

        with backend_config:
            x, = inputs
            y = functions.average_pooling_2d(x, 3, stride=2, pad=1)
        assert y.data.dtype == self.dtype
        y_data = cuda.to_cpu(y.data)

        assert self.output_shape == y_data.shape
        testing.assert_allclose(y_expect, y_data, **self.check_forward_options)
Example #49
    def __call__(self, x):
        xp = chainer.cuda.get_array_module(x.data)
        sh, sw = self.conv1.stride
        c_out, c_in, kh, kw = self.conv1.W.data.shape
        b, c, hh, ww = x.data.shape
        if sh == 1 and sw == 1:
            shape_out = (b, c_out, hh, ww)
        else:
            hh = (hh + 2 - kh) // sh + 1
            ww = (ww + 2 - kw) // sw + 1
            shape_out = (b, c_out, hh, ww)
        h = x
        if x.data.shape[2:] != shape_out[2:]:
            x = F.average_pooling_2d(x, 1, 2)
        if x.data.shape[1] != c_out:
            n, c, hh, ww = x.data.shape
            pad_c = c_out - c
            p = xp.zeros((n, pad_c, hh, ww), dtype=xp.float32)
            p = chainer.Variable(p)
            x = F.concat((x, p), axis=1)

        h = self.bn1(h)
        h = self.conv1(h)
        h = self.bn2(h)
        if self.activation is not None:
            h = self.activation(h)
        h = self.conv2(h)
        h = self.bn3(h)

        # as noted above: skip == True -> apply shake
        if not chainer.config.train:
            skip = False
            scale = (1 - self.skip_ratio) + self.expect_alpha * self.skip_ratio
            return h * scale + x
        else:
            skip = np.random.rand() < self.skip_ratio

        if skip:
            return shake_noise_multiplier(h, self.alpha, self.beta) + x
        else:
            return h + x
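Two shortcut-path tricks above are worth isolating: F.average_pooling_2d(x, 1, 2) with a 1x1 kernel is plain subsampling (it keeps every other pixel), and zero-filled channels are concatenated so the shortcut matches the widened residual branch. A minimal sketch:

import numpy as np
import chainer.functions as F

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
sub = F.average_pooling_2d(x, 1, 2)  # (1, 1, 2, 2), equals x[..., ::2, ::2]
assert np.array_equal(sub.data, x[:, :, ::2, ::2])

pad = np.zeros((1, 3, 2, 2), dtype=np.float32)  # extra zero channels
widened = F.concat((sub, pad), axis=1)  # -> (1, 4, 2, 2)
assert widened.shape == (1, 4, 2, 2)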
Example #50
    def __call__(self, x, t):
        h = F.max_pooling_2d(F.leaky_relu(self.conv1(x)), 2, 2)
        h = F.max_pooling_2d(F.leaky_relu(self.conv2(h)), 2, 2)
        h = F.leaky_relu(self.conv3(h))
        h = F.leaky_relu(self.conv4(h))
        h = F.leaky_relu(self.conv5(h))
        h = F.max_pooling_2d(F.leaky_relu(self.conv6(h)), 2, 2)

        h = F.leaky_relu(self.conv7(h))
        h = F.leaky_relu(self.conv8(h))
        h = F.leaky_relu(self.conv9(h))
        h = F.leaky_relu(self.conv10(h))
        h = F.leaky_relu(self.conv11(h))
        h = F.leaky_relu(self.conv12(h))
        h = F.leaky_relu(self.conv13(h))
        h = F.leaky_relu(self.conv14(h))
        h = F.leaky_relu(self.conv15(h))
        h = F.max_pooling_2d(F.leaky_relu(self.conv16(h)), 2, 2)

        h = F.leaky_relu(self.conv17(h))
        h = F.leaky_relu(self.conv18(h))
        h = F.leaky_relu(self.conv19(h))

        if self.pre_train:
            h = F.average_pooling_2d(h, 2, 2)
            h = self.fc_pre(h)
            self.loss = F.softmax_cross_entropy(h, t)
            self.accuracy = F.accuracy(h, t)
            return self.loss
        else:
            h = F.leaky_relu(self.conv20(h))
            h = F.leaky_relu(self.conv21(h))
            h = F.leaky_relu(self.conv22(h))
            h = F.leaky_relu(self.conv23(h))
            h = F.leaky_relu(self.conv24(h))
            self.h = h
            h = F.leaky_relu(self.fc25(h))
            h = F.relu(self.fc26(h))
            #self.loss = self.loss_func(h, t)
            #self.accuracy = self.loss
            self.img = (x, h)
Example #51
    def __init__(self, n_layers, n_out, stride=(1, 2, 2), layer_names=None):
        super().__init__()
        kwargs = {'initialW': normal.HeNormal(scale=1.0)}
        if (n_layers - 2) % 3 == 0:
            block = [(n_layers - 2) // 9] * 3
        else:
            raise ValueError(
                'The n_layers argument should satisfy '
                '(n_layers - 2) % 3 == 0, but {} was given.'.format(n_layers))

        with self.init_scope():
            self.conv1 = L.Convolution2D(3, 16, 3, 1, 1, **kwargs)
            self.res2 = PreBuildingBlock(block[0], 16, 16, 64, stride[0],
                                         **kwargs)
            self.res3 = PreBuildingBlock(block[1], 64, 32, 128, stride[1],
                                         **kwargs)
            self.res4 = PreBuildingBlock(block[2], 128, 64, 256, stride[2],
                                         **kwargs)
            self.bn4 = L.BatchNormalization(256)
            self.fc5 = L.Linear(None, n_out)

        self.functions = collections.OrderedDict([
            ('conv1', [self.conv1]),
            ('res2', [self.res2]),
            ('res3', [self.res3]),
            ('res4', [self.res4]),
            ('pool4', [self.bn4, F.relu,
                       lambda x: F.average_pooling_2d(x, 8, stride=1)]),
            ('fc5', [self.fc5]),
        ])

        if layer_names is None:
            layer_names = list(self.functions.keys())[-1]
        if (not isinstance(layer_names, str) and
                all([isinstance(name, str) for name in layer_names])):
            return_tuple = True
        else:
            return_tuple = False
            layer_names = [layer_names]
        self._return_tuple = return_tuple
        self._layer_names = layer_names
Example #52
    def __call__(self, images, localizations):
        points = F.spatial_transformer_grid(localizations, self.target_shape)
        rois = F.spatial_transformer_sampler(images, points)

        h = F.relu(self.bn0(self.conv0(rois)))
        if self.use_dropout:
            h = F.dropout(h, ratio=self.dropout_ratio)
        h = F.relu(self.bn1(self.conv1(h)))
        if self.use_dropout:
            h = F.dropout(h, ratio=self.dropout_ratio)

        h = self.rs1(h)
        h = self.rs2(h)
        h = F.max_pooling_2d(h, 2, stride=2)
        h = self.rs3(h)
        self.vis_anchor = h

        h = F.average_pooling_2d(h, 5, stride=1)

        if self.uses_original_data:
            # merge data of all 4 individual images in channel dimension
            batch_size, num_channels, height, width = h.shape
            h = F.reshape(h,
                          (batch_size // 4, 4 * num_channels, height, width))

        h = F.relu(self.fc1(h))

        # for each timestep of the localization net do the 'classification'
        h = F.reshape(h, (self.num_timesteps, -1, self.fc1.out_size))
        overall_predictions = []
        for timestep in F.separate(h, axis=0):
            # run 2 * num_labels + 1 steps per timestep because of the CTC loss
            lstm_predictions = []
            self.lstm.reset_state()
            for _ in range(self.num_labels * 2 + 1):
                lstm_prediction = self.lstm(timestep)
                classified = self.classifier(lstm_prediction)
                lstm_predictions.append(classified)
            overall_predictions.append(lstm_predictions)

        return overall_predictions, rois, points
Example #53
    def check_forward_consistency_regression(self, x_data, use_cudnn=True):
        # Regression test to average_pooling_2d.

        if len(self.dims) != 2:
            return

        ksize = self.ksize
        stride = self.stride
        pad = self.pad

        y_nd = functions.average_pooling_nd(x_data,
                                            ksize,
                                            stride=stride,
                                            pad=pad,
                                            use_cudnn=use_cudnn)
        y_2d = functions.average_pooling_2d(x_data,
                                            ksize,
                                            stride=stride,
                                            pad=pad,
                                            use_cudnn=use_cudnn)
        testing.assert_allclose(y_nd.data, y_2d.data)
Example #54
    def predict(self, x_data):
        x = chainer.Variable(x_data, volatile=True)

        h = F.relu(self.conv1(x))
        h = F.relu(self.conv1a(h))
        h = F.relu(self.conv1b(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv2a(h))
        h = F.relu(self.conv2b(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv3a(h))
        h = F.relu(self.conv3b(h))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.dropout(h, train=False)
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv4a(h))
        h = F.relu(self.conv4b(h))
        h = F.reshape(F.average_pooling_2d(h, 6), (x_data.shape[0], 1000))
        return F.softmax(h)
Example #55
    def __call__(self, x):
        h = chainer.Variable(x)

        h = self.conv1(h)
        self.conv1.out_size = h.shape[-2:]
        h = self.block3(h)

        h = self.trans_conv1(h)
        self.trans_conv1.out_size = h.shape[-2:]
        h = self.block4(h)

        h = self.trans_conv2(h)
        self.trans_conv2.out_size = h.shape[-2:]
        h = self.block5(h)

        h = F.relu(self.bn5(h))
        h = F.average_pooling_2d(h, h.shape[2:])

        h = self.fc7(h)

        return h
Example #56
 def __call__(self, x, t=None):
     h = F.relu(self.bn1(self.conv1(x)))
     h = self.conv2(F.relu(self.bn2(h)))
     h = F.max_pooling_2d(h, 2)
     h = F.dropout(h, 0.25)
     h = self.conv3(F.relu(self.bn3(h)))
     h = F.max_pooling_2d(h, 2)
     h = F.dropout(h, 0.25)
     h = self.conv4(F.relu(self.bn4(h)))
     h = F.max_pooling_2d(h, 2)
     h = F.dropout(h, 0.25)
     h = self.conv5(F.relu(self.bn5(h)))
     h = F.average_pooling_2d(h, 4)
     h = self.fc(h)
     if t is not None:
         self.loss = F.softmax_cross_entropy(h, t)
         self.accuracy = F.accuracy(h, t)
         return self.loss
     else:
         self.pred = F.softmax(h)
         return self.pred
Example #57
    def __call__(self, x):
        h = self.conv1(self.bn1(x))
        h = self.conv2(F.relu(self.bn2(h)))
        if self.stride == 2:
            h = self.conv3(pgp(F.relu(self.bn3(h)), 2))
        else:
            h = self.conv3(F.relu(self.bn3(h)))

        h = self.bn4(h)
        batch, h_channel, h_height, h_width = h.shape
        del h_channel
        with chainer.cuda.get_device_from_array(x.data):
            zero = chainer.Variable(
                self.xp.zeros((batch, self.zero_ch, h_height, h_width),
                              dtype=self.xp.float32))
        if self.stride == 2:
            h0 = F.concat(
                (pgp(F.average_pooling_2d(x, 2, 1, 1)[:, :, 1:, 1:], 2), zero))
            return h + h0
        else:
            return h + F.concat((x, zero))
Example #58
    def forward(self, x, t):
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.fc(h)

        #loss = F.softmax_cross_entropy(h, t)
        loss = self.softmax_cross_entropy(h, t)
        if self.compute_accuracy:
            chainer.report(
                {
                    'loss': loss,
                    'accuracy': F.accuracy(h, np.argmax(t, axis=1))
                }, self)
        else:
            chainer.report({'loss': loss}, self)
        return loss
Example #59
    def __call__(self, x):
        """Feed-Forward calculation."""
        bs = len(x)
        h = F.relu(self.bn1(self.conv1(x)))

        h = self.block1(h)
        h = self.trans1(h)

        h = self.block2(h)
        h = self.trans2(h)

        h = self.block3(h)
        h = self.trans3(h)

        h = self.bn4(self.block4(h))
        h = F.relu(h)

        h = F.average_pooling_2d(h, h.shape[2])
        h = F.reshape(h, (bs, -1))

        return self.prob(h)
Example #60
    def __call__(self, x):  # => 3   × 256
        h = self.conv1(x)  # => 64  ×  128
        h = F.max_pooling_2d(h, 3, 2)  # => 64  ×  64

        n = 1 / 2 / 8
        h = self.block1_1(h, 1 * n)  # => 64  ×  64
        h = self.block1_2(h, 2 * n)  # => 64  ×  64

        h = self.block2_1(h, 3 * n)  # => 128 ×  32
        h = self.block2_2(h, 4 * n)  # => 128 ×  32

        h = self.block3_1(h, 5 * n)  # => 256 ×   16
        h = self.block3_2(h, 6 * n)  # => 256 ×   16

        h = self.block4_1(h, 7 * n)  # => 512 ×   8
        h = self.block4_2(h, 8 * n)  # => 512 ×   8

        h = F.average_pooling_2d(h, h.shape[2])  # global average pooling

        # h = self.l1(h)
        return F.tanh(self.l2(h))