Example #1
 def forward(self, x):
     out = F.relu(F.max_pool2d(self.conv1(x), 2))
     out = F.relu(F.max_pool2d(self.conv2(out), 2))
     out = out.view(-1, 320)
     out = F.relu(self.fc1(out))
     out = self.fc2(out)
     return F.log_softmax(out, dim=1)
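A note on the hard-coded 320 in out.view(-1, 320): assuming the usual MNIST setup with conv1 = Conv2d(1, 10, 5) and conv2 = Conv2d(10, 20, 5) (the layers are not shown in the snippet), a 28x28 input shrinks 28 -> 24 -> 12 -> 8 -> 4, giving 20 * 4 * 4 = 320 flattened features. A quick sketch to verify:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1, conv2 = nn.Conv2d(1, 10, 5), nn.Conv2d(10, 20, 5)  # assumed layer shapes
x = torch.randn(1, 1, 28, 28)
x = F.relu(F.max_pool2d(conv1(x), 2))   # (1, 10, 12, 12)
x = F.relu(F.max_pool2d(conv2(x), 2))   # (1, 20, 4, 4)
assert x.view(-1).numel() == 320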
Example #2
    def forward(self, x):
        x = self.conv1(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = self.block1(x)
        x = self.group1(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = self.block2(x)
        x = self.group2(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = self.block3(x)
        x = self.group3(x)
        x = self.block4(x)
        x = self.group4(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = x.view(x.size(0), -1)
        fc = self.fc(x)
        x = F.dropout(fc, training=self.training)
        
        output = list()
        for name, fun in self.fc_dict.items():
            out = fun(x)
            output.append(out)

        return output, fc
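The downsampling used above sums F.max_pool2d and F.avg_pool2d, a simple mixed-pooling variant: both ops halve H and W with the same kernel, so the two maps stay shape-compatible. A minimal sketch (shapes illustrative, not from the original model):

import torch
import torch.nn.functional as F

def mixed_pool(x, k=2):
    # max- and average-pooled maps have identical shapes, so they can be summed
    return F.max_pool2d(x, k) + F.avg_pool2d(x, k)

x = torch.randn(1, 16, 32, 32)
assert mixed_pool(x).shape == (1, 16, 16, 16)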
Example #3
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), 2)
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(-1, 64 * 7 * 7)  # flatten the feature maps
     x = F.relu(self.fc1(x))
     x = self.fc2(x)
     return F.log_softmax(x, dim=-1)
Example #4
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     return F.log_softmax(self.fc2(x), dim=1)
Example #5
    def forward(self, X):
        h = F.relu(self.conv1_1(X), inplace=True)
        h = F.relu(self.conv1_2(h), inplace=True)
        # relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h), inplace=True)
        h = F.relu(self.conv2_2(h), inplace=True)
        # relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h), inplace=True)
        h = F.relu(self.conv3_2(h), inplace=True)
        h = F.relu(self.conv3_3(h), inplace=True)
        # relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h), inplace=True)
        h = F.relu(self.conv4_2(h), inplace=True)
        h = F.relu(self.conv4_3(h), inplace=True)
        # relu4_3 = h

        h = F.relu(self.conv5_1(h), inplace=True)
        h = F.relu(self.conv5_2(h), inplace=True)
        h = F.relu(self.conv5_3(h), inplace=True)
        relu5_3 = h

        return relu5_3
Example #6
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2(x), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     x = self.fc2(x)
     return x
Example #7
 def forward(self, x):
     if self.transform_input:
         x = x.clone()
         x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
         x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
         x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
     else: warn("Input isn't transformed")
     x = self.Conv2d_1a_3x3(x)
     x = self.Conv2d_2a_3x3(x)
     x = self.Conv2d_2b_3x3(x)
     x = F.max_pool2d(x, kernel_size=3, stride=2)
     x = self.Conv2d_3b_1x1(x)
     x = self.Conv2d_4a_3x3(x)
     x = F.max_pool2d(x, kernel_size=3, stride=2)
     x = self.Mixed_5b(x)
     x = self.Mixed_5c(x)
     x = self.Mixed_5d(x)
     x = self.Mixed_6a(x)
     x = self.Mixed_6b(x)
     x = self.Mixed_6c(x)
     x = self.Mixed_6d(x)
     x = self.Mixed_6e(x)
     x = self.Mixed_7a(x)
     x = self.Mixed_7b(x)
     x_for_attn = x = self.Mixed_7c(x)
     # 8 x 8 x 2048
     x = F.avg_pool2d(x, kernel_size=8)
     # 1 x 1 x 2048
     x_for_capt = x = x.view(x.size(0), -1)
     # 2048
     x = self.fc(x)
     # 1000 (num_classes)
     return x_for_attn, x_for_capt, x
Example #8
    def forward(self, x):
        x1 = self.conv1(x)
        x1 = F.max_pool2d(x1, 3, stride=2)
        x2 = self.fire2(x1)
        x3 = self.fire3(x2)
        if self.bypass:
            x3 = x3 + x2
        x4 = self.fire4(x3)
        x4 = F.max_pool2d(x4, 3, stride=2)
        x5 = self.fire5(x4)
        if self.bypass:
            x5 = x5 + x4
        x6 = self.fire6(x5)
        x7 = self.fire7(x6)
        if self.bypass:
            x7 = x7 + x6
        x8 = self.fire8(x7)
        x8 = F.max_pool2d(x8, 3, stride=2)
        x9 = self.fire9(x8)
        if self.bypass:
            x9 = x9 + x8
        x9 = F.dropout(x9, training=self.training)
        x10 = F.relu(self.conv10(x9))
        f = F.avg_pool2d(x10, x10.size()[2:]).view(x10.size(0), -1)

        if not self.training:
            return f

        if self.loss == {'xent'}:
            return f
        elif self.loss == {'xent', 'htri'}:
            return f, f
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
Example #9
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2(x), 2))
     x = x.view(-1, 7*7*64)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, 0.4)
     x = self.fc2(x)
     return F.log_softmax(x, dim=1)
Example #10
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2(x), 2))
     x = x.view(-1, 1600)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     x = self.fc2(x)
     return th.abs(10 - x)
Example #11
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # max pooling over a 2x2 window
     x = F.max_pool2d(F.relu(self.conv2(x)), 2) # if the size is square, you can specify a single number
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #12
 def forward(self, x):
     steps = x.shape[0]
     batch = x.shape[1]
     x = x.view(steps * batch, 1, -1, 11)
     out = F.max_pool2d(self.conv1(x), (2, 1))
     out = F.max_pool2d(self.conv2(out), (2, 2))
     out = out.view(steps, batch, -1)
     return out
Example #13
 def forward(self, x, y, z):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2(x), 2))
     x = x.view(-1, 1600)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     x = self.fc2(x)
     return F.log_softmax(x, dim=1), F.log_softmax(x, dim=1), F.log_softmax(x, dim=1)
Example #14
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.convolution_0(x)), (2, 2))
     x = F.max_pool2d(F.relu(self.convolution_1(x)), (2, 2))
     x = x.view(-1, 32 * 5 * 5)
     x = F.relu(self.fully_connected_0(x))
     x = F.relu(self.fully_connected_1(x))
     x = self.fully_connected_2(x)
     return x
Example #15
	def forward(self,x):
		x = F.max_pool2d(F.relu(self.conv1(x)),(2,2))
		x = F.max_pool2d(F.relu(self.conv2(x)),2)
		x = x.view(x.size()[0],-1)
		x = F.relu(self.fc1(x))
		x = F.relu(self.fc2(x))
		x = self.fc3(x)
		return x
Example #16
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #17
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # when shape is square, a single number is fine.
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #18
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # Max pooling over a (2, 2) window
     x = F.max_pool2d(F.relu(self.conv2(x)), 2) # If the size is a square you can only specify a single number
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #19
def siam(patch, params):
    o = conv2d(patch, params, 'conv0', stride=3)
    o = F.max_pool2d(F.relu(o), 2, 2)
    o = conv2d(o, params, 'conv1')
    o = F.max_pool2d(F.relu(o), 2, 2)
    o = conv2d(o, params, 'conv2')
    o = F.relu(o)
    return o.view(o.size(0), -1)
Example #20
def deepcompare_2ch(input, params):
    o = conv2d(input, params, 'conv0', stride=3)
    o = F.max_pool2d(F.relu(o), 2, 2)
    o = conv2d(o, params, 'conv1')
    o = F.max_pool2d(F.relu(o), 2, 2)
    o = conv2d(o, params, 'conv2')
    o = F.relu(o).view(o.size(0), -1)
    return linear(o, params, 'fc')
Example #21
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.max_pool2d(x, kernel_size=2, stride=2)
     x = F.relu(self.conv2(x))
     x = F.max_pool2d(x, kernel_size=2, stride=2)
     x = x.view([-1, 21 * 21 * 64])
     x = self.fc1(x)
     x = F.dropout(x, p=0.4, training=self.training)
     return F.softmax(self.fc2(x), dim=1)
Example #22
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.max_pool2d(x, 2, 2)
     x = F.relu(self.conv2(x))
     x = F.max_pool2d(x, 2, 2)
     x = x.view(-1, 4 * 4 * 50)
     x = F.relu(self.fc1(x))
     x = self.fc2(x)
     return F.log_softmax(x, dim=1)
Example #23
 def forward(self, x):
     if self.transform_input:
         x = x.clone()
         x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
         x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
         x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
     # 299 x 299 x 3
     x = self.Conv2d_1a_3x3(x)
     # 149 x 149 x 32
     x = self.Conv2d_2a_3x3(x)
     # 147 x 147 x 32
     x = self.Conv2d_2b_3x3(x)
     # 147 x 147 x 64
     x = F.max_pool2d(x, kernel_size=3, stride=2)
     # 73 x 73 x 64
     x = self.Conv2d_3b_1x1(x)
     # 73 x 73 x 80
     x = self.Conv2d_4a_3x3(x)
     # 71 x 71 x 192
     x = F.max_pool2d(x, kernel_size=3, stride=2)
     # 35 x 35 x 192
     x = self.Mixed_5b(x)
     # 35 x 35 x 256
     x = self.Mixed_5c(x)
     # 35 x 35 x 288
     x = self.Mixed_5d(x)
     # 35 x 35 x 288
     x = self.Mixed_6a(x)
     # 17 x 17 x 768
     x = self.Mixed_6b(x)
     # 17 x 17 x 768
     x = self.Mixed_6c(x)
     # 17 x 17 x 768
     x = self.Mixed_6d(x)
     # 17 x 17 x 768
     x = self.Mixed_6e(x)
     # 17 x 17 x 768
     if self.training and self.aux_logits:
         aux = self.AuxLogits(x)
     # 17 x 17 x 768
     x = self.Mixed_7a(x)
     # 8 x 8 x 1280
     x = self.Mixed_7b(x)
     # 8 x 8 x 2048
     x = self.Mixed_7c(x)
     # 8 x 8 x 2048
     x = F.avg_pool2d(x, kernel_size=8)
     # 1 x 1 x 2048
     x = F.dropout(x, training=self.training)
     # 1 x 1 x 2048
     x = x.view(x.size(0), -1)
     # 2048
     x = self.fc(x)
     # 1000 (num_classes)
     if self.training and self.aux_logits:
         return x, aux
     return x
Example #24
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.max_pool2d(x, 2)
     x = F.relu(self.conv2(x))
     x = F.max_pool2d(x, 2)
     out = x.view(x.size(0), -1)
     out = F.relu(self.fc1(out))
     out = self.fc2(out)
     return out
Example #25
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     #x = x.view(-1, 320)
     x = x.view((x.size(0), -1))
     x = F.relu(self.fc1(x))
     #x = F.dropout(x, training=self.training)
     x = self.fc2(x)
     return x
Example #26
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(self.conv2(x))
     x = F.relu(F.max_pool2d(self.conv3(x), 2))
     x = F.relu(self.conv4(x))
     x = x.view(-1, 4200)
     x = F.relu(self.fc1(x))
     action_scores = self.fc2(x)
     return F.softmax(action_scores, dim=1)
Example #27
    def forward(self, src, lengths=None):
        """See :func:`onmt.encoders.encoder.EncoderBase.forward()`"""

        batch_size = src.size(0)
        # (batch_size, 64, imgH, imgW)
        # layer 1
        src = F.relu(self.layer1(src[:, :, :, :] - 0.5), True)

        # (batch_size, 64, imgH/2, imgW/2)
        src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))

        # (batch_size, 128, imgH/2, imgW/2)
        # layer 2
        src = F.relu(self.layer2(src), True)

        # (batch_size, 128, imgH/2/2, imgW/2/2)
        src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))

        #  (batch_size, 256, imgH/2/2, imgW/2/2)
        # layer 3
        # batch norm 1
        src = F.relu(self.batch_norm1(self.layer3(src)), True)

        # (batch_size, 256, imgH/2/2, imgW/2/2)
        # layer4
        src = F.relu(self.layer4(src), True)

        # (batch_size, 256, imgH/2/2/2, imgW/2/2)
        src = F.max_pool2d(src, kernel_size=(1, 2), stride=(1, 2))

        # (batch_size, 512, imgH/2/2/2, imgW/2/2)
        # layer 5
        # batch norm 2
        src = F.relu(self.batch_norm2(self.layer5(src)), True)

        # (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
        src = F.max_pool2d(src, kernel_size=(2, 1), stride=(2, 1))

        # (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
        src = F.relu(self.batch_norm3(self.layer6(src)), True)

        # # (batch_size, 512, H, W)
        all_outputs = []
        for row in range(src.size(2)):
            inp = src[:, :, row, :].transpose(0, 2) \
                .transpose(1, 2)
            row_vec = torch.Tensor(batch_size).type_as(inp.data) \
                .long().fill_(row)
            pos_emb = self.pos_lut(row_vec)
            with_pos = torch.cat(
                (pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
            outputs, hidden_t = self.rnn(with_pos)
            all_outputs.append(outputs)
        out = torch.cat(all_outputs, 0)

        return hidden_t, out, lengths
Example #28
    def forward(self, input, lengths=None):
        "See :obj:`onmt.modules.EncoderBase.forward()`"

        batch_size = input.size(0)
        # (batch_size, 64, imgH, imgW)
        # layer 1
        input = F.relu(self.layer1(input[:, :, :, :]-0.5), True)

        # (batch_size, 64, imgH/2, imgW/2)
        input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))

        # (batch_size, 128, imgH/2, imgW/2)
        # layer 2
        input = F.relu(self.layer2(input), True)

        # (batch_size, 128, imgH/2/2, imgW/2/2)
        input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))

        #  (batch_size, 256, imgH/2/2, imgW/2/2)
        # layer 3
        # batch norm 1
        input = F.relu(self.batch_norm1(self.layer3(input)), True)

        # (batch_size, 256, imgH/2/2, imgW/2/2)
        # layer4
        input = F.relu(self.layer4(input), True)

        # (batch_size, 256, imgH/2/2/2, imgW/2/2)
        input = F.max_pool2d(input, kernel_size=(1, 2), stride=(1, 2))

        # (batch_size, 512, imgH/2/2/2, imgW/2/2)
        # layer 5
        # batch norm 2
        input = F.relu(self.batch_norm2(self.layer5(input)), True)

        # (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
        input = F.max_pool2d(input, kernel_size=(2, 1), stride=(2, 1))

        # (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
        input = F.relu(self.batch_norm3(self.layer6(input)), True)

        # # (batch_size, 512, H, W)
        all_outputs = []
        for row in range(input.size(2)):
            inp = input[:, :, row, :].transpose(0, 2)\
                                     .transpose(1, 2)
            row_vec = torch.Tensor(batch_size).type_as(inp.data)\
                                              .long().fill_(row)
            pos_emb = self.pos_lut(Variable(row_vec))
            with_pos = torch.cat(
                (pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
            outputs, hidden_t = self.rnn(with_pos)
            all_outputs.append(outputs)
        out = torch.cat(all_outputs, 0)

        return hidden_t, out
Example #29
 def stream(input, name):
     o = conv2d(input, params, name + '.conv0')
     o = F.max_pool2d(F.relu(o), 2, 2)
     o = conv2d(o, params, name + '.conv1')
     o = F.max_pool2d(F.relu(o), 2, 2)
     o = conv2d(o, params, name + '.conv2')
     o = F.relu(o)
     o = conv2d(o, params, name + '.conv3')
     o = F.relu(o)
     return o.view(o.size(0), -1)
Example #30
 def forward(self, x):
     out = F.relu(self.conv1(x))
     out = F.max_pool2d(out, 2)
     out = F.relu(self.conv2(out))
     out = F.max_pool2d(out, 2)
     out = out.view(out.size(0), -1)
     out = F.relu(self.fc1(out))
     out = F.relu(self.fc2(out))
     out = self.fc3(out)
     return out
Example #31
 def count_neurons(self, image_dim):
     x = Variable(torch.rand(1, *image_dim))
     x = F.relu(F.max_pool2d(self.convolution1(x), 3, 2))
     x = F.relu(F.max_pool2d(self.convolution2(x), 3, 2))
     x = F.relu(F.max_pool2d(self.convolution3(x), 3, 2))
     return x.data.view(1, -1).size(1)
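count_neurons runs a throwaway tensor through the convolutional stack to discover the flattened feature size, so the first fully connected layer can be sized without hand-computing shapes. A sketch of the same trick without the deprecated Variable wrapper (the single conv layer is an illustrative stand-in for the real stack):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(3, 8, kernel_size=5)  # illustrative stand-in

def count_neurons(image_dim):
    with torch.no_grad():
        x = torch.rand(1, *image_dim)          # dummy input, batch of one
        x = F.relu(F.max_pool2d(conv(x), 3, 2))
        return x.view(1, -1).size(1)           # flattened feature count

fc1 = nn.Linear(count_neurons((3, 64, 64)), 40)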
Example #32
def ConvBlockFunction(x, w, b, w_bn, b_bn):
    x = F.conv2d(x, w, b, padding=1)
    x = F.batch_norm(x, running_mean=None, running_var=None, weight=w_bn, bias=b_bn, training=True)
    x = F.relu(x)
    x = F.max_pool2d(x, kernel_size=2, stride=2)
    return x
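ConvBlockFunction above is purely functional: the conv and batch-norm parameters are passed in rather than stored on a module, the usual pattern in meta-learning inner loops (e.g. MAML-style fast weights) where parameters are updated out-of-place. A hedged usage sketch with hand-made parameters (shapes are illustrative):

import torch

# 3 -> 8 channels, 3x3 kernel; padding=1 keeps H and W, the pool then halves them
w = torch.randn(8, 3, 3, 3, requires_grad=True)
b = torch.zeros(8, requires_grad=True)
w_bn = torch.ones(8, requires_grad=True)
b_bn = torch.zeros(8, requires_grad=True)

x = torch.randn(4, 3, 32, 32)
out = ConvBlockFunction(x, w, b, w_bn, b_bn)  # -> (4, 8, 16, 16)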
Example #33
    def forward(self, x, loc, label_class=None, label_box=None):
        '''
        Param:
        x:           FloatTensor(batch_num, 3, H, W)
        loc:         FloatTensor(batch_num, 4)
        label_class: LongTensor(batch_num, N_max) or None
        label_box:   FloatTensor(batch_num, N_max, 4) or None

        Return 1:
        loss: FloatTensor(batch_num)

        Return 2:
        cls_i_preds: LongTensor(batch_num, topk)
        cls_p_preds: FloatTensor(batch_num, topk)
        reg_preds:   FloatTensor(batch_num, topk, 4)
        '''
        w_2_in = self.w_relu(self.w_2_in)
        w_3_in = self.w_relu(self.w_3_in)
        w_2_in /= torch.sum(w_2_in, dim=0) + self.eps
        w_3_in /= torch.sum(w_3_in, dim=0) + self.eps

        C3, C4, C5 = self.backbone(x)

        D5 = self.bi1_prj1_5(C5)
        D5 = self.bi1_prj1_5bn(D5)
        D4 = self.bi1_prj1_4(C4)
        D4 = self.bi1_prj1_4bn(D4)
        D3 = self.bi1_prj1_3(C3)
        D3 = self.bi1_prj1_3bn(D3)

        E4 = self.bi1_prj2_4(
            (w_2_in[0][0] * D4 + w_2_in[1][0] * self.upsample(D5)))

        F3 = self.bi1_prj3_3(
            (w_2_in[0][1] * D3 + w_2_in[1][1] * self.upsample(E4)))
        F4 = self.bi1_prj3_4((w_3_in[0][0] * self.downsample(F3) +
                              w_3_in[1][0] * E4 + w_3_in[2][0] * D4))
        F5 = self.bi1_prj3_5(
            (w_2_in[0][2] * D5 + w_2_in[1][2] * self.downsample(F4)))

        G4 = self.bi2_prj2_4(
            (w_2_in[0][3] * F4 + w_2_in[1][3] * self.upsample(F5)))

        H3 = self.bi2_prj3_3(
            (w_2_in[0][4] * F3 + w_2_in[1][4] * self.upsample(G4)))
        H4 = self.bi2_prj3_4((w_3_in[0][1] * self.downsample(H3) +
                              w_3_in[1][1] * G4 + w_3_in[2][1] * F4))
        H5 = self.bi2_prj3_5(
            (w_2_in[0][5] * F5 + w_2_in[1][5] * self.downsample(H4)))

        P6 = self.conv_out6(C5)
        P7 = self.conv_out7(self.relu(P6))

        P3, P4, P5, P6, P7 = H3, H4, H5, P6, P7

        # log = [w_2_in.data.cpu().numpy(), w_3_in.data.cpu().numpy()]
        # np.save('./bifpn_weight/weight_log', log)
        # print('w_2_in = {0}, w_3_in={1}'.format(w_2_in.data.cpu().numpy(), w_3_in.data.cpu().numpy()))

        if self.balanced_fpn:
            # kernel_size, stride, padding, dilation, False, False
            P3 = F.max_pool2d(P3, 3, 2, 1, 1, False, False)
            P5 = self.upsample(P5)
            P4 = (P3 + P4 + P5) / 3.0
            # kernel_size, stride, padding, False, False
            P5 = F.avg_pool2d(P4, 3, 2, 1, False, False)
            P3 = self.upsample(P4)

        pred_list = [P3, P4, P5, P6, P7]
        assert len(pred_list) == self.scales
        # assert len(pred_list) == len(self.scales)

        cls_out = []
        reg_out = []
        for item in pred_list:
            cls_i = self.conv_cls(item)
            reg_i = self.conv_reg(item)
            # cls_i: [b, an*classes, H, W] -> [b, H*W*an, classes]
            cls_i = cls_i.permute(0, 2, 3, 1).contiguous()
            cls_i = cls_i.view(cls_i.shape[0], -1, self.classes)
            # reg_i: [b, an*4, H, W] -> [b, H*W*an, 4]
            reg_i = reg_i.permute(0, 2, 3, 1).contiguous()
            reg_i = reg_i.view(reg_i.shape[0], -1, 4)
            cls_out.append(cls_i)
            reg_out.append(reg_i)

        # cls_out[b, hwan, classes]
        # reg_out[b, hwan, 4]
        cls_out = torch.cat(cls_out, dim=1)
        reg_out = torch.cat(reg_out, dim=1)

        if (label_class is not None) and (label_box is not None):
            targets_cls, targets_reg = self._encode(
                label_class, label_box, loc)  # (b, hwan), (b, hwan, 4)
            mask_cls = targets_cls > -1  # (b, hwan)
            mask_reg = targets_cls > 0  # (b, hwan)
            num_pos = torch.sum(mask_reg, dim=1).clamp_(min=self.scales)  # (b)
            # num_pos = torch.sum(mask_reg, dim=1).clamp_(min=len(self.scales)) # (b)
            loss = []
            for b in range(targets_cls.shape[0]):
                cls_out_b = cls_out[b][mask_cls[b]]  # (S+-, classes)
                reg_out_b = reg_out[b][mask_reg[b]]  # (S+, 4)
                targets_cls_b = targets_cls[b][mask_cls[b]]  # (S+-)
                targets_reg_b = targets_reg[b][mask_reg[b]]  # # (S+, 4)
                loss_cls_b = sigmoid_focal_loss(cls_out_b, targets_cls_b, 2.0,
                                                0.25).sum().view(1)
                loss_reg_b = smooth_l1_loss(reg_out_b, targets_reg_b,
                                            0.11).sum().view(1)
                loss.append((loss_cls_b + loss_reg_b) / float(num_pos[b]))
            return torch.cat(loss, dim=0)  # (b)
        else:
            return self._decode(cls_out, reg_out, loc)
Example #34
 def forward(self, x):
     x = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
     return x
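This forward is a size-preserving 2x2 max pool: replicate-padding one pixel on the right and bottom, then pooling with stride 1, returns a map of the original shape where each pixel is the max of its 2x2 neighbourhood. A quick shape check (sketch):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
y = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
assert y.shape == x.shape  # (1, 3, 8, 8)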
Example #35
    def forward(self, x):
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0],
                                    1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1],
                                    1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2],
                                    1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        y = self.Mixed_6a(x)
        # 17 x 17 x 768
        y = self.Mixed_6b(y)
        # 17 x 17 x 768
        y = self.Mixed_6c(y)
        # 17 x 17 x 768
        y = self.Mixed_6d(y)
        # 17 x 17 x 768
        y = self.Mixed_6e(y)
        # 17 x 17 x 768
        if self.training and self.aux_logits:
            aux = self.AuxLogits(y)
        # 17 x 17 x 768
        z = self.Mixed_7a(y)
        # 8 x 8 x 1280
        z = self.Mixed_7b(z)
        # 8 x 8 x 2048
        z = self.Mixed_7c(z)
        # 8 x 8 x 2048
        if self.is_fusion:
            return x, y, z

        z = F.avg_pool2d(z, kernel_size=8)
        # 1 x 1 x 2048
        z = F.dropout(z, training=self.training)
        # 1 x 1 x 2048
        z = z.view(z.size(0), -1)
        # 2048
        z = self.fc_(z)
        # 1000 (num_classes)
        if self.training and self.aux_logits:
            return z, aux
        return z
Example #36
    def forward(self, x):

        out = F.relu(
            self.en1_bn(F.max_pool2d(self.en1(self.encoder1(x)), 2, 2)))
        out1 = F.relu(
            self.enf1_bn(
                F.interpolate(self.enf1(self.encoderf1(x)),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.inte1_1bn(self.intere1_1(out1))),
                          scale_factor=(0.25, 0.25),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.inte1_2bn(self.intere1_2(tmp))),
                          scale_factor=(4, 4),
                          mode='bilinear'))

        u1 = out
        o1 = out1

        out = F.relu(
            self.en2_bn(F.max_pool2d(self.en2(self.encoder2(out)), 2, 2)))
        out1 = F.relu(
            self.enf2_bn(
                F.interpolate(self.enf2(self.encoderf2(out1)),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),
                          scale_factor=(0.0625, 0.0625),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),
                          scale_factor=(16, 16),
                          mode='bilinear'))

        u2 = out
        o2 = out1

        out = F.relu(
            self.en3_bn(F.max_pool2d(self.en3(self.encoder3(out)), 2, 2)))
        out1 = F.relu(
            self.enf3_bn(
                F.interpolate(self.enf3(self.encoderf3(out1)),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),
                          scale_factor=(0.015625, 0.015625),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),
                          scale_factor=(64, 64),
                          mode='bilinear'))

        ### End of encoder block

        # print(out.shape,out1.shape)

        out = F.relu(
            self.de1_bn(
                F.interpolate(self.de1(self.decoder1(out)),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        out1 = F.relu(
            self.def1_bn(F.max_pool2d(self.def1(self.decoderf1(out1)), 2, 2)))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),
                          scale_factor=(0.0625, 0.0625),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),
                          scale_factor=(16, 16),
                          mode='bilinear'))

        out = torch.add(out, u2)
        out1 = torch.add(out1, o2)

        out = F.relu(
            self.de2_bn(
                F.interpolate(self.de2(self.decoder2(out)),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        out1 = F.relu(
            self.def2_bn(F.max_pool2d(self.def2(self.decoderf2(out1)), 2, 2)))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),
                          scale_factor=(0.25, 0.25),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),
                          scale_factor=(4, 4),
                          mode='bilinear'))

        out = torch.add(out, u1)
        out1 = torch.add(out1, o1)

        out = F.relu(
            self.de3_bn(
                F.interpolate(self.de3(self.decoder3(out)),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        out1 = F.relu(
            self.def3_bn(F.max_pool2d(self.def3(self.decoderf3(out1)), 2, 2)))

        out = torch.add(out, out1)

        out = F.relu(self.final(out))

        # out = self.soft(out)
        # print(out.shape)
        return out
Example #37
    def forward(ctx, input, return_aggregation, win_size, peak_filter):
        ctx.num_flags = 4

        # assert win_size % 2 == 1, 'Window size for peak finding must be odd.'
        # offset = (win_size - 1) // 2
        # padding = torch.nn.ConstantPad2d(offset, float('-inf'))
        # padded_maps = padding(input)
        # batch_size, num_channels, h, w = padded_maps.size()
        # element_map = torch.arange(0, h * w).long().view(1, 1, h, w)[:, :, offset: -offset, offset: -offset]
        # element_map = element_map.to(input.device)
        # _, indices = F.max_pool2d(
        #     padded_maps,
        #     kernel_size=win_size,
        #     stride=1,
        #     return_indices=True)
        # peak_map = (indices == element_map)

        peak_map = None
        if isinstance(win_size, list):
            for win in win_size:
                offset = (win - 1) // 2
                padding = torch.nn.ConstantPad2d(offset, float('-inf'))
                padded_maps = padding(input)
                batch_size, num_channels, h, w = padded_maps.size()
                element_map = torch.arange(0, h * w).long().view(
                    1, 1, h, w)[:, :, offset:-offset, offset:-offset]
                element_map = element_map.to(input.device)
                _, indices = F.max_pool2d(padded_maps,
                                          kernel_size=win,
                                          stride=1,
                                          return_indices=True)
                if peak_map is None:
                    peak_map = (indices == element_map)
                else:
                    peak_map = peak_map | (indices == element_map)  # accumulate peaks across window sizes

        else:
            offset = (win_size - 1) // 2
            padding = torch.nn.ConstantPad2d(offset, float('-inf'))
            padded_maps = padding(input)
            batch_size, num_channels, h, w = padded_maps.size()
            element_map = torch.arange(0, h * w).long().view(1, 1, h,
                                                             w)[:, :,
                                                                offset:-offset,
                                                                offset:-offset]
            element_map = element_map.to(input.device)
            _, indices = F.max_pool2d(padded_maps,
                                      kernel_size=win_size,
                                      stride=1,
                                      return_indices=True)
            peak_map = (indices == element_map)
            np_input_person = padded_maps.cpu().data.numpy()[0, 16, :, :]
            person_indices = indices.cpu().data.numpy()[0, 16, :, :]
            np_peak_map_person = peak_map.cpu().data.numpy()[0, 16, :, :]

        # peak filtering
        if peak_filter:
            mask = input >= peak_filter(input)
            peak_map = (peak_map * mask)
        peak_list = torch.nonzero(peak_map)
        ctx.mark_non_differentiable(peak_list)

        # peak aggregation
        if return_aggregation:
            peak_map = peak_map.float()
            ctx.save_for_backward(input, peak_map)
            return peak_list, (input * peak_map).view(
                batch_size, num_channels, -1).sum(2) / peak_map.view(
                    batch_size, num_channels, -1).sum(2)
        else:
            return peak_list
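The core trick in this forward: pad the map with -inf, run F.max_pool2d with return_indices=True and stride 1, and flag a position as a peak when the window centred on it selects its own flat index. A condensed, self-contained sketch of that local-maximum test (odd win_size assumed):

import torch
import torch.nn.functional as F

def local_peaks(heat, win_size=3):
    offset = (win_size - 1) // 2
    # -inf padding so border windows never pick out-of-bounds positions
    padded = F.pad(heat, (offset,) * 4, value=float('-inf'))
    b, c, h, w = padded.size()
    # flat index (into the padded plane) of every valid centre position
    element_map = torch.arange(h * w).view(1, 1, h, w)[:, :, offset:-offset, offset:-offset]
    _, indices = F.max_pool2d(padded, kernel_size=win_size, stride=1, return_indices=True)
    return indices == element_map.to(heat.device)  # True where the centre wins its window

peaks = local_peaks(torch.randn(1, 1, 16, 16))  # bool mask of local maxima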
Example #38
 def forward(self, x):
     out = self.conv(F.relu(self.bn(x)))
     out = F.max_pool2d(out, 2)
     return out
Example #39
 def forward(self, x):
     p = int(np.floor((self.kernel_size - 1) / 2))
     p2d = (p, p, p, p)
     return F.max_pool2d(F.pad(x, p2d), self.kernel_size, stride=2)
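With p = floor((k - 1) / 2) and an odd kernel size k, this zero-pads just enough that the stride-2 pool returns ceil(H/2) x ceil(W/2), i.e. 'same'-style downsampling. Sketch with k = 3:

import torch
import torch.nn.functional as F

k = 3
p = (k - 1) // 2
x = torch.randn(1, 8, 7, 7)
y = F.max_pool2d(F.pad(x, (p, p, p, p)), k, stride=2)  # F.pad defaults to zero padding
assert y.shape == (1, 8, 4, 4)  # ceil(7/2) = 4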
Example #40
 def forward(self, x):
     #        print('x',x.size())
     y = self.encode(x)  # y has the same spatial size as x
     #        print('y',y.size())
     y_small = F.max_pool2d(y, kernel_size=2, stride=2)
     return y, y_small
Example #41
 def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
     out = self.layers(x)
     return out, F.max_pool2d(out, 2)
Example #42
     1000,
 ),
 ("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
 (
     "matmul",
     MF.matmul,
     torch.matmul,
     [(64, 32), (32, 64)],
     [(2048, 1024), (1024, 2048)],
     True,
     1000,
 ),
 (
     "max_pool2d",
     lambda x: MF.max_pool2d(x, 2),
     lambda x: TF.max_pool2d(x, 2),
     [(2, 32, 16, 16)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
 (
     "normal",
     lambda x: mge.random.normal(0, 1, x.shape),
     lambda x: torch.randn(x.shape, device="cuda"),
     [(100, 100)],
     [(64, 512, 16, 16)],
     True,
     1000,
 ),
 (
Example #43
 def forward(self, x):
     return [F.max_pool2d(x, 1, 2, 0)]
Example #44
    def forward(self, rgb_inputs, depth_inputs):

        ########  DEPTH ENCODER  ########

        # Stage 1
        x = self.conv11d(depth_inputs)
        x_1 = self.CBR1_D(x)
        x, id1_d = F.max_pool2d(x_1,
                                kernel_size=2,
                                stride=2,
                                return_indices=True)

        # Stage 2
        x_2 = self.CBR2_D(x)
        x, id2_d = F.max_pool2d(x_2,
                                kernel_size=2,
                                stride=2,
                                return_indices=True)

        # Stage 3
        x_3 = self.CBR3_D(x)
        x, id3_d = F.max_pool2d(x_3,
                                kernel_size=2,
                                stride=2,
                                return_indices=True)
        x = self.dropout3_d(x)

        # Stage 4
        x_4 = self.CBR4_D(x)
        x, id4_d = F.max_pool2d(x_4,
                                kernel_size=2,
                                stride=2,
                                return_indices=True)
        x = self.dropout4_d(x)

        # Stage 5
        x_5 = self.CBR5_D(x)

        ########  RGB ENCODER  ########

        # Stage 1
        y = self.CBR1_RGB(rgb_inputs)
        y = torch.add(y, x_1)
        y, id1 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)

        # Stage 2
        y = self.CBR2_RGB(y)
        y = torch.add(y, x_2)
        y, id2 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)

        # Stage 3
        y = self.CBR3_RGB(y)
        y = torch.add(y, x_3)
        y, id3 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)
        y = self.dropout3(y)

        # Stage 4
        y = self.CBR4_RGB(y)
        y = torch.add(y, x_4)
        y, id4 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)
        y = self.dropout4(y)

        # Stage 5
        y = self.CBR5_RGB(y)
        y = torch.add(y, x_5)
        y_size = y.size()

        y, id5 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)
        y = self.dropout5(y)

        ########  DECODER  ########

        # Stage 5 dec
        y = F.max_unpool2d(y, id5, kernel_size=2, stride=2, output_size=y_size)
        y = self.CBR5_Dec(y)

        # Stage 4 dec
        y = F.max_unpool2d(y, id4, kernel_size=2, stride=2)
        y = self.CBR4_Dec(y)

        # Stage 3 dec
        y = F.max_unpool2d(y, id3, kernel_size=2, stride=2)
        y = self.CBR3_Dec(y)

        # Stage 2 dec
        y = F.max_unpool2d(y, id2, kernel_size=2, stride=2)
        y = self.CBR2_Dec(y)

        # Stage 1 dec
        y = F.max_unpool2d(y, id1, kernel_size=2, stride=2)
        y = self.CBR1_Dec(y)

        return y
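The encoder/decoder pairing above hinges on return_indices=True: the indices recorded by each F.max_pool2d are handed to F.max_unpool2d so activations return to the exact locations they were pooled from (SegNet/FuseNet-style unpooling). A minimal round-trip sketch:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4)
pooled, idx = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
restored = F.max_unpool2d(pooled, idx, kernel_size=2, stride=2)
# restored is (1, 1, 4, 4): each window's max is back in place, everything else is zero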
Example #45
    def forward(self, x):
        # Stage 1
        x11 = F.relu(self.conv1_1(x))
        x12 = F.relu(self.conv1_2(x11))
        x1p, id1 = F.max_pool2d(x12,
                                kernel_size=(2, 2),
                                stride=(2, 2),
                                return_indices=True)

        # Stage 2
        x21 = F.relu(self.conv2_1(x1p))
        x22 = F.relu(self.conv2_2(x21))
        x2p, id2 = F.max_pool2d(x22,
                                kernel_size=(2, 2),
                                stride=(2, 2),
                                return_indices=True)

        # Stage 3
        x31 = F.relu(self.conv3_1(x2p))
        x32 = F.relu(self.conv3_2(x31))
        x33 = F.relu(self.conv3_3(x32))
        x3p, id3 = F.max_pool2d(x33,
                                kernel_size=(2, 2),
                                stride=(2, 2),
                                return_indices=True)

        # Stage 4
        x41 = F.relu(self.conv4_1(x3p))
        x42 = F.relu(self.conv4_2(x41))
        x43 = F.relu(self.conv4_3(x42))
        x4p, id4 = F.max_pool2d(x43,
                                kernel_size=(2, 2),
                                stride=(2, 2),
                                return_indices=True)

        # Stage 5
        x51 = F.relu(self.conv5_1(x4p))
        x52 = F.relu(self.conv5_2(x51))
        x53 = F.relu(self.conv5_3(x52))
        x5p, id5 = F.max_pool2d(x53,
                                kernel_size=(2, 2),
                                stride=(2, 2),
                                return_indices=True)

        # Stage 6
        x61 = F.relu(self.conv6_1(x5p))

        # Stage 6d
        x61d = F.relu(self.deconv6_1(x61))

        # Stage 5d
        x5d = F.max_unpool2d(x61d, id5, kernel_size=2, stride=2)
        x51d = F.relu(self.deconv5_1(x5d))

        # Stage 4d
        x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)
        x41d = F.relu(self.deconv4_1(x4d))

        # Stage 3d
        x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)
        x31d = F.relu(self.deconv3_1(x3d))

        # Stage 2d
        x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)
        x21d = F.relu(self.deconv2_1(x2d))

        # Stage 1d
        x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)
        x12d = F.relu(self.deconv1_1(x1d))

        # Should we add a sigmoid? The GitHub repo does.
        raw_alpha = self.deconv1(x12d)
        pred_mattes = torch.sigmoid(raw_alpha)

        if self.stage <= 1:
            return pred_mattes, 0

        # Stage2 refine conv1
        refine0 = torch.cat((x[:, :3, :, :], pred_mattes), 1)
        refine1 = F.relu(self.refine_conv1(refine0))
        refine2 = F.relu(self.refine_conv2(refine1))
        refine3 = F.relu(self.refine_conv3(refine2))
        # Should we add a sigmoid here too?
        # A sigmoid here made the refined result all converge to 0...
        #pred_refine = F.sigmoid(self.refine_pred(refine3))
        pred_refine = self.refine_pred(refine3)

        pred_alpha = torch.sigmoid(raw_alpha + pred_refine)

        #print(pred_mattes.mean(), pred_alpha.mean(), pred_refine.sum())

        return pred_mattes, pred_alpha
Example #46
    def forward(self, im_data, im_info, gt_boxes, num_boxes):
        batch_size = im_data.size(0)

        im_info = im_info.data
        gt_boxes = gt_boxes.data
        num_boxes = num_boxes.data

        # feed image data to base model to obtain base feature map
        base_feat = self.RCNN_base(im_data)

        # feed the base feature map to the RPN to obtain rois
        rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)

        # if in the training phase, use ground-truth bboxes for refining
        if self.training:
            roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)
            rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data

            rois_label = Variable(rois_label.view(-1).long())
            rois_target = Variable(rois_target.view(-1, rois_target.size(2)))
            rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))
            rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))
        else:
            rois_label = None
            rois_target = None
            rois_inside_ws = None
            rois_outside_ws = None
            rpn_loss_cls = torch.zeros(1)
            rpn_loss_bbox = torch.zeros(1)

        rois = Variable(rois)
        # do roi pooling based on predicted rois

        if cfg.POOLING_MODE == 'crop':
            # pdb.set_trace()
            # pooled_feat_anchor = _crop_pool_layer(base_feat, rois.view(-1, 5))
            grid_xy = _affine_grid_gen(rois.view(-1, 5), base_feat.size()[2:], self.grid_size)
            grid_yx = torch.stack([grid_xy.data[:,:,:,1], grid_xy.data[:,:,:,0]], 3).contiguous()
            pooled_feat = self.RCNN_roi_crop(base_feat, Variable(grid_yx).detach())
            if cfg.CROP_RESIZE_WITH_MAX_POOL:
                pooled_feat = F.max_pool2d(pooled_feat, 2, 2)
        elif cfg.POOLING_MODE == 'align':
            pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))
        elif cfg.POOLING_MODE == 'pool':
            pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))

        # feed pooled features to top model
        pooled_feat = self._head_to_tail(pooled_feat)

        # compute bbox offset
        bbox_pred = self.RCNN_bbox_pred(pooled_feat)
        if self.training and not self.class_agnostic:
            # select the corresponding columns according to roi labels
            bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)
            bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
            bbox_pred = bbox_pred_select.squeeze(1)

        # compute object classification probability
        cls_score = self.RCNN_cls_score(pooled_feat)
        cls_prob = F.softmax(cls_score, 1)

        RCNN_loss_cls = torch.zeros(1)
        RCNN_loss_bbox = torch.zeros(1)

        if self.training:
            # classification loss
            RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)

            # bounding box regression L1 loss
            RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)


        cls_prob = cls_prob.view(batch_size, rois.size(1), -1)
        bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)

        # return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label
        return rois, cls_prob, bbox_pred
Example #47
    def forward(self, x):

        out = F.relu(self.en1_bn(F.max_pool2d(self.encoder1(x), 2,
                                              2)))  #U-Net branch
        out1 = F.relu(
            self.enf1_bn(
                F.interpolate(self.encoderf1(x),
                              scale_factor=(2, 2),
                              mode='bilinear')))  #Ki-Net branch
        tmp = out
        out = torch.add(out,
                        F.interpolate(F.relu(
                            self.inte1_1bn(self.intere1_1(out1))),
                                      scale_factor=(0.25, 0.25),
                                      mode='bilinear'))  #CRFB
        out1 = torch.add(out1,
                         F.interpolate(F.relu(
                             self.inte1_2bn(self.intere1_2(tmp))),
                                       scale_factor=(4, 4),
                                       mode='bilinear'))  #CRFB

        u1 = out  #skip conn
        o1 = out1  #skip conn

        out = F.relu(self.en2_bn(F.max_pool2d(self.encoder2(out), 2, 2)))
        out1 = F.relu(
            self.enf2_bn(
                F.interpolate(self.encoderf2(out1),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),
                          scale_factor=(0.0625, 0.0625),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),
                          scale_factor=(16, 16),
                          mode='bilinear'))

        u2 = out
        o2 = out1

        out = F.relu(self.en3_bn(F.max_pool2d(self.encoder3(out), 2, 2)))
        out1 = F.relu(
            self.enf3_bn(
                F.interpolate(self.encoderf3(out1),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),
                          scale_factor=(0.015625, 0.015625),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),
                          scale_factor=(64, 64),
                          mode='bilinear'))

        ### End of encoder block

        ### Start Decoder

        out = F.relu(
            self.de1_bn(
                F.interpolate(self.decoder1(out),
                              scale_factor=(2, 2),
                              mode='bilinear')))  #U-NET
        out1 = F.relu(self.def1_bn(F.max_pool2d(self.decoderf1(out1), 2,
                                                2)))  #Ki-NET
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),
                          scale_factor=(0.0625, 0.0625),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),
                          scale_factor=(16, 16),
                          mode='bilinear'))

        out = torch.add(out, u2)  #skip conn
        out1 = torch.add(out1, o2)  #skip conn

        out = F.relu(
            self.de2_bn(
                F.interpolate(self.decoder2(out),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        out1 = F.relu(self.def2_bn(F.max_pool2d(self.decoderf2(out1), 2, 2)))
        tmp = out
        out = torch.add(
            out,
            F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),
                          scale_factor=(0.25, 0.25),
                          mode='bilinear'))
        out1 = torch.add(
            out1,
            F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),
                          scale_factor=(4, 4),
                          mode='bilinear'))

        out = torch.add(out, u1)
        out1 = torch.add(out1, o1)

        out = F.relu(
            self.de3_bn(
                F.interpolate(self.decoder3(out),
                              scale_factor=(2, 2),
                              mode='bilinear')))
        out1 = F.relu(self.def3_bn(F.max_pool2d(self.decoderf3(out1), 2, 2)))

        out = torch.add(out, out1)  # fusion of both branches

        out = F.relu(self.final(out))  #1*1 conv

        # out = self.soft(out)

        return out
Example #48
 def forward(self, x):
     return F.max_pool2d(x, (x.size(-2), x.size(-1)))
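A kernel spanning the full spatial extent makes this a global max pool; F.adaptive_max_pool2d with output size 1 computes the same thing. Sketch:

import torch
import torch.nn.functional as F

x = torch.randn(2, 64, 7, 9)
a = F.max_pool2d(x, (x.size(-2), x.size(-1)))  # kernel = full H x W -> (2, 64, 1, 1)
b = F.adaptive_max_pool2d(x, 1)                # same result via adaptive pooling
assert torch.equal(a, b)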
Example #49
    def net_forward(x, layer_by_layer=False, from_layer=0):
        x = x - nrms_mean  # cannot be inplace
        x.div_(nrms_std)
        if not layer_by_layer:
            return net(x)
        cldr = list(net.children())
        if args.model.startswith('resnet'):
            x = net.conv1(x)
            x = net.bn1(x)
            x = net.relu(x)
            x = net.maxpool(x)

            x = net.layer1(x)
            x = net.layer2(x)
            x = net.layer3(x)
            x = net.layer4(x)

            outputs = [net.avgpool(x)]
            flat_features = outputs[-1].view(x.size(0), -1)
            outputs.append(net.fc(flat_features))
        elif args.model.startswith('inception'):
            # 299 x 299 x 3
            x = net.Conv2d_1a_3x3(x)
            # 149 x 149 x 32
            x = net.Conv2d_2a_3x3(x)
            # 147 x 147 x 32
            x = net.Conv2d_2b_3x3(x)
            # 147 x 147 x 64
            x = F.max_pool2d(x, kernel_size=3, stride=2)
            # 73 x 73 x 64
            x = net.Conv2d_3b_1x1(x)
            # 73 x 73 x 80
            x = net.Conv2d_4a_3x3(x)
            # 71 x 71 x 192
            x = F.max_pool2d(x, kernel_size=3, stride=2)
            # 35 x 35 x 192
            x = net.Mixed_5b(x)
            # 35 x 35 x 256
            x = net.Mixed_5c(x)
            # 35 x 35 x 288
            x = net.Mixed_5d(x)
            # 35 x 35 x 288
            x = net.Mixed_6a(x)
            # 17 x 17 x 768
            x = net.Mixed_6b(x)
            # 17 x 17 x 768
            x = net.Mixed_6c(x)
            # 17 x 17 x 768
            x = net.Mixed_6d(x)
            # 17 x 17 x 768
            x = net.Mixed_6e(x)
            # 17 x 17 x 768
            x = net.Mixed_7a(x)
            # 8 x 8 x 1280
            x = net.Mixed_7b(x)
            # 8 x 8 x 2048
            x = net.Mixed_7c(x)
            # 8 x 8 x 2048
            x = F.avg_pool2d(x, kernel_size=8)
            # 1 x 1 x 2048
            outputs = [F.dropout(x, training=net.training)]
            # 1 x 1 x 2048
            flat_features = outputs[-1].view(x.size(0), -1)
            # 2048
            outputs.append(net.fc(flat_features))
            # 1000 (num_classes)
        else:
            outputs = [net.features(x)]
            for cidx, c in enumerate(net.classifier.children()):
                flat_features = outputs[-1].view(x.size(0), -1)
                outputs.append(c(flat_features))
        return outputs
Example #50
    def forward(self, x):
        features = None
        # --> fixed-size input: batch x 3 x 299 x 299
        x = nn.functional.interpolate(x,
                                      size=(299, 299),
                                      mode='bilinear',
                                      align_corners=False)  # upsample or downsample to the given size
        # 299 x 299 x 3

        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192

        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288

        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768

        # image region features
        # features = x
        # 17 x 17 x 768

        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        # x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        # x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        # x = x.view(x.size(0), -1)   # for visual_feature_extraction.py  use this as the output
        x = x.mean(dim=(2, 3))
        # 2048

        # global image features
        # cnn_code = self.emb_cnn_code(x)
        # 512
        # if features is not None:
        #     features = self.emb_features(features)
        return x  #cnn_code  #1024
Example #51
    def forward(self, x):

        #print (x.size())
        x = self.init_conv1(x)
        
        x = self.init_bn1(x)
        x = F.relu(x, True)
        #conv_x1 = x

        x = self.init_res1(x)
        x = self.init_conv2(x)
        #print ("Skip", x.size())
        
        #print ("Skip", conv_x.size())
        x = self.init_bn2(x)
        x = F.relu(x, True)
        conv_x2 = x
        x = F.max_pool2d(x, 2)
        ### could be a 1x1 conv
        x = self.init_res2(x)
        x = self.init_res3(x)
        
       
        x = self.core_hourglass(x)
        #x = self.core_hourglass1(x)
        #x = self.core_hourglass2(x)
        x = self.tail_res0(x)
        x = self.tail_res00(x)
        

        x = self.tail_deconv1(x)
        x = self.tail_bn1(x)
        x = F.relu(x, True)
        #print ("Dce 1", x.size())
        

        #split = x
        #print x.shape 
        x = self.tail_res1(x)
        #x = self.tail_res1_1(x)


        ###################
        x = torch.cat((x, conv_x2),1)
        x= self.init_conv_union(x)
        x= self.init_bn_union(x) 
        x = F.relu(x, True)
        ###################
        x = self.tail_resx(x) 

        x = self.tail_deconv2(x)
        x = self.tail_bn2(x)
        x = F.relu(x, True)
        x = self.dropout(x)

        #print ("Dce 2", x.size())
        #split = x


        x = self.tail_res2(x)
        #out1 = x
        ###################
        #x = torch.cat((x,split),1)
        #x= self.init_conv_union1(x)
        #x= self.init_bn_union1(x) 
        #x = F.relu(x, True)
        ###################
        # Right tail
        #x = self.rtail_deconv2(split)
        #x = self.rtail_bn2(x)
        #x = F.relu(x, True)
        
        #x = self.rtail_res2(x)
        x = self.rtail_res2_2(x)
        #x = self.rtail_res2_3(x)



        #out2 = x
        #out = torch.cat((out1,out2),1)
        #out1 = self.init_conv3(out1)        
        out2 = self.init_conv4(x)        

        return out2
Example #52
 def forward(self, x):
     x = self.pre(x)
     x = F.max_pool2d(x, 2)
     return self.conv_bn2(self.conv_bn1(x)) + x
Example #53
def non_maximum_suppression(a):
    ap = F.max_pool2d(a, 3, stride=1, padding=1)
    mask = (a == ap).float().clamp(min=0.0)
    return a * mask
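non_maximum_suppression keeps only local maxima of a heatmap: the 3x3 max pool (stride 1, padding 1) leaves a value unchanged exactly where it already dominates its neighbourhood, and the equality mask zeroes everything else, the standard CenterNet-style peak suppression. A hedged usage sketch:

import torch

heat = torch.rand(1, 1, 64, 64)                      # e.g. a keypoint heatmap
suppressed = non_maximum_suppression(heat)           # non-peaks become zero
scores, flat_idx = suppressed.view(1, -1).topk(10)   # top-10 surviving peaks
ys, xs = flat_idx // 64, flat_idx % 64               # recover 2-D coordinates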
Example #54
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 3))
     x = x.view(-1, 192)
     x = self.fc(x)
     return F.log_softmax(x, dim=1)
Example #55
	def get_conv_output(self, x):
		# Layer 1: Convolutional + 3x3 Max Pooling + Batch Norm
		conv1_out = self.conv1(x)
		pool1_out = F.max_pool2d(conv1_out, 3)
		bn1_out = self.bn1(pool1_out)
		
		# Layer 2: Convolutional + Batch Norm
		conv2_out = self.conv2(bn1_out)
		bn2_out = self.bn2(conv2_out)
		
		# Layer 3: Convolutional + 2x2 Max Pooling + Batch Norm
		conv3_out = self.conv3(bn2_out)
		pool3_out = F.max_pool2d(conv3_out, 2)
		bn3_out = self.bn3(pool3_out)
		
		# Layer 4: Convolutional + Batch Norm
		conv4_out = self.conv4(bn3_out)
		bn4_out = self.bn4(conv4_out)
		
		# Layer 5: Convolutional + 2x2 Max Pooling + Batch Norm
		conv5_out = self.conv5(bn4_out)
		pool5_out = F.max_pool2d(conv5_out, 2)
		bn5_out = self.bn5(pool5_out)
		
		# Layer 6: Convolutional + Batch Norm
		conv6_out = self.conv6(bn5_out)
		bn6_out = self.bn6(conv6_out)
		
		# Layer 7: Convolutional + 2x2 Max Pooling + Batch Norm
		conv7_out = self.conv7(bn6_out)
		pool7_out = F.max_pool2d(conv7_out, 2)
		bn7_out = HF.modified_bn(self.bn7, pool7_out)
		
		# Layer 8: Convolutional + Batch Norm
		conv8_out = self.conv8(bn7_out)
		bn8_out = HF.modified_bn(self.bn8, conv8_out)
		
		# Build dictionary containing outputs of each layer
		conv_out = {
			self.CONV1: conv1_out,
			self.POOL1: pool1_out,
			self.BN1: bn1_out,
			self.CONV2: conv2_out,
			self.BN2: bn2_out,
			self.CONV3: conv3_out,
			self.POOL3: pool3_out,
			self.BN3: bn3_out,
			self.CONV4: conv4_out,
			self.BN4: bn4_out,
			self.CONV5: conv5_out,
			self.POOL5: pool5_out,
			self.BN5: bn5_out,
			self.CONV6: conv6_out,
			self.BN6: bn6_out,
			self.CONV7: conv7_out,
			self.POOL7: pool7_out,
			self.BN7: bn7_out,
			self.CONV8: conv8_out,
			self.BN8: bn8_out,
		}
		return conv_out
Example #56
 def forward(self, x):
     x = self.conv(x)
     x = F.max_pool2d(x, kernel_size=x.size()[2:]).squeeze()
     return self.fc(x)
Example #57
    def forward(self, x):
        nsample = x.size()[3]
        x = self.activation(self.bn_avg(self.conv_avg(x)))
        x = F.max_pool2d(x, kernel_size=(1, nsample)).squeeze(3)

        return x
Example #58
 def forward(self, x):
     x = self.static_padding(x)
     x = F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
                      self.dilation, self.ceil_mode, self.return_indices)
     return x
Example #59
 def max_pool2d(self, in_features, kernel_size, stride, padding):
     return F.max_pool2d(in_features, kernel_size=kernel_size, stride=stride, padding=padding)
Example #60
def max_filter(img):
    assert img.dim() == 3
    img = img.unsqueeze(0)
    img = tnf.max_pool2d(img, kernel_size=3, stride=1, padding=1)
    return img.squeeze(0)
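max_filter applies a 3x3 max (dilation) filter to a single CHW image: the unsqueeze/squeeze pair adds and removes the batch dimension that max_pool2d expects, and stride 1 with padding 1 preserves the spatial size. Usage sketch (tnf aliasing torch.nn.functional, matching the import style above):

import torch

img = torch.rand(3, 32, 32)      # CHW image, no batch dim
dilated = max_filter(img)        # same shape, each pixel replaced by its 3x3 max
assert dilated.shape == img.shape and bool(torch.all(dilated >= img))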