Example #1
    def forward(self, x1, finger, x3):

        protein = x3.view(x3.size(0), -1).to(device)

        x1 = torch.unsqueeze(x1, 1)
        pic = self.conv(x1)
        # print(x1.shape, pic.shape)
        pic = self.bn(pic)
        pic = self.r_elu(pic)
        pic = self.layer1(pic)
        pic = self.layer2(pic)

        spp = spatial_pyramid_pool(
            pic, pic.size(0),
            [int(pic.size(2)), int(pic.size(3))], self.output_num)
        # print(spp.shape, "spp.shape")

        fc1 = F.relu(self.fc1(spp))
        fc2 = F.relu(self.fc2(fc1))

        sscomplex = torch.cat([fc2, finger, protein], dim=1)
        sscomplex = torch.relu(self.linear_final_step(sscomplex))

        if not bool(self.type):
            pred = self.linear_final(sscomplex)
            pic_output = torch.sigmoid(pred)
            return pic_output
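All of the examples in this listing call a spatial_pyramid_pool helper that is defined elsewhere in each repository, and its exact signature varies from snippet to snippet (some pass self, some use keyword arguments, some pool in 3-D). As a point of reference only, here is a minimal 2-D sketch with the positional signature spatial_pyramid_pool(previous_conv, num_sample, previous_conv_size, out_pool_size); it uses adaptive max pooling instead of the hand-computed kernel/stride/padding of the usual SPP ports, so it is a simplification rather than the exact helper these models were trained with.

import torch
import torch.nn.functional as F


def spatial_pyramid_pool(previous_conv, num_sample, previous_conv_size, out_pool_size):
    # previous_conv:      feature map of shape [num_sample, C, H, W]
    # previous_conv_size: [H, W] of that map (unused here; kept for signature compatibility)
    # out_pool_size:      pyramid levels, e.g. [4, 2, 1] -> 16 + 4 + 1 bins per channel
    pooled = []
    for bins in out_pool_size:
        # adaptive pooling yields a bins x bins grid for any input H and W
        level = F.adaptive_max_pool2d(previous_conv, output_size=(bins, bins))
        pooled.append(level.view(num_sample, -1))
    # fixed-length descriptor: [num_sample, C * sum(b * b for b in out_pool_size)]
    return torch.cat(pooled, dim=1)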
Example #2
    def __init__(self,
                 input_nc=1,
                 ndf=64,
                 gpu_ids=[],
                 num_class=2):  #initialized variables
        super(Network, self).__init__()
        self.gpu_ids = gpu_ids
        self.output_num = (4, 2, 2, 1)

        # Four conv blocks, each built by _make_conv_layer (conv, relu, maxpool)
        self.conv_layer1 = self._make_conv_layer(3, 32)
        self.conv_layer2 = self._make_conv_layer(32, 64)
        self.conv_layer3 = self._make_conv_layer(64, 124)
        self.conv_layer4 = self._make_conv_layer(124, 256)

        # The 3D SPP step has no learnable parameters, so nothing is built here;
        # spatial_pyramid_pool is called in forward() with self.output_num as the
        # pyramid configuration.

        # Three fully connected layers
        self.fc1 = nn.Linear(4096, 2048)
        self.fc2 = nn.Linear(2048, 512)
        self.fc3 = nn.Linear(512, num_class)
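The snippet above stops at __init__. A minimal forward() sketch that would pair with it is given below, assuming a 3-D variant of spatial_pyramid_pool with the positional signature used in Example #1, F = torch.nn.functional, and that the flattened pyramid really has 4096 features (that number depends on self.output_num and the final channel count, so treat it as a placeholder).

    def forward(self, x):
        # x: [N, C, D, H, W] volumetric input
        out = self.conv_layer1(x)
        out = self.conv_layer2(out)
        out = self.conv_layer3(out)
        out = self.conv_layer4(out)
        # 3D SPP: pool the variable-size volume into a fixed-length vector
        spp = spatial_pyramid_pool(
            out, out.size(0),
            [int(out.size(2)), int(out.size(3)), int(out.size(4))],
            self.output_num)
        out = F.relu(self.fc1(spp))
        out = F.relu(self.fc2(out))
        return self.fc3(out)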
Example #3
    def through_spp(self, x):  # spp_layer
        for i in range(BATCH_SIZE):
            y_piece = torch.unsqueeze(
                spatial_pyramid_pool(previous_conv=x[i, :], num_sample=R,
                                     previous_conv_size=[x.size(3), x.size(4)],
                                     out_pool_size=[2, 2]), 0)
            if i == 0:
                y = y_piece
                # print(y_piece.shape)
            else:
                y = torch.cat((y, y_piece))
                # print(y.shape)
        return y
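Collecting the per-sample results in a Python list and stacking once at the end is an equivalent way to write this loop that avoids growing y by repeated concatenation. The sketch below assumes the same module-level BATCH_SIZE and R and the same keyword signature of spatial_pyramid_pool used above.

    def through_spp(self, x):
        # per-sample SPP, stacked into [BATCH_SIZE, R, feat] at the end
        pieces = [
            spatial_pyramid_pool(previous_conv=x[i, :], num_sample=R,
                                 previous_conv_size=[x.size(3), x.size(4)],
                                 out_pool_size=[2, 2])
            for i in range(BATCH_SIZE)
        ]
        return torch.stack(pieces, dim=0)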
Example #4
    def forward(self, x):

        x = self.conv1(x)
        output_num = [8, 4, 1]
        x_spp = spatial_pyramid_pool(
            x, self.batch_size,
            [int(x.size(2)), int(x.size(3))], output_num)
        x = self.liner(x_spp)
        x = self.out(x)

        return x
Example #5
    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        # SPP flattens the feature map into a fixed-length vector
        # print(out.view(out.size(0), -1).size())
        # print(out.size())
        feature = spatial_pyramid_pool(
            self, out, out.size(0),
            [int(out.size(2)), int(out.size(3))], self.output_num)
        out = self.fc1(feature)
        out = self.fc2(out)
        return out, feature
Example #6
    def forward(self, x1, x2):

        x1 = torch.unsqueeze(x1, 1)
        drug1 = self.conv_drug1(x1)
        # print(x1.shape, drug1.shape)
        drug1 = self.bn_drug1(drug1)
        drug1 = self.elu_drug1(drug1)
        drug1 = self.layer1_drug1(drug1)
        drug1 = self.layer2_drug1(drug1)

        drug2 = torch.unsqueeze(x2, 1)
        drug2 = self.conv_drug2(drug2)
        # print(x2.shape, drug2.shape)
        drug2 = self.bn_drug2(drug2)
        drug2 = self.elu_drug2(drug2)
        drug2 = self.layer1_drug2(drug2)
        drug2 = self.layer2_drug2(drug2)

        # print(drug1.shape, drug2.shape)

        spp_drug1 = spatial_pyramid_pool(drug1, drug1.size(0), [int(drug1.size(2)), int(drug1.size(3))],
                                         self.output_num)
        spp_drug2 = spatial_pyramid_pool(drug2, drug2.size(0), [int(drug2.size(2)), int(drug2.size(3))],
                                         self.output_num)
        # print(spp_drug1.shape, spp_drug2.shape)
        fc1_drug1 = F.relu(self.fc1(spp_drug1))
        fc2_drug1 = F.relu(self.fc2(fc1_drug1))

        fc1_drug2 = F.relu(self.fc1(spp_drug2))
        fc2_drug2 = F.relu(self.fc2(fc1_drug2))

        # print(fc2_drug1.shape, fc2_drug2.shape)

        sscomplex = torch.cat([fc2_drug1, fc2_drug2], dim=1)
        sscomplex = torch.relu(self.linear_final_step(sscomplex))

        if not bool(self.type):
            pred = self.linear_final(sscomplex)
            pic_output = torch.sigmoid(pred)
            return pic_output
Example #7
    def through_spp_new(self, x, ssw):
        # x.shape   = [BATCH_SIZE, 512, 14, 14]
        # ssw.shape = [BATCH_SIZE, R, 4]
        # returns y with shape [BATCH_SIZE, R, 4096]
        for i in range(BATCH_SIZE):
            for j in range(ssw.size(1)):
                fmap_piece = torch.unsqueeze(
                    x[i, :,
                      floor(ssw[i, j, 0]):floor(ssw[i, j, 0] + ssw[i, j, 2]),
                      floor(ssw[i, j, 1]):floor(ssw[i, j, 1] + ssw[i, j, 3])], 0)
                fmap_piece = spatial_pyramid_pool(
                    previous_conv=fmap_piece, num_sample=1,
                    previous_conv_size=[fmap_piece.size(2), fmap_piece.size(3)],
                    out_pool_size=[2, 2])
                if j == 0:
                    y_piece = fmap_piece
                    # print('fmap_piece.shape', fmap_piece.shape)
                else:
                    y_piece = torch.cat((y_piece, fmap_piece))
            if i == 0:
                y = torch.unsqueeze(y_piece, 0)
                # print('y_piece', y_piece.shape)
            else:
                y = torch.cat((y, torch.unsqueeze(y_piece, 0)))
        return y
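A hedged variant of the same per-window pooling is sketched below: it accumulates results in Python lists and casts the window coordinates to plain ints before slicing, assuming (as the slicing above implies) that each ssw row holds [row, col, height, width] in feature-map units. Note the end index here is truncated start plus truncated length, which can differ by one pixel from floor(start + length) when the coordinates are fractional.

    def through_spp_new(self, x, ssw):
        batch_out = []
        for i in range(BATCH_SIZE):
            window_out = []
            for j in range(ssw.size(1)):
                r0 = int(ssw[i, j, 0])   # window top-left row
                c0 = int(ssw[i, j, 1])   # window top-left column
                h = int(ssw[i, j, 2])    # window height
                w = int(ssw[i, j, 3])    # window width
                crop = x[i:i + 1, :, r0:r0 + h, c0:c0 + w]  # keep the batch dim
                window_out.append(spatial_pyramid_pool(
                    previous_conv=crop, num_sample=1,
                    previous_conv_size=[crop.size(2), crop.size(3)],
                    out_pool_size=[2, 2]))
            batch_out.append(torch.cat(window_out, dim=0))   # [R, feat]
        return torch.stack(batch_out, dim=0)                 # [BATCH_SIZE, R, feat]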
Example #8
    def forward(self, x):
        x = self.conv1(x)
        x = self.LReLU1(x)

        x = self.conv2(x)
        x = F.leaky_relu(self.BN1(x))

        x = self.conv3(x)
        x = F.leaky_relu(self.BN2(x))

        x = self.conv4(x)
        # x = F.leaky_relu(self.BN3(x))
        # x = self.conv5(x)
        spp = spatial_pyramid_pool(
            x, 1, [int(x.size(2)), int(x.size(3))], self.output_num)
        # print(spp.size())
        fc1 = self.fc1(spp)
        fc2 = self.fc2(fc1)
        output = torch.sigmoid(fc2)
        return output
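The spatial_pyramid_pool call above hard-codes num_sample=1, so this forward only matches batches of size one. If the network is ever run with larger batches, passing the actual batch size, as the other examples do, is the straightforward adjustment (assuming the same helper signature):

        spp = spatial_pyramid_pool(
            x, x.size(0), [int(x.size(2)), int(x.size(3))], self.output_num)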