Example #1
    def intermediate_forward(self, x, layer_index):
        feature_list = list(self.net.children())
        feature4 = feature_list[4]
        feature3 = feature_list[3]
        feature2 = feature_list[2]
        feature1 = feature_list[1]
        feature0 = feature_list[0]

        out = feature0(x)
        if layer_index == 0:
            out = F.max_pool2d(out, 32).view(out.size(0), -1)
            return out
        out = feature1(out)
        if layer_index == 1:
            out = F.max_pool2d(out, 32).view(out.size(0), -1)
            return out
        out = feature2(out)
        if layer_index == 2:
            out = F.max_pool2d(out, 32).view(out.size(0), -1)
            return out
        out = feature3(out)
        if layer_index == 3:
            out = F.avg_pool2d(out, 16).view(out.size(0), -1)
            return out
        out = feature4(out)
        if layer_index == 4:
            out = F.avg_pool2d(out, 4).view(out.size(0), -1)
            return out
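The pooling kernel sizes used here (32, 16 and 4) presumably match the spatial size of the feature map at each depth, so every call collapses a layer's output into one fixed-length vector per image. A minimal sketch of that collapse, with a made-up 32x32 feature map (shapes are illustrative, not from the original network):

import torch
import torch.nn.functional as F

# Hypothetical early-layer output: batch of 4, 64 channels, 32x32 spatial size.
out = torch.randn(4, 64, 32, 32)

# A pooling kernel equal to the spatial size collapses each channel to a single
# value, so .view(batch, -1) yields one fixed-length vector per image.
flat = F.max_pool2d(out, 32).view(out.size(0), -1)
print(flat.shape)  # torch.Size([4, 64])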
Example #2
    def forward(self, input):
        x = input
        # This is a hack to work around the fact that the Inception network
        # provided by torchvision does not expose its max-pooling layers as
        # attributes in the constructor, so the pooling is applied manually here.
        for name, module in self.inception_net.named_children():
            if name == 'Conv2d_2b_3x3':
                x = self.inception_net.Conv2d_2b_3x3(x)
                x = F.max_pool2d(x, kernel_size=3, stride=2)
                continue

            if name == 'Conv2d_4a_3x3':
                x = self.inception_net.Conv2d_4a_3x3(x)
                x = F.max_pool2d(x, kernel_size=3, stride=2)
                continue
            x = module(x)

        # Adaptive average pooling for inceptionV3
        x = F.adaptive_avg_pool2d(x, (1, 1))
        # N x 2048 x 1 x 1
        x = F.dropout(x, training=self.training)
        # N x 2048 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 2048
        v_out = self.output(self.vowel_output(x))
        c_out = self.output(self.consonants_output(x))

        return v_out, c_out
Example #3
    def feature_list(self, x):
        feature_list = list(self.net.children())
        feature5 = feature_list[5]
        feature4 = feature_list[4]
        feature3 = feature_list[3]
        feature2 = feature_list[2]
        feature1 = feature_list[1]
        feature0 = feature_list[0]

        out_list = []
        out = feature0(x)
        out_list.append(F.max_pool2d(out, 32).view(out.size(0), -1))
        out = feature1(out)
        out_list.append(F.max_pool2d(out, 32).view(out.size(0), -1))
        out = feature2(out)
        out_list.append(F.max_pool2d(out, 32).view(out.size(0), -1))
        out = feature3(out)
        out_list.append(F.avg_pool2d(out, 16).view(out.size(0), -1))
        out = feature4(out)
        out_list.append(F.avg_pool2d(out, 4).view(out.size(0), -1))

        out = feature5(out)
        out = F.avg_pool2d(out, 4).view(out.size(0), -1)
        out = self.gaussian_layer(out)
        out_list.append(out)

        return out_list
Example #4
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     feature = F.dropout(x, training=self.training)
     return self.fc2(feature), feature
Example #5
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #6
 def forward(self, x):
     # x = self.gen(x)
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # x goes through conv1, then the ReLU activation, then 2x2 max pooling, and x is updated.
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # x goes through conv2, then ReLU, then 2x2 max pooling, and x is updated.
     x = x.view(-1, self.num_flat_features(x))  # view flattens x into a vector; the total number of features is unchanged, ready for the fully connected layers.
     x = F.relu(self.fc1(x))  # x goes through fully connected layer fc1, then ReLU, and x is updated.
     x = F.relu(self.fc2(x))  # x goes through fully connected layer fc2, then ReLU, and x is updated.
     x = self.fc3(x)  # x goes through fully connected layer fc3, and x is updated.
     return x
Example #7
 def forward(self, x):
     # Max pooling over a (2, 2) window
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
     # If the size is a square you can only specify a single number
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
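As the comments in Example #7 note, a square pooling window can be given either as a tuple or as a single integer. A minimal check on a made-up input (the shapes are illustrative only):

import torch
import torch.nn.functional as F

x = torch.randn(1, 6, 24, 24)  # hypothetical feature map

# Both calls use a 2x2 window (the stride defaults to the kernel size),
# so the outputs are identical and the spatial size is halved.
a = F.max_pool2d(x, (2, 2))
b = F.max_pool2d(x, 2)
print(a.shape)            # torch.Size([1, 6, 12, 12])
print(torch.equal(a, b))  # True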
Example #8
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)

        # print(x.shape)  # uncomment to check the flattened size
        x = x.view(-1, 4*4*50)  # flatten; -1 lets PyTorch infer the batch size
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #9
    def forward(self, x):
        # transform the input
        x = self.stn(x)

        # Perform the usual forward pass
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #10
    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x, inplace=True)  # inplace=True modifies the original tensor directly (no copy),
                                     # which saves memory by not allocating a new activation map.
        x = F.max_pool2d(x, (2, 2))
        x = self.conv2(x)
        x = F.relu(x, inplace=True)
        x = F.max_pool2d(x, (2, 2))
        x = x.view(x.shape[0], -1)
        x = self.fc1(x)
        x = F.relu(x, inplace=True)
        x = self.fc2(x)
        out = F.relu(x, inplace=True)

        return out
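The inplace=True note above can be checked directly: the call zeroes the negative entries of its input and returns that same tensor, so no extra copy of the activation map is allocated. A minimal sketch:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3)
y = F.relu(x, inplace=True)

# The returned tensor is the input tensor itself, modified in place.
print(y is x)          # True
print((x < 0).any())   # tensor(False)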
Example #11
    def _compute_block_mask(self, mask):
        block_mask = F.max_pool2d(input=mask[:, None, :, :],
                                  kernel_size=(self.block_size, self.block_size),
                                  stride=(1, 1),
                                  padding=self.block_size // 2)

        if self.block_size % 2 == 0:
            block_mask = block_mask[:, :, :-1, :-1]

        kept = block_mask.numel() - block_mask.sum().to(torch.float32)  # prevent overflow in float16
        block_mask = 1 - block_mask.squeeze(1)

        return block_mask, kept
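Example #11 uses max pooling to dilate a sparse drop mask into square blocks (the DropBlock pattern): each sampled position grows into a block_size x block_size square, and 1 - mask then marks the activations that are kept. A minimal standalone sketch with a hypothetical 8x8 mask and block_size = 3:

import torch
import torch.nn.functional as F

block_size = 3
mask = torch.zeros(1, 8, 8)   # one sample, 8x8 spatial mask
mask[0, 4, 4] = 1.0           # a single sampled drop position

# Max pooling with stride 1 and block_size // 2 padding dilates the seed
# into a block_size x block_size square of ones.
block_mask = F.max_pool2d(mask[:, None, :, :],
                          kernel_size=(block_size, block_size),
                          stride=(1, 1),
                          padding=block_size // 2)
print(block_mask[0, 0].sum())             # tensor(9.)  -> a 3x3 block is dropped
print((1 - block_mask.squeeze(1)).sum())  # tensor(55.) -> 64 - 9 activations kept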
Example #12
    def forward(self, x_level_0, x_level_1, x_level_2):
        # Feature resizing step
        if self.level == 0:
            level_0_resized = x_level_0
            level_1_resized = self.stride_level_1(x_level_1)
            level_2_downsampled_inter = F.max_pool2d(x_level_2,
                                                     3,
                                                     stride=2,
                                                     padding=1)
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
        elif self.level == 1:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed,
                                            scale_factor=2,
                                            mode='nearest')
            level_1_resized = x_level_1
            level_2_resized = self.stride_level_2(x_level_2)
        elif self.level == 2:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed,
                                            scale_factor=4,
                                            mode='nearest')
            if self.dim[1] != self.dim[2]:
                level_1_compressed = self.compress_level_1(x_level_1)
                level_1_resized = F.interpolate(level_1_compressed,
                                                scale_factor=2,
                                                mode='nearest')
            else:
                level_1_resized = F.interpolate(x_level_1, scale_factor=2, mode='nearest')
            level_2_resized = x_level_2

        # The fusion weights are themselves learned by the network
        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        levels_weight_v = torch.cat(
            (level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)  # alpha

        # Adaptive fusion
        fused_out_reduced = level_0_resized * levels_weight[:,0:1,:,:] +\
                            level_1_resized * levels_weight[:,1:2,:,:] +\
                            level_2_resized * levels_weight[:,2:,:,:]

        out = self.expand(fused_out_reduced)
        return out
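Example #12 fuses the three resized feature maps with per-pixel weights that are produced by the network and normalised with a softmax over the three levels. A minimal sketch of just the fusion arithmetic, using made-up tensors in place of the resized levels and the learned weight map:

import torch
import torch.nn.functional as F

n, c, h, w = 2, 16, 8, 8
level_0 = torch.randn(n, c, h, w)   # stand-ins for the resized feature maps
level_1 = torch.randn(n, c, h, w)
level_2 = torch.randn(n, c, h, w)

# One weight channel per level, normalised so the three weights sum to 1 at
# every spatial position (the role of self.weight_levels + softmax above).
levels_weight = F.softmax(torch.randn(n, 3, h, w), dim=1)

fused = (level_0 * levels_weight[:, 0:1] +
         level_1 * levels_weight[:, 1:2] +
         level_2 * levels_weight[:, 2:3])
print(fused.shape)  # torch.Size([2, 16, 8, 8])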
Example #13
 def forward(self, x, indices, size):
     output = self.features(x)
     return F.max_pool2d(output, 2, 2, return_indices=True), output.size()
Example #14
def plot_conv_activation():
    model.eval()
    fig = plt.figure()

    k = 0
    for data_idx in range(5):
        data = example_data[data_idx + 16].view(1, 1, 28, 28).to(model.device)

        # data = example_data[data_idx + 15][0].to(model.device)#.view(784).to(model.device)

        # model.fc1.register_forward_hook(get_activation('fc1'))
        #model.fc2.register_forward_hook(get_activation('fc2'))
        model.conv2.register_forward_hook(get_activation('conv2'))
        model.fc1.register_forward_hook(get_activation('fc1'))

        pred_vals = model(data)[0]  # single vector batch
        #print(pred_vals)
        pred_index = torch.argmax(pred_vals)
        print(pred_index)

        fc1_w = model.fc1.weight  # [500, 1000]
        fc2_w = model.fc2.weight  # [4, 500]

        data = activation['conv2']
        data = F.relu(F.max_pool2d(data, 2))

        data = data.view(-1, 320)
        # bb = torch.relu(torch.matmul(data, torch.t(fc1_w)) + model.fc1.bias)

        bias_div = len(torch.t(fc1_w))

        mul_weights = torch.mul(data, fc1_w)
        mul_weights = torch.t(mul_weights)
        mul_weights = mul_weights + model.fc1.bias.div(bias_div)

        activated_weights = torch.relu(torch.sum(mul_weights, 0))
        for idx, val in enumerate(activated_weights):
            if val == 0.0:
                mul_weights[:, idx] = 0.0

        fc2_w = torch.t(fc2_w)
        mul_weights = torch.matmul(mul_weights, fc2_w)
        mul_weights = mul_weights + model.fc2.bias.div(bias_div)
        '''
        "bias_div" only needs to be computed once, because the input shape never
        changes, so each bias is always divided by the same quantity.
        '''
        activated_weights = torch.relu(torch.sum(mul_weights, 0))
        for idx, val in enumerate(activated_weights):
            if val == 0.0:
                mul_weights[:, idx] = 0.0

        heatmap = torch.t(mul_weights)[3].cpu().detach().view(20, 16).numpy()

        heatmap = cv2.resize(heatmap, (28, 28))
        plt.matshow(heatmap)
        plt.show()
        exit(15)  # debug early-exit; the remainder of the function is unreachable
        heatmap = cv2.resize(torch.t(mul_weights)[1], (28, 28))
        exit(3)

        data = example_data[data_idx + 15][0].view(28, 28)
        print('predicted:', pred_index)
        #k += 1
        #plt.subplot(10, 21, k)
        plt.tight_layout()
        im = data
        plt.imshow(im, interpolation='none', vmin=0, vmax=1)
        plt.xticks([])
        plt.yticks([])

        for vec in torch.t(mul_weights):
            k = 0
            matr = vec.view(1, 20, 4, 4)
            print(matr.shape)

            for el in matr[0]:
                print(el.shape)
                el = el.cpu().detach().numpy()
                print(el)
                k += 1
                plt.subplot(5, 4, k)
                plt.imshow(el)
            fig
            plt.show()

        exit(3)
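The "bias_div" comment in the last example can be verified in isolation: spreading the bias evenly over the per-input-element weight contributions means that summing those contributions reproduces the ordinary linear-layer output. A minimal check with assumed sizes (320 inputs, 50 outputs; the real layer sizes may differ):

import torch

lin = torch.nn.Linear(320, 50)
data = torch.randn(1, 320)

# Per-input-element contributions to every output unit, shape (320, 50),
# with the bias spread evenly over the 320 rows (the "bias_div" trick).
contrib = torch.t(data * lin.weight) + lin.bias / 320

# Summing the rows recovers lin(data).
recovered = contrib.sum(dim=0)
print(torch.allclose(recovered, lin(data).squeeze(0), atol=1e-5))  # True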