Example #1
    def features(self, x: torch.Tensor) -> torch.Tensor:
        """
        Returns the non-activated output of the second-last layer.
        :param x: input tensor (batch_size, input_size)
        :return: output tensor (batch_size, 100)
        """
        x = x.view(-1, num_flat_features(x))
        return self._features(x)
Example #2
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Compute a forward pass.
        :param x: input tensor (batch_size, input_size)
        :return: output tensor (batch_size, output_size)
        """
        x = x.view(-1, num_flat_features(x))
        return self.net(x)
Example #3
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Compute a forward pass.
        :param x: input tensor (batch_size, input_size)
        :return: output tensor (batch_size, output_size)
        """
        x = x.view(-1, num_flat_features(x))
        if len(self.old_cols) > 0:
            # Run the layers of the previously trained columns without
            # tracking gradients, so the old columns stay frozen.
            with torch.no_grad():
                fc1_kb = [old(x) for old in self.old_fc1s]
                fc2_kb = [old(fc1_kb[i]) for i, old in enumerate(self.old_fc2s)]
            x = F.relu(self.fc1(x))

            # Project the concatenated old-column activations through the
            # adaptors and add them to the current column (lateral connections).
            y = self.adaptor1(torch.cat(fc1_kb, 1))
            x = F.relu(self.fc2(x) + y)

            y = self.adaptor2(torch.cat(fc2_kb, 1))
            x = self.classifier(x) + y
        else:
            # No previous columns: plain feed-forward pass.
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.classifier(x)
        return x
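
All three snippets flatten the input with num_flat_features, which is defined elsewhere in the source, and assume the usual imports (import torch; import torch.nn.functional as F). Below is a minimal sketch of that helper, assuming it follows the standard PyTorch-tutorial pattern of multiplying together all non-batch dimensions:

import torch

def num_flat_features(x: torch.Tensor) -> int:
    # Flattened feature count per sample: the product of all
    # dimensions except the batch dimension.
    size = x.size()[1:]
    n = 1
    for s in size:
        n *= s
    return n

With this helper, x.view(-1, num_flat_features(x)) reshapes any (batch_size, ...) tensor into (batch_size, features) before it is fed to the fully connected layers.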