Example #1
0
 def forward(self, x):
   """Process a flattened input sequentially, one feature (pixel) at a time.

   The input is flattened, reordered by ``self.order``, and then fed element
   by element into a recurrent update: the running hidden state and the
   current scalar input each pass through a dense layer, are summed, and go
   through a before/act/after stack.

   Args:
     x: Input tensor; flattened to (batch, features) before processing.

   Returns:
     ``self.output`` applied to the final hidden state.
   """
   x = flatten(x)[:, self.order]
   # NOTE(review): hidden size 100 is hard-coded; presumably it matches the
   # input width of self.state_dense — confirm against the constructor.
   curr = x.new_zeros(x.shape[0], 100)
   # Iterate over however many features the (reordered) input actually has
   # instead of the hard-coded 784 — identical for 28x28 inputs, but also
   # correct for any other input size / permutation length.
   for i in range(x.size(1)):
     ins = x[:, i].view(-1, 1)
     curr = self.state_dense(curr) + self.input_dense(ins)
     curr = self.after(self.act(self.before(curr)))
   return self.output(curr)
Example #2
0
    def __init__(self, train_data, eps=10**-12):
        """Fit a PCA transform to `train_data`.

        Computes per-feature means/stds, accumulates the covariance matrix
        of the normalized data over all batches, and stores the eigenvector
        basis `self.W` with columns ordered by decreasing eigenvalue.

        Args:
            train_data: Iterable of batches; each batch is indexable and
                batch[0] holds the input tensor (DataLoader-style).
            eps: Small constant added to the stds to avoid division by zero.
        """
        logging.info("Training PCA...")

        # Normalize the training data.
        logging.info("Calculating means and variances...")
        self.means, self.stds = moments(train_data)
        self.stds += eps

        # Compute covariance matrix.
        # NOTE: the accumulated matrix is not divided by the sample count;
        # the scale does not affect the eigenvector basis computed below.
        logging.info("Computing covariance matrix...")
        cov = None
        milestone = 0.0
        for i, batch in enumerate(train_data):
            batch = batch[0]
            normalized = flatten((batch - self.means) / self.stds)
            gain = normalized.t().matmul(normalized)
            if cov is None:
                cov = gain
            else:
                cov += gain

            # Info logging.
            progress = (i + 1) / len(train_data)
            if progress - milestone >= 0.1 or progress == 1.0:
                logging.info("%d%% done", int(100 * progress))
                # BUGFIX: advance the milestone so each ~10% step is logged
                # once. Previously `milestone` was never updated, so every
                # batch after the first 10% emitted a log line.
                milestone = progress

        self.input_size = cov.size()[0]
        self.code_size = self.input_size

        # Compute the eigenvectors. eigh returns eigenvalues in ascending
        # order, so reverse the columns to get the largest components first.
        _, self.W = map(torch.from_numpy, np.linalg.eigh(cov.numpy()))
        order = torch.arange(self.input_size - 1,
                             -1,
                             -1,
                             out=torch.LongTensor())
        self.W = self.W[:, order]
Example #3
0
 def res(module, ins, outs):
     """Hook: record how much per-sample output variance varies over the batch.

     For each sample, the variance across its (flattened) output features is
     computed; the biased variance of those per-sample values is appended to
     h["out_vv"].
     """
     with torch.no_grad():
         flat = flatten(outs)
         per_sample = torch.var(flat, 1, unbiased=False)
         spread = torch.var(per_sample, unbiased=False)
         h["out_vv"].append(spread)
Example #4
0
 def code(self, x):
     """Encode `x`: flatten it, then run it through the coder network."""
     flat = flatten(x)
     return self.coder(flat)
Example #5
0
 def code(self, x):
     """Project the normalized, flattened input onto the leading
     `self.code_size` principal-component directions in `self.W`."""
     normalized = (x - self.means) / self.stds
     basis = self.W[:, :self.code_size]
     return flatten(normalized).matmul(basis)
Example #6
0
 def code(self, x):
     """Soft-assign `x` to `self.points`: softmin over sharpness-scaled
     distances, so closer points get larger weights (rows sum to 1)."""
     flat = flatten(x)
     scaled_dists = self.sharpness * self.dist_func(flat, self.points)
     return nn.functional.softmin(scaled_dists, dim=-1)