def train(self):
     """Train the convolutional model for 10 epochs on self.data_loader.

     Each batch is binarized by Bernoulli sampling, reshaped to NCHW
     28x28 images (MNIST-sized), pushed through the model, and scored
     with a per-pixel BCE loss summed over C/H/W and averaged over the
     batch. Progress is printed every 100 iterations.
     """
     for epoch in range(10):  # NOTE(review): epoch count is hard-coded
         for it, (x, y) in enumerate(self.data_loader):
             self.optim.zero_grad()
             # Binarize inputs: sample each pixel as Bernoulli(p=x).
             x = torch.bernoulli(x)
             if cuda:  # `cuda` is a global flag defined elsewhere in the file
                 x = x.cuda()
             # Legacy PyTorch (<0.4): wrap tensor in Variable for autograd.
             x = Variable(x.view(-1, 1, 28, 28))
             # Model takes a (tensor, 0) tuple and returns a tuple; the
             # output is NHWC, so permute back to NCHW to match x.
             # NOTE(review): meaning of the `0` second element is not
             # visible from this snippet — confirm against the model.
             out = nn_.sigmoid(self.mdl((x, 0))[0]).permute(0, 3, 1, 2)
             # Sum BCE over channel, height, width; mean over the batch.
             loss = utils.bceloss(out, x).sum(1).sum(1).sum(1).mean()
             loss.backward()
             self.optim.step()
             if ((it + 1) % 100) == 0:
                 # NOTE(review): `32` is presumably the batch size used
                 # to build data_loader — hard-coded here; verify.
                 print 'Epoch: [%2d] [%4d/%4d] loss: %.8f' % \
                     (epoch+1, it+1,
                      self.data_loader.dataset.__len__() // 32,
                      loss.data[0])
Example #2
0
 def train(self):
     """Train the flat (784-dim input) model for 10 epochs on self.data_loader.

     Each batch is binarized by Bernoulli sampling, flattened to vectors
     of length 784 (28*28), scored with BCE summed over features and
     averaged over the batch, then the model is re-randomized after
     every optimizer step. Progress is printed every 10 iterations.
     """
     for epoch in range(10):  # NOTE(review): epoch count is hard-coded
         for it, (x, y) in enumerate(self.data_loader):
             self.optim.zero_grad()
             # Binarize inputs: sample each pixel as Bernoulli(p=x).
             # NOTE(review): unlike the conv variant above, there is no
             # .cuda() transfer here — confirm this model runs on CPU.
             x = torch.bernoulli(x)
             # Legacy PyTorch (<0.4): wrap flattened tensor for autograd.
             x = Variable(x.view(-1, 784))
             # Model output appears to carry an extra trailing dim;
             # [:,:,0] selects the first slice before the sigmoid.
             out = nn_.sigmoid(self.mdl(x)[:,:,0])
             # Sum BCE over the 784 features; mean over the batch.
             loss = utils.bceloss(out, x).sum(1).mean()
             loss.backward()
             self.optim.step()
             if ((it + 1) % 10) == 0:
                 # NOTE(review): `32` is presumably the batch size used
                 # to build data_loader — hard-coded here; verify.
                 print 'Epoch: [%2d] [%4d/%4d] loss: %.8f' % \
                     (epoch+1, it+1, 
                      self.data_loader.dataset.__len__() // 32,
                      loss.data[0])
             # Re-randomize model internals each iteration (e.g. mask /
             # ordering resampling in a MADE-style model — TODO confirm).
             self.mdl.randomize()
Example #3
0
 def evaluate(self, z):
     """Return the negative BCE (summed over non-batch dims) between the
     sigmoid of self.logits and *z* rescaled from [-1, 1] to [0, 1]
     (assumed input range — confirm against callers).
     """
     # Affine map: [-1, 1] -> [0, 1] so z can serve as a BCE target.
     target = 0.5 * z + 0.5
     probs = self.sigmoid(self.logits)
     return -nn_.sum_from_one(bceloss(probs, target))
Example #4
0
 def evaluate(self, input, output):
     """Return the negative BCE (summed over non-batch dims) between the
     model's prediction for *input* and *output*, both rescaled from
     [-1, 1] to [0, 1] (assumed range — confirm against callers).
     """
     # Affine map: [-1, 1] -> [0, 1] so both sides are valid BCE operands.
     pred = 0.5 * self.forward(input) + 0.5
     target = 0.5 * output + 0.5
     return -nn_.sum_from_one(bceloss(pred, target))