Example 1
def loss(self):
    model = self.inference
    #loss = slim.losses.softmax_cross_entropy(model, self.y)
    loss = digits.classification_loss(model, self.y)
    # Track accuracy as a TensorBoard scalar; only the loss is returned.
    accuracy = digits.classification_accuracy(model, self.y)
    self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
    return loss
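
Judging by the commented-out slim call, digits.classification_loss computes a softmax cross-entropy between the network output and the labels. A minimal TF1-style sketch under that assumption (hypothetical, not the actual DIGITS implementation):

import tensorflow as tf

def classification_loss(logits, labels):
    # Mean softmax cross-entropy; assumes one-hot labels with the
    # same shape as logits.
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))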
Example 2
def loss(self):
    model = self.inference
    loss = digits.classification_loss(model, self.y)
    # Record both top-1 and top-5 accuracy as TensorBoard scalars.
    acc_top1 = digits.classification_accuracy_top_n(model, self.y, 1)
    acc_top5 = digits.classification_accuracy_top_n(model, self.y, 5)
    self.summaries.append(tf.summary.scalar(acc_top1.op.name, acc_top1))
    self.summaries.append(tf.summary.scalar(acc_top5.op.name, acc_top5))
    return loss
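
A top-n accuracy like the values returned by classification_accuracy_top_n can be computed with tf.nn.in_top_k. The sketch below is an assumption (hypothetical top_n_accuracy helper, integer class labels, [batch, num_classes] logits), not the actual DIGITS helper:

import tensorflow as tf

def top_n_accuracy(logits, labels, n):
    # True where the correct class is among the n highest-scoring logits.
    correct = tf.nn.in_top_k(predictions=logits, targets=labels, k=n)
    # Fraction of the batch predicted correctly.
    return tf.reduce_mean(tf.cast(correct, tf.float32))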
Example 3
def loss(self):
    loss = digits.classification_loss(self.inference, self.y)
    accuracy = digits.classification_accuracy(self.inference, self.y)
    self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
    return loss
Example 4
        # Pick a random target modality for each sample
        # (0 = FLAIR, 1 = T1ce, 2 = T1, per the branches below).
        label_ = torch.randint(3, (t1.size(0), ))
        label = label2onehot(label_, 3).cuda()
        # Assemble the batch of real images matching the sampled labels.
        real = torch.zeros(t1.size(0), t1.size(1), t1.size(2), t1.size(3))
        for i, l in enumerate(label_):
            if l == 0:
                real[i] = flair[i]
            elif l == 1:
                real[i] = t1ce[i]
            elif l == 2:
                real[i] = t1[i]
            else:
                print('error!!!')
        # Critic score and auxiliary domain classification for the real
        # image, conditioned on the T2 input.
        out_src, out_cls = discriminator(real.float().cuda(),
                                         t2.float().cuda())
        # WGAN critic loss on real samples (maximize the critic's score).
        d_loss_real = -torch.mean(out_src.sum([1, 2, 3]))
        d_loss_cls = classification_loss(out_cls, label)

        # Discriminator loss on generated (fake) images; detach() keeps
        # gradients from flowing back into the generator.
        fake = generator(t2.float().cuda(), label)
        out_src, out_cls = discriminator(fake.detach(), t2.float().cuda())
        d_loss_fake = torch.mean(out_src.sum([1, 2, 3]))

        # Compute loss for gradient penalty.
        alpha = torch.rand(real.size(0), 1, 1, 1).cuda()
        x_hat = (alpha * real.cuda().data +
                 (1 - alpha) * fake.data).requires_grad_(True)
        out_src, _ = discriminator(x_hat, t2.float().cuda())
        d_loss_gp = gradient_penalty(out_src, x_hat)
        #         d_loss_gp.backward(retain_graph=True)
        # Total discriminator objective: WGAN-GP with an auxiliary
        # classification term.
        d_loss = (d_loss_real + d_loss_fake +
                  LAMBDA_CLS * d_loss_cls + LAMBDA_GP * d_loss_gp)
        optimizer_d.zero_grad()
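
The helpers label2onehot and gradient_penalty are not shown in this snippet. In StarGAN-style training code they are conventionally implemented roughly as below; these are sketches based on the standard WGAN-GP formulation, an assumption rather than the code this example actually imports:

import torch

def label2onehot(labels, dim):
    # Convert a batch of integer labels to one-hot vectors of width dim.
    out = torch.zeros(labels.size(0), dim)
    out[torch.arange(labels.size(0)), labels.long()] = 1
    return out

def gradient_penalty(out_src, x_hat):
    # WGAN-GP penalty: push the norm of the critic's gradient at the
    # interpolated samples x_hat toward 1.
    weight = torch.ones_like(out_src)
    grad = torch.autograd.grad(outputs=out_src,
                               inputs=x_hat,
                               grad_outputs=weight,
                               retain_graph=True,
                               create_graph=True,
                               only_inputs=True)[0]
    grad = grad.view(grad.size(0), -1)
    return torch.mean((grad.norm(2, dim=1) - 1) ** 2)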
Example 5
def loss(self):
    model = self.inference
    loss = digits.classification_loss(model, self.y)
    accuracy = digits.classification_accuracy(model, self.y)
    self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
    return loss