Example #1
def val_zsl(self, test_X, test_label, target_classes, second=False):
    start = 0
    ntest = test_X.size()[0]
    predicted_label = torch.LongTensor(test_label.size())
    # all_output = None
    for i in range(0, ntest, self.batch_size):
        end = min(ntest, start + self.batch_size)
        # volatile=True is the legacy (pre-0.4) PyTorch way to run inference
        # without building an autograd graph
        if self.cuda:
            output = self.model(
                Variable(test_X[start:end].cuda(), volatile=True))
        else:
            output = self.model(Variable(test_X[start:end], volatile=True))
        # if all_output is None:
        #     all_output = output
        # else:
        #     all_output = torch.cat((all_output, output), 0)
        _, predicted_label[start:end] = torch.max(output.data, 1)
        start = end
    overall_acc = self.compute_acc_avg_per_class(
        util.map_label(test_label, target_classes), predicted_label,
        target_classes.size(0))
    acc_of_all = self.compute_each_class_acc(
        util.map_label(test_label, target_classes), predicted_label,
        target_classes.size(0))
    # return overall_acc, predicted_label, all_output, acc_of_all
    return overall_acc, acc_of_all
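
Every example on this page funnels labels through util.map_label so they line up with the classifier's 0-based outputs. The helper itself is not shown here; a minimal sketch of the behavior these calls assume (the repository's actual implementation may differ) is:

import torch

def map_label(label, classes):
    # Map each original class id in `label` to its index within `classes`,
    # yielding labels in the contiguous range [0, classes.size(0)).
    mapped_label = torch.LongTensor(label.size())
    for i in range(classes.size(0)):
        mapped_label[label == classes[i]] = i
    return mapped_label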
Example #2
    def fit_zsl(self):
        first_acc = 0
        first_accs = torch.FloatTensor(5).fill_(0)
        first_all_pred = None
        first_all_output = None
        first_acc_of_all = None

        trun = lambda x: int(x * 100)  # truncate a ratio to an integer percentage (unused in this excerpt)

        all_length = self.test_unseen_feature.size(0)
        mapped_test_label = util.map_label(self.test_unseen_label,
                                           self.unseenclasses)

        for epoch in range(self.nepoch):
            for i in range(0, self.ntrain, self.batch_size):
                self.model.zero_grad()
                batch_input, batch_label = self.next_batch(self.batch_size)
                self.input.copy_(batch_input)
                self.label.copy_(batch_label)
                inputv = Variable(self.input)  # fake_feature
                labelv = Variable(self.label)  # fake_labels
                output = self.model(inputv)
                loss = self.criterion(
                    output, labelv)  # train the classifier on fake_unseen_feature and labels
                loss.backward()
                self.optimizer.step()
            # using real testing data (of unseen classes) to test classifier2
            # overall_acc, pred, output, acc_of_all = self.val_zsl(self.test_unseen_feature, self.test_unseen_label,
            #                                                      self.unseenclasses)
            # testing only hit@1
            # overall_acc, acc_of_all = self.val_zsl(self.test_unseen_feature, self.test_unseen_label,
            #                                        self.unseenclasses)
            # testing hit@1, hit@2, hit@5, ...
            overall_acc, overall_acc_Hit = self.val_zsl_Hit(
                self.test_unseen_feature, self.test_unseen_label,
                self.unseenclasses)

            #  get the highest evaluation result
            if overall_acc > first_acc:
                first_acc = overall_acc
                first_accs = overall_acc_Hit
                # first_all_pred = pred
                # first_all_output = output
                # first_acc_of_all = acc_of_all

        print('First Acc (hit@1): {:.2f}%'.format(first_acc * 100))
        print('First Acc (hit@k): ', ['{:.2f}'.format(i * 100) for i in first_accs])
        # if self.args.PerClassAcc:
        #     # acc_per = ['{:.2f}'.format(x * 100) for x in list(first_acc_of_all)]
        #     # print(acc_per)
        #     # acc_per = collections.OrderedDict()  # output is ordered by input
        #     first_acc_of_all = first_acc_of_all.numpy()
        #     for i in range(len(first_acc_of_all)):
        #         x = first_acc_of_all[i]
        #         class_name = self.unseennames[i]
        #         # acc_per[class_name] = np.round(x * 100, 2)
        #         print('{}/{}, {}, acc: {:.2f} : '.format(i+1, len(first_acc_of_all), class_name, np.round(x * 100, 2)))
        #     # print(acc_per)
        sys.stdout.flush()
        return first_acc
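
fit_zsl pulls its batches from self.next_batch, which this page does not show. A plausible minimal sketch, assuming the classifier keeps its (synthetic) training set in self.train_X / self.train_Y (both attribute names are assumptions):

import torch

def next_batch(self, batch_size):
    # Hypothetical sketch: draw a random batch of (feature, label) pairs;
    # self.train_X / self.train_Y are assumed attribute names.
    idx = torch.randperm(self.ntrain)[0:batch_size]
    return self.train_X[idx], self.train_Y[idx]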
Example #3
    def val_zsl_Hit(self, test_X, test_label, target_classes, second=False):
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        predicted_labels = torch.LongTensor(test_label.size(0),
                                            target_classes.size(0))
        # all_output = None
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start + self.batch_size)
            if self.cuda:
                output = self.model(
                    Variable(test_X[start:end].cuda(), volatile=True))
            else:
                output = self.model(Variable(test_X[start:end], volatile=True))
            # if all_output is None:
            #     all_output = output
            # else:
            #     all_output = torch.cat((all_output, output), 0)
            _, predicted_label[start:end] = torch.max(output.data, 1)
            _, predicted_labels[start:end] = output.data.sort(1,
                                                              descending=True)
            start = end
        # print("pred shape:", predicted_labels.shape)
        overall_acc = self.compute_acc_avg_per_class(
            util.map_label(test_label, target_classes), predicted_label,
            target_classes.size(0))
        overall_acc_Hit = self.compute_acc_avg_per_class_Hit(
            util.map_label(test_label, target_classes), predicted_labels,
            target_classes.size(0))
        # print("overall acc shape:", overall_acc.shape)
        # acc_of_all = self.compute_each_class_acc(util.map_label(test_label, target_classes), predicted_labels,
        #                                          target_classes.size(0))
        # return overall_acc, predicted_label, all_output, acc_of_all
        # return overall_acc, acc_of_all

        return overall_acc, overall_acc_Hit.squeeze()
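
compute_acc_avg_per_class_Hit is also not shown. Since fit_zsl stores its result in a tensor of five entries, a minimal sketch of a per-class hit@k accuracy is given below; the cut-offs (1, 2, 5, 10, 20) and the use of the current PyTorch API (rather than the legacy Variable API above) are assumptions:

import torch

def compute_acc_avg_per_class_Hit(self, test_label, predicted_labels, nclass,
                                  hits=(1, 2, 5, 10, 20)):
    # Hypothetical sketch: a sample scores hit@k when its true label appears
    # among the k top-ranked predictions, i.e. the first k columns of
    # `predicted_labels`, which val_zsl_Hit fills via a descending sort.
    acc = torch.zeros(len(hits))
    for c in range(nclass):
        idx = (test_label == c)
        n = idx.sum().item()
        if n == 0:
            continue
        rows = predicted_labels[idx]
        for j, k in enumerate(hits):
            acc[j] += (rows[:, :k] == c).any(dim=1).float().sum().item() / n
    return acc / nclass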
Example #4
    def val(self, test_X, test_label, target_classes):
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start + self.batch_size)
            if self.cuda:
                output = self.model(Variable(test_X[start:end].cuda(), volatile=True))
            else:
                output = self.model(Variable(test_X[start:end], volatile=True))
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end

        acc = self.compute_per_class_acc(util.map_label(test_label, target_classes), predicted_label,
                                         target_classes.size(0))
        return acc
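
compute_per_class_acc (like the compute_acc_avg_per_class used in Examples 1 and 3) averages accuracy over classes rather than over samples, the standard zero-shot metric, so rare classes weigh as much as frequent ones. A minimal sketch, assuming labels have already been mapped into [0, nclass):

import torch

def compute_per_class_acc(self, test_label, predicted_label, nclass):
    # Hypothetical sketch: mean of the per-class accuracies.
    acc = 0.0
    for c in range(nclass):
        idx = (test_label == c)
        if idx.sum() == 0:
            continue
        acc += (predicted_label[idx] == test_label[idx]).float().mean().item()
    return acc / nclass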
Example #5
def sample():
    batch_feature, batch_label, batch_sem = data.next_batch(args.BatchSize)

    input_fea.copy_(batch_feature)
    input_sem.copy_(batch_sem)
    input_label.copy_(util.map_label(batch_label, data.seenclasses))
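
sample() copies each batch into preallocated buffers instead of rebuilding tensors every step, which keeps GPU allocations stable. A hypothetical setup for those buffers, with the sizes implied by the copy_ calls above (args.SemSize is an assumed name):

import torch

input_fea = torch.FloatTensor(args.BatchSize, args.FeaSize)
input_sem = torch.FloatTensor(args.BatchSize, args.SemSize)  # args.SemSize is assumed
input_label = torch.LongTensor(args.BatchSize)
if args.Cuda:
    input_fea, input_sem = input_fea.cuda(), input_sem.cuda()
    input_label = input_label.cuda()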
Example #6
    ones = torch.ones(disc_interpolates.size())
    if args.Cuda:
        ones = ones.cuda()

    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=ones,
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    # args.GP_Weight = 10
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * args.GP_Weight
    return gradient_penalty
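
The snippet opens after interpolates and disc_interpolates already exist. For context, the standard WGAN-GP construction that would precede it inside the same penalty function is sketched below, in the same legacy Variable API; real_data, fake_data, netD, and the conditional input input_semv are assumed names:

    # random points on the line between each real and each generated feature
    alpha = torch.rand(args.BatchSize, 1).expand(real_data.size())
    if args.Cuda:
        alpha = alpha.cuda()
    interpolates = alpha * real_data + (1 - alpha) * fake_data
    interpolates = Variable(interpolates, requires_grad=True)
    disc_interpolates = netD(interpolates, input_semv)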



# train a classifier on seen classes, obtain \theta of Equation (4)
pretrain_cls = classifier_pretrain.CLASSIFIER(data.train_feature, util.map_label(data.train_label, data.seenclasses),
                                     data.seenclasses.size(0), args.FeaSize, args.Cuda, 0.001, 0.5, 100, 2*args.BatchSize,
                                     args.Pretrained_Classifier)

# freeze the classifier during the optimization
for p in pretrain_cls.model.parameters():  # set requires_grad to False
    p.requires_grad = False



for epoch in range(args.Epoch):
    FP = 0
    mean_lossD = 0
    mean_lossG = 0

    for i in range(0, data.ntrain, args.BatchSize):
Example #7
    gradients = autograd.grad(outputs=disc_interpolates,
                              inputs=interpolates,
                              grad_outputs=ones,
                              create_graph=True,
                              retain_graph=True,
                              only_inputs=True)[0]
    # args.GP_Weight = 10
    gradient_penalty = (
        (gradients.norm(2, dim=1) - 1)**2).mean() * args.GP_Weight
    return gradient_penalty


# train a classifier on seen classes, obtain \theta of Equation (4)
pretrain_cls = classifier_pretrain.CLASSIFIER(
    data.train_feature, util.map_label(data.train_label, data.seenclasses),
    data.seenclasses.size(0), args.FeaSize, args.Cuda, 0.001, 0.5, 100,
    2 * args.BatchSize, args.Pretrained_Classifier)

# freeze the classifier during the optimization
for p in pretrain_cls.model.parameters():  # set requires_grad to False
    p.requires_grad = False

for epoch in range(args.Epoch):
    FP = 0
    mean_lossD = 0
    mean_lossG = 0

    for i in range(0, data.ntrain, args.BatchSize):
        # print("batch...", i)
        # iteratively train the generator and discriminator
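
The loop body is cut off here. Below is a sketch of the alternation the last comment refers to, following the usual WGAN-GP schedule; netG, netD, optimizerD, calc_gradient_penalty, args.Critic_Iter, and args.NoiseSize are all assumed names:

        # several critic updates per generator update (hypothetical sketch)
        for iter_d in range(args.Critic_Iter):
            sample()  # refill input_fea / input_sem / input_label
            netD.zero_grad()
            input_feav, input_semv = Variable(input_fea), Variable(input_sem)
            criticD_real = netD(input_feav, input_semv).mean()
            noise = torch.randn(args.BatchSize, args.NoiseSize)
            if args.Cuda:
                noise = noise.cuda()
            fake = netG(Variable(noise), input_semv)
            criticD_fake = netD(fake.detach(), input_semv).mean()
            gp = calc_gradient_penalty(netD, input_fea, fake.data, input_sem)
            D_cost = criticD_fake - criticD_real + gp
            D_cost.backward()
            optimizerD.step()
        # ...followed by one generator step that maximizes the critic score
        # plus the classification loss under the frozen pretrain_cls
        # (the \theta of Equation (4) above).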