Beispiel #1
0
    def val(self, test_X, test_label, target_classes):
        """Evaluate the classifier alone and ensembled with ZSL unseen outputs.

        Returns:
            (ensemble_acc, classifier_acc): per-class accuracies over
            target_classes for the ensembled and the plain classifier output.
        """
        # `volatile=True` is deprecated/removed; torch.no_grad() is the
        # supported way to run inference without building the autograd graph.
        with torch.no_grad():
            output = self.model(Variable(test_X.cuda()))
        _, classifier_label = torch.max(output.data, 1)
        classifier_acc = self.compute_per_class_acc(
            util.map_label(test_label, target_classes), classifier_label.cpu(),
            target_classes.size(0))

        # blend classifier logits with precomputed ZSL outputs for unseen classes
        ensemble_output = output + self.opt.ensemble_ratio * Variable(
            self.opt.zsl_unseen_output)
        _, predicted_label = torch.max(ensemble_output.data, 1)
        ensemble_acc = self.compute_per_class_acc(
            util.map_label(test_label, target_classes), predicted_label.cpu(),
            target_classes.size(0))
        return ensemble_acc, classifier_acc
Beispiel #2
0
def run_eval_odmask(netAV, netVA, netOD, thresh, test_feature, test_label, regster_label, feature_register, opt, seen_classes=True):
    """Nearest-register evaluation gated by an out-of-distribution entropy mask.

    Embeds test features with netVA and predicts the nearest feature_register
    entry by Euclidean distance. netOD's output entropy (HLoss) below `thresh`
    marks a sample as seen; with seen_classes=False the mask is inverted.
    netAV is unused here but kept for a uniform eval interface.

    Returns:
        (mean_acc, sum_acc, num, od_sum_acc) where od_sum_acc counts the
        samples accepted by the OD mask.
    """
    n_test = test_feature.size(0)
    predicted_label = torch.LongTensor(test_label.size())
    start = 0
    criterion = HLoss().cuda()
    entropy = []
    for i in range(0, n_test, opt.batch_size):
        end = min(n_test, start + opt.batch_size)
        with torch.no_grad():
            _, mix_output = netVA(Variable(test_feature[start:end].cuda()))
        # shrink the register view to match a (possibly smaller) final batch
        if feature_register.size(0) > mix_output.size(0):
            feature_register = feature_register[0:mix_output.size(0), :, :]
        output_od = netOD(Variable(test_feature[start:end].cuda()))
        entropy_batch = criterion(output_od, batch=True)
        entropy.extend(entropy_batch.data.view(-1).cpu().numpy())
        # Euclidean distance from each embedding to each register entry
        mix_output = mix_output.unsqueeze(1).expand(feature_register.size())
        dis = mix_output - feature_register
        dis = torch.pow(dis, 2)
        dis = torch.sum(dis, dim=2)
        dis = torch.sqrt(dis)
        _, predicted_label[start:end] = torch.min(dis, dim=1)
        start = end
    seen_mask = torch.Tensor(np.array(entropy)) < thresh
    if not seen_classes:
        # BUG FIX: `1 - seen_mask` is rejected for bool tensors on modern
        # PyTorch; logical negation is the equivalent, version-safe form.
        seen_mask = ~seen_mask

    od_sum_acc = torch.sum(seen_mask)
    mean_acc, sum_acc, num = compute_per_class_acc_od(util.map_label(test_label, regster_label), predicted_label, regster_label.size(0), seen_mask)

    return mean_acc, sum_acc, num, od_sum_acc
Beispiel #3
0
    def __init__(self, data, opt, _lr=0.001, _beta1=0.5, _nepoch=100, _batch_size=100):
        """Softmax classifier over the seen classes of `data`.

        Builds a LINEAR_LOGSOFTMAX model trained with NLL loss and Adam.
        Trains immediately via self.fit() unless opt['pretrain_classifier']
        names a checkpoint, in which case the weights are loaded instead.
        """
        self.data = data
        self.dataset = opt['dataset']
        self.train_X = data.train_feature
        # labels remapped to contiguous 0..n_seen-1 classifier indices
        self.train_Y = util.map_label(data.train_label, data.seenclasses)
        self.batch_size = _batch_size
        self.nepoch = _nepoch
        self.nclass = data.seenclasses.size(0)
        self.input_dim = data.train_feature.shape[1]
        self.device = opt['device']
        self.model = LINEAR_LOGSOFTMAX(self.input_dim, self.nclass).to(self.device)
        self.model.apply(util.weights_init)
        self.criterion = nn.NLLLoss().to(self.device)

        # reusable device-resident batch buffers filled each iteration
        self.input = torch.FloatTensor(_batch_size, self.input_dim).to(self.device)
        self.label = torch.LongTensor(_batch_size).to(self.device)

        self.lr = _lr
        self.beta1 = _beta1
        # setup optimizer
        self.optimizer = optim.Adam(self.model.parameters(), lr=_lr, betas=(_beta1, 0.999))

        self.index_in_epoch = 0
        self.epochs_completed = 0
        self.ntrain = self.train_X.size()[0]
        self.pretrain_classifier = opt['pretrain_classifier']

        # empty path means "train from scratch"; otherwise load checkpoint
        if self.pretrain_classifier == '':
            self.fit()
        else:
            print("loading the pretrained classifer...")
            self.model.load_state_dict(torch.load(self.pretrain_classifier))
Beispiel #4
0
 def sample():  # get a batch of seen class data and attributes
     """Draw one seen-class batch into the module-level input buffers.

     Copies features, attributes, raw labels, and seen-class-mapped labels
     into input_res / input_att / input_label_ori / input_label.
     """
     batch_feature, batch_label, batch_att = data.next_batch(
         opt['batch_size'])
     input_res.copy_(batch_feature)
     input_att.copy_(batch_att)
     input_label_ori.copy_(batch_label)
     # map raw dataset label ids to contiguous 0..n_seen-1 indices
     input_label.copy_(util.map_label(batch_label, data.seenclasses))
Beispiel #5
0
    def val(self, test_X, test_label, target_classes, mask=None):
        """Run batch-wise inference on test_X and return per-class accuracy.

        Supports the TDE variant of the model when self.use_tde is set; the
        optional `mask` is forwarded to compute_per_class_acc.
        """
        ntest = test_X.size(0)
        predicted_label = torch.LongTensor(test_label.size())
        self.model.eval()
        start = 0
        while start < ntest:
            end = min(ntest, start + self.batch_size)
            batch = test_X[start:end]
            if self.cuda:
                batch = batch.cuda()
            inputX = Variable(batch)
            with torch.no_grad():
                if self.use_tde:
                    output, _ = self.model(inputX,
                                           label=None,
                                           embed=self.embed_mean)
                else:
                    output = self.model(inputX)
            predicted_label[start:end] = torch.max(output.data, 1)[1]
            start = end

        return self.compute_per_class_acc(
            util.map_label(test_label, target_classes), predicted_label,
            target_classes.size(0), mask)
Beispiel #6
0
 def sample_unseen():  # get a batch of unseen classes data and attributes
     """Draw one unseen-class batch into the module-level unseen buffers.

     Copies features, attributes, raw labels, and unseen-class-mapped labels
     into unseen_res / unseen_att / unseen_label_ori / unseen_label.
     """
     batch_feature, batch_label, batch_att = data.next_batch_unseen(
         opt['batch_size'])
     unseen_res.copy_(batch_feature)
     unseen_att.copy_(batch_att)
     unseen_label_ori.copy_(batch_label)
     # map raw dataset label ids to contiguous 0..n_unseen-1 indices
     unseen_label.copy_(util.map_label(batch_label, data.unseenclasses))
def next_feed_dict(data, opt):
    """Fetch one seen-class batch plus a fresh Gaussian noise sample.

    Returns:
        (features, attributes, mapped_labels, noise) where labels are mapped
        onto 0..n_seen-1 and noise has shape [batch_size, nz], float32.
    """
    features, raw_labels, attributes = data.next_batch(opt.batch_size)
    mapped_labels = util.map_label(raw_labels, data.seenclasses)
    noise = np.random.normal(0, 1,
                             [opt.batch_size, opt.nz]).astype(np.float32)

    return features, attributes, mapped_labels, noise
Beispiel #8
0
def run_eval_ol(netAV, netVA, test_feature, test_label, regster_label, feature_register, opt):
    """Online nearest-register evaluation.

    Embeds test features with netVA, predicts the nearest feature_register
    entry by Euclidean distance, and after each batch averages every register
    row with its nearest embedding from that batch (online update). netAV is
    unused in this body but kept for a uniform eval interface.

    Returns:
        (mean_acc, sum_acc, num) from compute_per_class_acc_zsl.
    """
    n_test = test_feature.size(0)
    predicted_label = torch.LongTensor(test_label.size())
    start = 0
    for i in range(0, n_test, opt.batch_size):
        end = min(n_test, start + opt.batch_size)
        with torch.no_grad():
            _, mix_output = netVA(Variable(test_feature[start:end].cuda()))
        # shrink the register view to match a (possibly smaller) final batch
        if feature_register.size(0) > mix_output.size(0):
            feature_register = feature_register[0:mix_output.size(0), :, :]
        mix_output_org = mix_output.clone()
        mix_output = mix_output.unsqueeze(1).expand(feature_register.size())
        # Euclidean distance from each embedding to each register entry
        dis = mix_output - feature_register
        dis = torch.pow(dis, 2)
        dis = torch.sum(dis, dim=2)
        dis = torch.sqrt(dis)
        #print(dis.size())
        _ , predicted_label[start:end] = torch.min(dis, dim=1)
        ######
        # online register update: average each register row with the batch
        # embedding nearest to it
        _ , register_nb_idx = torch.min(dis, dim=0)
        update_f = torch.index_select(mix_output_org, 0, register_nb_idx).unsqueeze(0)
        #print(update_f.size())
        feature_register = (feature_register + update_f) / 2
        ######
        start = end
    mean_acc, sum_acc, num = compute_per_class_acc_zsl(util.map_label(test_label, regster_label), predicted_label, regster_label.size(0))

    return mean_acc, sum_acc, num
Beispiel #9
0
def sample():
    # Sample a seen-class batch into the module-level input buffers;
    # labels are remapped to contiguous 0..n_seen-1 indices.
    batch_feature, batch_label, batch_att = data.next_batch(opt.batch_size)
    # truncate feature dimension when the loader returns wider features
    if batch_feature.size(1) > opt.resSize:
        batch_feature = batch_feature[:, 0:opt.resSize]
    input_res.copy_(batch_feature)
    input_att.copy_(batch_att)
    input_label.copy_(util.map_label(batch_label, data.seenclasses))
Beispiel #10
0
    def __init__(self,
                 map,
                 latenSize,
                 data_loader,
                 _nclass,
                 _cuda,
                 generalized=True):
        """Evaluate a frozen mapping network on seen/unseen test splits.

        In generalized mode computes self.acc_seen, self.acc_unseen and their
        harmonic mean self.H; otherwise computes ZSL accuracy (self.acc) on
        the unseen split only. `map` shadows the builtin but the parameter
        name is kept for caller compatibility.
        """
        self.test_seen_feature = data_loader.test_seen_feature
        self.test_seen_label = data_loader.test_seen_label
        self.test_unseen_feature = data_loader.test_unseen_feature
        self.test_unseen_label = data_loader.test_unseen_label
        self.seenclasses = data_loader.seenclasses
        self.unseenclasses = data_loader.unseenclasses
        # `reduce=False` is deprecated; reduction='none' is the equivalent
        # modern spelling (and matches self.l2_distance below).
        self.similarity = nn.MSELoss(reduction='none')
        # self.attribute=data_loader.attribute.cuda()
        self.nclass = _nclass
        self.input_dim = self.test_seen_feature.size(1)
        self.latent_dim = latenSize
        self.cuda = _cuda
        self.std = 0.1
        self.criterion = nn.NLLLoss()
        self.l2_distance = nn.MSELoss(reduction='none')
        self.map = map
        for p in self.map.parameters():  # reset requires_grad
            p.requires_grad = False
        # setup optimizer
        self.index_in_epoch = 0
        self.epochs_completed = 0

        if generalized:
            self.attribute = data_loader.attribute.cuda()
            self.acc_seen = self.val(self.test_seen_feature,
                                     self.test_seen_label, self.seenclasses)
            self.acc_unseen = self.val(self.test_unseen_feature,
                                       self.test_unseen_label,
                                       self.unseenclasses)
            # harmonic mean of seen/unseen accuracies
            self.H = 2 * self.acc_seen * self.acc_unseen / (self.acc_seen +
                                                            self.acc_unseen)

            # print('Final: acc_seen=%.4f, acc_unseen=%.4f, h=%.4f' % (self.acc_seen, self.acc_unseen, self.H))
        else:
            self.attribute = data_loader.attribute_unseen.cuda()
            self.acc = self.val(
                self.test_unseen_feature,
                util.map_label(self.test_unseen_label, self.unseenclasses),
                util.map_label(self.unseenclasses, self.unseenclasses))
Beispiel #11
0
 def val(self, test_X, test_label, target_classes,second=False):
     """Batch-wise inference returning accuracy, predictions and raw outputs.

     The `second` flag is unused in this body but kept for caller
     compatibility.

     Returns:
         (acc, predicted_label, all_output, acc_all): mean per-class accuracy,
         predicted labels, concatenated model outputs, and per-class
         accuracies from compute_every_class_acc.
     """
     ntest = test_X.size()[0]
     predicted_label = torch.LongTensor(test_label.size())
     outputs = []
     start = 0
     for i in range(0, ntest, self.batch_size):
         end = min(ntest, start+self.batch_size)
         batch = test_X[start:end]
         if self.cuda:
             batch = batch.cuda()
         # `volatile=True` is deprecated; torch.no_grad() disables autograd
         with torch.no_grad():
             output = self.model(Variable(batch))
         # collect batch outputs; concatenated once after the loop instead of
         # re-concatenating on every iteration (avoids quadratic copying)
         outputs.append(output)
         _, predicted_label[start:end] = torch.max(output.data, 1)
         start = end
     all_output = torch.cat(outputs, 0) if outputs else None
     acc = self.compute_per_class_acc(util.map_label(test_label, target_classes), predicted_label, target_classes.size(0))
     acc_all = self.compute_every_class_acc(util.map_label(test_label, target_classes), predicted_label, target_classes.size(0))
     return acc, predicted_label, all_output, acc_all
Beispiel #12
0
 def compute_per_class_acc_gzsl(self, test_label, predicted_label,
                                target_classes, mask):
     """Mean per-class accuracy over target_classes, gated by `mask`.

     Only predictions where `mask` is truthy count as correct. Classes with
     no test samples contribute 0 instead of raising ZeroDivisionError.
     """
     acc_per_class = 0
     test_label = util.map_label(
         test_label, target_classes)  # required to map for both classifiers
     for i in range(target_classes.size(0)):
         idx = (test_label == i)
         n_i = float(torch.sum(idx))
         if n_i == 0:
             # BUG FIX: the original divided by zero when a class had no
             # samples in test_label; skip such classes (counts as 0 acc).
             continue
         acc_per_class += float(
             torch.sum((test_label[idx] == predicted_label[idx]) *
                       mask[idx])) / n_i
     acc_per_class = acc_per_class / float(target_classes.size(0))
     return acc_per_class
def get_att_and_label(classes, attribute, num):
    """Fill the global input_att / input_label buffers.

    Writes `num` copies of each class's attribute vector (and its class id)
    into consecutive rows, then remaps input_label onto seen-class indices.
    """
    nclass = classes.size(0)
    for idx in range(nclass):
        cls = classes[idx]
        cls_att = attribute[cls]
        input_att.narrow(0, idx * num, num).copy_(cls_att.repeat(num, 1))
        input_label.narrow(0, idx * num, num).copy_(cls)

    input_label.copy_(util.map_label(input_label, data.seenclasses))
Beispiel #14
0
def eval_trainset(train_X, train_label, target_classes, batch_size):
    """Evaluate train-set features by KNN in the joint embedding space.

    Embeds image features and seen-class attributes through the global netE,
    L2-normalizes both, and predicts the class whose attribute embedding has
    the largest dot product with each image embedding.

    Returns:
        (acc_softmax, acc_knn): acc_softmax is currently always 0.0 (the
        softmax path is commented out); acc_knn is the mean per-class KNN
        accuracy.
    """
    # extract image embed for unseen classes
    ntest = train_X.size()[0]
    nclass = target_classes.size(0)
    test_img_embed = torch.FloatTensor(ntest, opt.fc2_size)
    test_img_cls = torch.FloatTensor(ntest, opt.ntrain_class)  # unused: softmax path disabled below
    #test_text_embed = torch.FloatTensor(nclass, opt.embedSize)
    start = 0
    for ii in range(0, ntest, batch_size):
        end = min(ntest, start + batch_size)
        test_feature = train_X[start:end]
        # zero attribute placeholders: only the image branch of netE is used here
        att_empty_1 = torch.zeros(end - start, opt.attSize)
        att_empty_2 = torch.zeros(end - start, opt.nclass_all)
        if opt.cuda:
            test_feature = test_feature.cuda()
            att_empty_1 = att_empty_1.cuda()
            att_empty_2 = att_empty_2.cuda()
        img_embed_com, _, img_cls, _, _ = netE(test_feature, att_empty_1,
                                               att_empty_2)
        img_embed_com_l2 = func.normalize(img_embed_com, p=2, dim=1)
        test_img_embed[start:end, :] = img_embed_com_l2.data.cpu()
        #test_img_cls[start:end, :] = img_cls.data.cpu()
        start = end

    acc_softmax = 0.0

    ## extrat attribute embeddings of seen classes
    # zero feature placeholder: only the attribute branch of netE is used here
    res_empty = torch.zeros(nclass, opt.resSize)
    if opt.cuda:
        att_feature = data.att_seen.cuda()
        att_confuse_feature = data.att_confuse_seen.cuda()
        res_empty = res_empty.cuda()

    _, text_embed_com, _, _, _ = netE(res_empty, att_feature,
                                      att_confuse_feature)
    text_embed_com_l2 = func.normalize(text_embed_com, p=2, dim=1)
    test_text_embed = text_embed_com_l2.data.cpu()

    # KNN: compute sim matrix
    dis_data = torch.matmul(test_img_embed,
                            torch.transpose(test_text_embed, 0, 1))
    _, predicted_label = torch.max(dis_data, 1)
    acc_per_class = torch.FloatTensor(nclass).fill_(0)
    test_label = util.map_label(train_label, target_classes)
    for i in range(nclass):
        idx = (test_label == i)
        # NOTE(review): a class with no samples divides by zero here (NaN in
        # the mean) — confirm every target class appears in train_label
        acc_per_class[i] = torch.sum(test_label[idx] == predicted_label[idx],
                                     dtype=torch.float) / torch.sum(idx)

    acc_knn = acc_per_class.mean()

    return acc_softmax, acc_knn
    def val(self, test_X, test_label, target_classes, epoch):
        """Restore the classifier checkpoint for `epoch` and evaluate (TF1).

        Rebuilds the classification graph (reuse=True), restores weights from
        self.modeldir, predicts labels batch-wise, and returns the mean
        per-class accuracy over target_classes. Exits the process if the
        checkpoint cannot be restored.
        """
        start = 0
        ntest = test_X.shape[0]
        predicted_label = np.empty_like(test_label)

        self.input1 = tf.placeholder(tf.float32, [None, self.input_dim],
                                     name='test_features')

        self.classificationLogits = classificationLayer(self.input1,
                                                        self.nclass,
                                                        reuse=True,
                                                        isTrainable=False)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                       scope='classification')

            self.saver = tf.train.Saver(var_list=params)

            for var in params:
                print(var.name + "\t")

            string = self.modeldir + '/models_' + str(epoch) + '.ckpt'
            try:
                self.saver.restore(sess, string)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; a missing or corrupt
                # checkpoint still aborts cleanly.
                print("Previous weights not found of classifier")
                sys.exit(0)

            print("Model loaded")
            self.saver = tf.train.Saver()

            for i in range(0, ntest, self.batch_size):
                end = min(ntest, start + self.batch_size)
                output = sess.run([self.classificationLogits],
                                  feed_dict={self.input1: test_X[start:end]})
                predicted_label[start:end] = np.argmax(np.squeeze(
                    np.array(output)),
                                                       axis=1)
                start = end

            acc = self.compute_per_class_acc(
                util.map_label(test_label, target_classes), predicted_label,
                target_classes.shape[0])
        return acc
Beispiel #16
0
def sample(taxonomy=False):
    """Draw one seen-class batch with species/genus/family attributes into
    the module-level input buffers.

    When taxonomy=True, the species-level attributes in input_att are
    overwritten with either genus- or family-level attributes, chosen
    at random (randint(0, 9) % 2 gives an even coin flip).
    """
    batch_feature, batch_label, batch_att, batch_label_genus, batch_att_genus, batch_label_family, batch_att_family = data.next_batch_3level(opt.batch_size)
    input_res.copy_(batch_feature)
    input_att.copy_(batch_att)
    input_att_genus.copy_(batch_att_genus)
    input_att_family.copy_(batch_att_family)
    # species labels remapped to 0..n_seen-1; genus/family kept as-is
    input_label.copy_(util.map_label(batch_label, data.seenclasses))
    input_label_genus.copy_(batch_label_genus)
    input_label_family.copy_(batch_label_family)

    if taxonomy:
        if random.randint(0, 9) % 2 == 0:
            input_att.copy_(batch_att_genus)
        else:
            input_att.copy_(batch_att_family)
Beispiel #17
0
    def val_zsl(self, test_X, test_label, target_classes): 
        """ZSL evaluation: batch-wise inference returning per-class accuracy."""
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start+self.batch_size)
            batch = test_X[start:end]
            if self.cuda:
                batch = batch.cuda()
            # `volatile=True` is deprecated; torch.no_grad() disables autograd
            with torch.no_grad():
                output = self.model(Variable(batch))
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end

        acc = self.compute_per_class_acc_zsl(util.map_label(test_label, target_classes), predicted_label, target_classes.size(0))
        return acc
Beispiel #18
0
    def val(self, test_X, test_label, target_classes): 
        """Batch-wise inference on test_X; returns mean per-class accuracy."""
        with torch.no_grad():
            ntest = test_X.size(0)
            predicted_label = torch.LongTensor(test_label.size())
            start = 0
            while start < ntest:
                end = min(ntest, start + self.batch_size)
                batch = test_X[start:end]
                output = self.model(batch.cuda() if self.cuda else batch)
                predicted_label[start:end] = output.detach().max(1)[1]
                start = end

            acc = self.compute_per_class_acc(
                util.map_label(test_label, target_classes),
                predicted_label, target_classes.size(0))
        return acc
Beispiel #19
0
def gen_data():
    """Collect per-model unseen-class features for evaluation.

    For each entry in the global netG dict: a None generator means "use the
    real unseen features"; otherwise opt.syn_num features are synthesized per
    unseen class. Labels are remapped to 0..n_unseen-1 and everything is
    converted to numpy arrays.

    Returns:
        (X, y): dicts keyed like netG, holding feature and label arrays.
    """
    X = {}
    y = {}
    print('generating feature...')
    for k, G in netG.items():
        if G is None:  # Real
            X[k], y[k] = get_real_feature(data)
        else:
            # reseed so every generator draws the same noise sequence
            reset_seed(opt)
            X[k], y[k] = generate_syn_feature(G, data.unseenclasses,
                                              data.attribute, opt.syn_num, opt)
        y[k] = util.map_label(y[k], data.unseenclasses)
        X[k] = X[k].cpu().numpy()
        y[k] = y[k].cpu().numpy()

    print('done')
    return X, y
Beispiel #20
0
    def val(self, test_X, test_label, target_classes):
        """Classify precomputed fake test attributes batch-wise.

        Returns:
            (acc, all_output): mean per-class accuracy and the full logit
            matrix for every test sample.
        """
        fake_test_attr = self.opt['fake_test_attr']
        ntest = test_X.size(0)
        predicted_label = torch.LongTensor(test_label.size())
        all_output = torch.FloatTensor(test_label.size(0), self.nclass).cuda()
        start = 0
        while start < ntest:
            end = min(ntest, start + self.batch_size)
            attr_batch = fake_test_attr[start:end]
            output = self.model(attr_batch.cuda() if self.cuda else attr_batch)
            predicted_label[start:end] = output.data.max(1)[1]
            all_output[start:end, :] = output.data
            start = end

        acc = self.compute_per_class_acc(
            util.map_label(test_label, target_classes), predicted_label,
            target_classes.size(0))
        return acc, all_output
Beispiel #21
0
    def val(self, test_X, test_label, target_classes):
        """Map features through self.map, classify the latent means, and
        return the mean per-class accuracy over target_classes."""
        ntest = test_X.size(0)
        predicted_label = torch.LongTensor(test_label.size())
        start = 0
        while start < ntest:
            end = min(ntest, start + self.batch_size)
            batch = test_X[start:end]
            if self.cuda:
                batch = batch.cuda()
            # only the latent means feed the classifier; the remaining
            # mapper outputs are ignored here
            mus, stds, dis_out, pred, encoder_out = self.map(batch)
            output = self.model(mus)
            predicted_label[start:end] = output.data.max(1)[1]
            start = end

        return self.compute_per_class_acc(
            util.map_label(test_label, target_classes), predicted_label,
            target_classes.size(0))
Beispiel #22
0
 def val(self, test_X, test_label, target_classes):
     """Batch-wise inference on test_X; returns mean per-class accuracy."""
     start = 0
     ntest = test_X.size()[0]
     predicted_label = torch.LongTensor(test_label.size())
     for i in range(0, ntest, self.batch_size):
         end = min(ntest, start + self.batch_size)
         batch = test_X[start:end]
         if self.cuda:
             batch = batch.cuda()
         # inference only: torch.no_grad() replaces the requires_grad=False
         # wrapper and also skips graph construction entirely
         with torch.no_grad():
             output = self.model(Variable(batch))
         _, predicted_label[start:end] = torch.max(output.data, 1)
         start = end
     acc = self.compute_per_class_acc(
         util.map_label(test_label, target_classes), predicted_label,
         target_classes.size(0))
     return acc
Beispiel #23
0
    def val(self, test_X, test_label, target_classes):
        """Batch-wise inference on test_X; returns mean per-class accuracy."""
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start + self.batch_size)
            if self.cuda:
                with torch.no_grad():
                    inputX = Variable(test_X[start:end].cuda())
            else:
                with torch.no_grad():
                    inputX = Variable(test_X[start:end])
                # BUG FIX: the original left the legacy line
                # `inputX = Variable(test_X[start:end], volatile=True)`
                # UN-commented here, so the CPU branch unconditionally
                # overwrote inputX with a deprecated call that raises a
                # TypeError on modern PyTorch. The line is removed.
            output = self.model(inputX)
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end

        acc = self.compute_per_class_acc(util.map_label(test_label, target_classes), predicted_label,
                                         target_classes.size(0))
        return acc
Beispiel #24
0
    def val(self, test_X, test_label, target_classes):
        """Batch-wise inference on test_X; returns mean per-class accuracy."""
        ntest = test_X.size(0)
        predicted_label = torch.LongTensor(test_label.size())
        start = 0
        while start < ntest:
            end = min(ntest, start + self.batch_size)
            batch = test_X[start:end]
            with torch.no_grad():
                output = self.model(batch.cuda() if self.cuda else batch)
            predicted_label[start:end] = output.data.max(1)[1]
            start = end

        return self.compute_per_class_acc(
            util.map_label(test_label, target_classes), predicted_label,
            target_classes.size(0))
Beispiel #25
0
    def fit(self):
        """Train for self.nepoch epochs, printing train and validation
        accuracy each epoch; save a checkpoint when no pretrained
        classifier path was configured."""
        for epoch in range(self.nepoch):
            for _ in range(0, self.ntrain, self.batch_size):
                self.model.zero_grad()
                feats, labels = self.next_batch(self.batch_size)
                self.input.copy_(feats)
                self.label.copy_(labels)
                loss = self.criterion(self.model(Variable(self.input)),
                                      Variable(self.label))
                loss.backward()
                self.optimizer.step()
            train_acc = self.val(self.train_X, self.train_Y,
                                 self.data.seenclasses)
            print('epoch:%d, acc %.4f' % (epoch, train_acc))
            val_acc = self.val(
                self.data.test_seen_feature,
                util.map_label(self.data.test_seen_label,
                               self.data.seenclasses),
                self.data.seenclasses)
            print('val_acc:%.4f' % val_acc)

        if self.pretrain_classifier == '':
            import os
            os.makedirs('./checkpoint', exist_ok=True)
            torch.save(self.model.state_dict(),
                       './checkpoint/cl_' + self.dataset + '.pth')
Beispiel #26
0
    def __init__(self,
                 opt,
                 _train_X,
                 _train_Y,
                 data_loader,
                 _nclass,
                 _lr=0.001,
                 _beta1=0.5,
                 _nepoch=30,
                 _batch_size=100,
                 generalized=False):
        """Attribute-space (V2S) softmax classifier.

        Builds a LINEAR_LOGSOFTMAX model over attribute inputs, trains it with
        NLL loss / Adam, and immediately runs fit() (GZSL) or fit_zsl() (ZSL),
        storing the resulting accuracies on the instance.
        """
        self.train_X = _train_X  # 15000x2048
        if generalized:
            self.train_Y = _train_Y
        else:
            # ZSL: remap raw labels to contiguous 0..n_unseen-1 indices
            self.train_Y = util.map_label(_train_Y,
                                          data_loader.unseenclasses)  # 15000
        self.test_seen_feature = data_loader.test_seen_feature
        self.test_seen_label = data_loader.test_seen_label
        self.test_unseen_feature = data_loader.test_unseen_feature
        self.test_unseen_label = data_loader.test_unseen_label
        self.seenclasses = data_loader.seenclasses
        self.unseenclasses = data_loader.unseenclasses
        self.batch_size = _batch_size
        self.nepoch = _nepoch
        self.nclass = _nclass  #200 or 50
        # input dim is the attribute size: this classifier consumes attributes
        self.input_dim = data_loader.attribute.size(1)
        self.data_loader = data_loader
        self.attr_dim = self.data_loader.attribute.size(1)  #312
        self.cuda = True
        self.model = LINEAR_LOGSOFTMAX(self.input_dim,
                                       self.nclass)  # in:312, out:200
        self.model.apply(util.weights_init)

        self.criterion = nn.NLLLoss()
        # reusable batch buffers filled each training iteration
        self.input = torch.FloatTensor(_batch_size, self.input_dim)  #100x312
        self.label = torch.LongTensor(_batch_size)  #312
        self.opt = opt

        self.lr = _lr
        self.beta1 = _beta1
        # setup optimizer
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=_lr,
                                    betas=(_beta1, 0.999))

        if self.cuda:
            self.model.cuda()
            self.criterion.cuda()
            self.input = self.input.cuda()
            self.label = self.label.cuda()

        self.index_in_epoch = 0
        self.epochs_completed = 0
        self.ntrain = self.train_Y.size()[0]  #19557

        # training runs eagerly in the constructor; results land on self
        if generalized:
            self.acc_seen, self.seen_out, self.acc_unseen, self.unseen_out, self.H = self.fit(
            )
            # print('Final: acc_seen=%.4f, acc_unseen=%.4f, h=%.4f' % (self.acc_seen, self.acc_unseen, self.H))
            print('V2S Softmax Seen Acc:%.2f, Unseen Acc:%.2f, H Acc:%.2f' %
                  (self.acc_seen * 100, self.acc_unseen * 100, self.H * 100))
        else:
            self.acc, self.output = self.fit_zsl()
            print('V2S Softmax   : %.2f' % (self.acc * 100))
def load_data():
    """Load the full training split and copy attributes plus seen-class
    mapped labels into the global input buffers."""
    train_label, attribute, train_feature = data.load_dataset()
    input_att.copy_(attribute)
    input_label.copy_(util.map_label(train_label, data.seenclasses))
# Adam optimizers for the generator and the classifier, sharing the same
# learning rate and beta1 from the command-line options.
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerC = optim.Adam(netC.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

######  summary writer
writer = save_log(opt.log_path, mode)


# train a classifier on seen classes, obtain teacher

if opt.need_teacher:
	if opt.teacher_type =='seen_classes':
		print('pretrain classifier for seen classes!')
		
		pretrain_cls =	classifier.CLASSIFIER(data.train_feature, util.map_label(data.train_label, data.seenclasses),   
											  data.test_seen_feature,data.test_seen_label,data.seenclasses,data,data.seenclasses.size(0), 
											  opt.resSize, opt.cuda, 0.001,	0.5, 100, 100, opt.pretrain_classifier,opt.teacher_type)
	elif opt.teacher_type =='all_classes':
		print('pretrain classifier for all classes!')		
		
		#############  train/test split  4/1
		feature_all =  torch.cat((data.train_feature,data.test_seen_feature,data.test_unseen_feature),0)
		label_all =  torch.cat((data.train_label,data.test_seen_label,data.test_unseen_label),0)
			
		data_num = label_all.shape[0]
		print('all_data_num',data_num)
		indices = list(range(data_num))
		np.random.shuffle(indices)
		train_indices, val_indices = indices[int(data_num*opt.val_split):], indices[:int(data_num*opt.val_split)]
		#print('train_indices',train_indices)
Beispiel #29
0
# Abort unless at least one of the train/test modes was requested.
if opt.train == False and opt.test == False:
    print ("Program terminated as no train or test option is set true")
    sys.exit(0)
##################################################################################

### data reading
data = util.DATA_LOADER(opt)
print("#####################################")
print("# of training samples: ", data.ntrain)
print(data.seenclasses)
print(data.unseenclasses)
print(data.ntrain_class)
print(data.ntest_class)
print(data.train_mapped_label.shape)
print(data.allclasses)
print("#####################################")
##################################################################################

# Classifier over seen classes; labels remapped to 0..n_seen-1.
train_cls = CLASSIFIER(data.train_feature, util.map_label(data.train_label, data.seenclasses), data.seenclasses.shape[0], opt.resSize, opt.logdir,opt.modeldir,opt.lr, opt.beta1, opt.nepoch, opt.batch_size,'')
if opt.train:
    train_cls.train()
    
# Evaluate the saved classifier on the seen test split and the train split.
if opt.test:
    acc=train_cls.val(data.test_seen_feature,data.test_seen_label, data.seenclasses)
    print("Test Accuracy is:"+str(acc))
    acc=train_cls.val(data.train_feature,data.train_label, data.seenclasses)
    print("Train Accuracy is:"+str(acc))
    #acc=train_cls.val(data.test_unseen_feature,data.test_unseen_label, data.unseenclasses)
    #print("Test Different Labels Accuracy is:"+str(acc))
                                })
        syn_res = np.vstack((syn_res, syn_features))
        temp = np.repeat(iclass, [opt.syn_num], axis=0)
        #print (temp.shape)
        syn_label = np.concatenate((syn_label, temp))

    #print (syn_res.shape)
    #print (syn_label.shape)
    np.savetxt('syn_res.txt', syn_res, delimiter=',')
    np.savetxt('syn_label.txt', syn_label, delimiter=',')

############## evaluation ################################################
# GZSL: train the final classifier on real seen features plus synthesized
# unseen features over all classes; ZSL: on synthesized unseen features only.
if opt.gzsl:
    train_X = np.concatenate((data.train_feature, syn_res), axis=0)
    train_Y = np.concatenate((data.train_label, syn_label), axis=0)
    nclass = opt.nclass_all
    train_cls = classifier2.CLASSIFICATION2(train_X, train_Y, data, nclass,
                                            'logs_gzsl_classifier',
                                            'models_gzsl_classifier', 0.001,
                                            0.5, 25, opt.syn_num, True)
    print('unseen=%.4f, seen=%.4f, h=%.4f' %
          (train_cls.acc_unseen, train_cls.acc_seen, train_cls.H))

else:
    # labels remapped to 0..n_unseen-1 for the unseen-only classifier
    train_cls = classifier2.CLASSIFICATION2(
        syn_res, util.map_label(syn_label, data.unseenclasses), data,
        data.unseenclasses.shape[0], 'logs_zsl_classifier',
        'models_zsl_classifier', 0.001, 0.5, 25, opt.syn_num, False)
    acc = train_cls.acc
    print('unseen class accuracy= ', acc)