Example #1
0
    def val_forward(self, x):
        """Validation-time forward pass for the manifold (distance) model.

        x is the dataloader batch: x[0] holds the images and x[2] the
        object labels. Every (attr, obj) pair embedding is compared to the
        image features with self.pdist_func, and the per-pair distances are
        folded into prediction tensors by utils.generate_prediction_tensors.

        Returns (None, [attr_pred, obj_pred, score_tensor]) — no loss at
        validation time.
        """
        image, obj_label = x[0], x[2]
        n = image.size(0)

        img_feat = self.image_embedder(image)

        # Embed all candidate (attr, obj) pairs in a single batch.
        # volatile=True: legacy pre-0.4 PyTorch way to disable autograd.
        attr_ids, obj_ids = zip(*self.pairs)
        attr_ids = Variable(torch.LongTensor(attr_ids), volatile=True).cuda()
        obj_ids = Variable(torch.LongTensor(obj_ids), volatile=True).cuda()
        pair_embeds = self.compose(attr_ids, obj_ids)

        dists = {}
        for idx, pair in enumerate(self.pairs):
            # Broadcast this pair's embedding across the batch.
            candidate = pair_embeds[idx, None].expand(n, pair_embeds.size(1))
            dists[pair] = self.pdist_func(img_feat, candidate).data  # (B, 1)

        attr_pred, obj_pred, score_tensor = utils.generate_prediction_tensors(
            dists,
            self.dset,
            obj_label.data,
            is_distance=True,
            source='manifold')

        return None, [attr_pred, obj_pred, score_tensor]
Example #2
0
def evaluate_svms():

    attr_clfs = [
        pickle.load(open('%s/svm/attr_%d' % (args.data_dir, attr)))
        for attr in range(len(dataset.attrs))
    ]
    obj_clfs = [
        pickle.load(open('%s/svm/obj_%d' % (args.data_dir, obj)))
        for obj in range(len(dataset.objs))
    ]

    # Calibrate all classifiers first
    Y = [(train_attrs == attr).astype(np.int)
         for attr in range(len(dataset.attrs))]
    for attr in tqdm.tqdm(range(len(dataset.attrs))):
        clf = attr_clfs[attr]
        calibrated = CalibratedClassifierCV(clf, method='sigmoid', cv='prefit')
        calibrated.fit(X_train, Y[attr])
        attr_clfs[attr] = calibrated

    Y = [(train_objs == obj).astype(np.int)
         for obj in range(len(dataset.objs))]
    for obj in tqdm.tqdm(range(len(dataset.objs))):
        clf = obj_clfs[obj]
        calibrated = CalibratedClassifierCV(clf, method='sigmoid', cv='prefit')
        calibrated.fit(X_train, Y[obj])
        obj_clfs[obj] = calibrated

    # Generate all the scores
    attr_scores, obj_scores = [], []
    for attr in tqdm.tqdm(range(len(dataset.attrs))):
        clf = attr_clfs[attr]
        score = clf.predict_proba(X_test)[:, 1]
        attr_scores.append(score)
    attr_scores = np.vstack(attr_scores)

    for obj in tqdm.tqdm(range(len(dataset.objs))):
        clf = obj_clfs[obj]
        score = clf.predict_proba(X_test)[:, 1]
        obj_scores.append(score)
    obj_scores = np.vstack(obj_scores)

    attr_pred = torch.from_numpy(attr_scores).transpose(0, 1)
    obj_pred = torch.from_numpy(obj_scores).transpose(0, 1)

    x = [
        None,
        Variable(torch.from_numpy(test_attrs)).long(),
        Variable(torch.from_numpy(test_objs)).long(),
        Variable(torch.from_numpy(test_pairs)).long()
    ]
    attr_pred, obj_pred, _ = utils.generate_prediction_tensors(
        [attr_pred, obj_pred], dataset, x[2].data, source='classification')
    attr_match, obj_match, zsl_match, gzsl_match, fixobj_match = utils.performance_stats(
        attr_pred, obj_pred, x)
    print attr_match.mean(), obj_match.mean(), zsl_match.mean(
    ), gzsl_match.mean(), fixobj_match.mean()
Example #3
0
    def val_forward(self, x):
        """Validation forward pass for the independent-classifier model.

        Softmaxes the attribute and object classifier outputs over the
        class dimension and converts them into prediction tensors.
        Returns (None, [attr_pred, obj_pred, score_tensor]).
        """
        image = x[0]
        attr_scores = F.softmax(self.attr_clf(image), dim=1).data
        obj_scores = F.softmax(self.obj_clf(image), dim=1).data

        preds = utils.generate_prediction_tensors(
            [attr_scores, obj_scores],
            self.dset,
            x[2].data,
            source='classification')
        attr_pred, obj_pred, score_tensor = preds
        return None, [attr_pred, obj_pred, score_tensor]
Example #4
0
    def val_forward(self, x):
        """Validation forward pass for the composed-classifier model.

        self.compose produces one linear classifier per (attr, obj) pair;
        each is dotted with the image features and squashed with a sigmoid
        to give a per-pair score, which utils.generate_prediction_tensors
        turns into prediction tensors (is_distance=False: higher is better).

        Returns (None, [attr_pred, obj_pred, score_tensor]).
        """
        image, obj_label = x[0], x[2]
        n = image.size(0)

        # Compose all pair classifiers in one batch.
        # volatile=True: legacy pre-0.4 PyTorch way to disable autograd.
        attr_ids, obj_ids = zip(*self.pairs)
        attr_ids = Variable(torch.LongTensor(attr_ids), volatile=True).cuda()
        obj_ids = Variable(torch.LongTensor(obj_ids), volatile=True).cuda()
        clf_weights = self.compose(attr_ids, obj_ids)

        scores = {}
        for idx, pair in enumerate(self.pairs):
            # Broadcast this pair's classifier weights across the batch.
            weights = clf_weights[idx, None].expand(n, clf_weights.size(1))
            pair_score = F.sigmoid((image * weights).sum(1)).unsqueeze(1)
            scores[pair] = pair_score.data

        attr_pred, obj_pred, score_tensor = utils.generate_prediction_tensors(
            scores,
            self.dset,
            obj_label.data,
            is_distance=False,
            source='manifold')

        return None, [attr_pred, obj_pred, score_tensor]
Example #5
0
def evaluate_tensorcompletion():
    def parse_tensor(fl):
        tensor = scipy.io.loadmat(fl)
        nz_idx = zip(*(tensor['subs']))
        composite_clfs = np.zeros(
            (len(dataset.attrs), len(dataset.objs), X.shape[1]))
        composite_clfs[nz_idx[0], nz_idx[1],
                       nz_idx[2]] = tensor['vals'].squeeze()
        return composite_clfs, nz_idx, tensor['vals'].squeeze()

    # see recon error
    tr_file = 'tensor-completion/incomplete/%s.mat' % args.dataset
    ts_file = args.completed

    tr_clfs, tr_nz_idx, tr_vals = parse_tensor(tr_file)
    ts_clfs, ts_nz_idx, ts_vals = parse_tensor(ts_file)

    print tr_vals.min(), tr_vals.max(), tr_vals.mean()
    print ts_vals.min(), ts_vals.max(), ts_vals.mean()

    print 'Completed Tensor: %s' % args.completed

    # see train recon error
    err = 1.0 * ((tr_clfs[tr_nz_idx[0], tr_nz_idx[1], tr_nz_idx[2]] -
                  ts_clfs[tr_nz_idx[0], tr_nz_idx[1], tr_nz_idx[2]])**
                 2).sum() / (len(tr_vals))
    print 'recon error:', err

    # Create and scale classifiers for each pair
    clfs = {}
    test_pair_set = set(map(tuple, dataset.test_pairs.numpy().tolist()))
    for idx, (attr, obj) in tqdm.tqdm(enumerate(dataset.pairs),
                                      total=len(dataset.pairs)):
        clf = LinearSVC(fit_intercept=False)
        clf.fit(np.eye(2), [0, 1])

        if (attr, obj) in test_pair_set:
            X_ = X_test
            Y_ = (test_attrs == attr).astype(np.int) * (test_objs
                                                        == obj).astype(np.int)
            clf.coef_ = ts_clfs[attr, obj][None, :]
        else:
            X_ = X_train
            Y_ = (train_attrs == attr).astype(np.int) * (train_objs
                                                         == obj).astype(np.int)
            clf.coef_ = tr_clfs[attr, obj][None, :]

        calibrated = CalibratedClassifierCV(clf, method='sigmoid', cv='prefit')
        calibrated.fit(X_, Y_)
        clfs[(attr, obj)] = calibrated

    scores = {}
    for attr, obj in tqdm.tqdm(dataset.pairs):
        score = clfs[(attr, obj)].predict_proba(X_test)[:, 1]
        scores[(attr, obj)] = torch.from_numpy(score).float().unsqueeze(1)

    x = [
        None,
        Variable(torch.from_numpy(test_attrs)).long(),
        Variable(torch.from_numpy(test_objs)).long(),
        Variable(torch.from_numpy(test_pairs)).long()
    ]
    attr_pred, obj_pred, _ = utils.generate_prediction_tensors(
        scores, dataset, x[2].data, source='manifold')
    attr_match, obj_match, zsl_match, gzsl_match, fixobj_match = utils.performance_stats(
        attr_pred, obj_pred, x)
    print attr_match.mean(), obj_match.mean(), zsl_match.mean(
    ), gzsl_match.mean(), fixobj_match.mean()