示例#1
0
def evaluate_feats(db,
                   N,
                   feat_pools=feat_pools,
                   d_type='d1',
                   depths=[None, 300, 200, 100, 50, 30, 10, 5, 3, 1]):
    """Evaluate the MMAP of every N-way fusion of features from feat_pools.

    For each combination of N feature names and each retrieval depth, runs
    evaluate_class with a FeatureFusion instance and records one CSV row in
    result_dir/feature_fusion-<d_type>-<N>feats.csv.

    Args:
      db:         database object forwarded to evaluate_class.
      N:          number of features fused per combination.
      feat_pools: pool of feature names combinations are drawn from
                  (module-level default).
      d_type:     distance type forwarded to evaluate_class, e.g. 'd1'.
      depths:     retrieval depths to test; None means unlimited depth.
                  NOTE: default list is never mutated, so sharing it across
                  calls is safe.
    """
    out_path = os.path.join(result_dir,
                            'feature_fusion-{}-{}feats.csv'.format(d_type, N))
    # `with` guarantees the CSV is flushed and closed even if an
    # evaluate_class call raises mid-run (the original leaked the handle).
    with open(out_path, 'w') as result:
        # Header: one "featI" column per fused feature, then the metrics.
        for i in range(N):
            result.write("feat{},".format(i))
        result.write("depth,distance,MMAP")

        for combination in itertools.combinations(feat_pools, N):
            fusion = FeatureFusion(features=list(combination))

            for d in depths:
                APs = evaluate_class(db, f_instance=fusion, d_type=d_type,
                                     depth=d)
                # MMAP = mean over classes of each class's mean AP.
                cls_MAPs = [np.mean(cls_APs) for cls_APs in APs.values()]
                r = "{},{},{},{}".format(",".join(combination), d, d_type,
                                         np.mean(cls_MAPs))
                print(r)
                result.write('\n' + r)
            print()
示例#2
0
        return a


    # Fix the RNG seed so the histogram self-checks below are deterministic.
    np.random.seed(0)
    IMG = sigmoid(np.random.randn(2, 2, 3)) * 255
    IMG = IMG.astype(int)
    # Sanity-check the global histogram: only these 4 bins should be non-zero
    # for this seeded 2x2x3 image (expected indices precomputed offline).
    hist = color.histogram(IMG, type='global', n_bin=4)
    assert np.equal(np.where(hist > 0)[0], np.array([37, 43, 58, 61])).all(), "global histogram implement failed"
    # Same check for the region (sliced) histogram variant.
    hist = color.histogram(IMG, type='region', n_bin=4, n_slice=2)
    assert np.equal(np.where(hist > 0)[0], np.array([58, 125, 165, 235])).all(), "region histogram implement failed"

    # Examine the distance functions on two independent random images.
    np.random.seed(1)
    IMG = sigmoid(np.random.randn(4, 4, 3)) * 255
    IMG = IMG.astype(int)
    hist = color.histogram(IMG, type='region', n_bin=4, n_slice=2)
    IMG2 = sigmoid(np.random.randn(4, 4, 3)) * 255
    IMG2 = IMG2.astype(int)
    hist2 = color.histogram(IMG2, type='region', n_bin=4, n_slice=2)
    # Two disjoint normalized histograms give the maximal distance of 2
    # for both d1 and normalized d2 — TODO confirm against distance() impl.
    assert distance(hist, hist2, d_type='d1') == 2, "d1 implement failed"
    assert distance(hist, hist2, d_type='d2-norm') == 2, "d2 implement failed"

    # Evaluate retrieval over the database: per-class mean AP, then MMAP.
    APs = evaluate_class(db, f_class=Color, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))
示例#3
0
                        inputs = torch.autograd.Variable(
                            torch.from_numpy(img).float())
                    d_hist = vgg_model(inputs)[pick_layer]
                    d_hist = np.sum(d_hist.data.cpu().numpy(), axis=0)
                    d_hist /= np.sum(d_hist)  # normalize
                    samples.append({
                        'img': d_img,
                        'cls': d_cls,
                        'hist': d_hist
                    })
                except BaseException:
                    pass
            cPickle.dump(
                samples, open(os.path.join(cache_dir, sample_cache), "wb",
                              True))

        return samples


if __name__ == "__main__":
    # Evaluate retrieval quality of VGG features over the whole database:
    # print each class's mean AP, then the mean of those means (MMAP).
    DB = Database()
    APs = evaluate_class(DB, f_class=VGGNetFeat, d_type=d_type, depth=depth)

    per_class_map = []
    for label, ap_list in APs.items():
        class_map = np.mean(ap_list)
        print("Class {}, MAP {}".format(label, class_map))
        per_class_map.append(class_map)
    print("MMAP", np.mean(per_class_map))
示例#4
0
        except:
            if verbose:
                print(
                    "Counting histogram..., config=%s, distance=%s, depth=%s" %
                    (sample_cache, d_type, depth))

            samples = []
            data = db.get_data()
            for d in data.itertuples():
                d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
                d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
                samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
            cPickle.dump(
                samples, open(os.path.join(cache_dir, sample_cache), "wb",
                              True))

        return samples


if __name__ == "__main__":
    db = Database()

    # Evaluate Daisy-descriptor retrieval: per-class mean AP, then MMAP.
    APs = evaluate_class(db, f_class=Daisy, d_type=d_type, depth=depth)
    class_means = []
    for label, ap_values in APs.items():
        mean_ap = np.mean(ap_values)
        print("Class {}, MAP {}".format(label, mean_ap))
        class_means.append(mean_ap)
    print("MMAP", np.mean(class_means))
示例#5
0
                    else:
                        inputs = torch.autograd.Variable(
                            torch.from_numpy(img).float())
                    d_hist = res_model(inputs)[pick_layer]
                    d_hist = d_hist.data.cpu().numpy().flatten()
                    d_hist /= np.sum(d_hist)  # normalize
                    samples.append({
                        'img': d_img,
                        'cls': d_cls,
                        'hist': d_hist
                    })
                except:
                    pass
            cPickle.dump(
                samples, open(os.path.join(cache_dir, sample_cache), "wb",
                              True))

        return samples


if __name__ == "__main__":
    # Evaluate ResNet-feature retrieval over the database and report the
    # mean AP of each class followed by the overall MMAP.
    db = Database()
    APs = evaluate_class(db, f_class=ResNetFeat, d_type=d_type, depth=depth)

    scores = []
    for label, ap_list in APs.items():
        score = np.mean(ap_list)
        print("Class {}, MAP {}".format(label, score))
        scores.append(score)
    print("MMAP", np.mean(scores))
示例#6
0
                    "Counting histogram..., config=%s, distance=%s, depth=%s" %
                    (sample_cache, d_type, depth))

            samples = []
            data = db.get_data()
            for d in data.itertuples():
                d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
                d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
                samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
            cPickle.dump(
                samples, open(os.path.join(cache_dir, sample_cache), "wb",
                              True))

        return samples


if __name__ == "__main__":
    db = Database()

    # Sanity check: five 2x2 edge-detection kernels are expected.
    assert edge_kernels.shape == (5, 2, 2)

    # Evaluate edge-histogram retrieval: per-class mean AP, then MMAP.
    APs = evaluate_class(db, f_class=Edge, d_type=d_type, depth=depth)
    per_class = []
    for label, ap_list in APs.items():
        mean_ap = np.mean(ap_list)
        print("Class {}, MAP {}".format(label, mean_ap))
        per_class.append(mean_ap)
    print("MMAP", np.mean(per_class))
示例#7
0
    db = Database()

    # Exhaustively score feature fusions of increasing arity (2 through 7)
    # using the d1 distance; evaluate_feats writes one CSV per arity.
    for arity in range(2, 8):
        evaluate_feats(db, N=arity, d_type='d1')

    # Evaluate a fixed color+daisy fusion over the database and report the
    # mean AP of each class followed by the overall MMAP.
    fusion = FeatureFusion(features=['color', 'daisy'])
    APs = evaluate_class(db, f_instance=fusion, d_type=d_type, depth=depth)

    class_means = []
    for label, ap_list in APs.items():
        mean_ap = np.mean(ap_list)
        print("Class {}, MAP {}".format(label, mean_ap))
        class_means.append(mean_ap)
    print("MMAP", np.mean(class_means))
示例#8
0
                    asy_result.append(
                        pool.apply_async(self.lbp_map,
                                         args=(item.img, radius, n_points)))
                for result, item in tqdm(zip(asy_result, data.itertuples()),
                                         desc="get results"):
                    samples.append({
                        'img': item.img,
                        'cls': item.cls,
                        'hist': result.get()
                    })

            # save and return
            pickle.dump(samples,
                        open(os.path.join(cache_dir, sample_cache), "wb",
                             True))  # 保存在cache_dir/sample_cache下

        return samples


if __name__ == "__main__":
    db = Database()

    # Evaluate LBP-feature retrieval: per-class mean AP, then the MMAP.
    # evaluate_class returns (APs, match) here; the match info is unused.
    APs, _match = evaluate_class(db, f_class=LBP, d_type=d_type, depth=depth)
    class_scores = []
    for label, ap_list in APs.items():
        score = np.mean(ap_list)
        print("Class {}, MAP {}".format(label, score))
        class_scores.append(score)
    print("MMAP", np.mean(class_scores))