Example 1
ScSPM on Caltech101: learn a dictionary by sparse coding on SIFT patches, then extract spatial-pyramid max-pooled features. Both examples assume that img_dataset, workspace_manager and the other feature-extraction and dictionary-learning classes used below are already imported from the surrounding sparse-coding library.
def ScSPM_caltech101_l1():
    # path to the Caltech101 dataset
    data_path = "Caltech101"
    imd = img_dataset(data_path, maxdim=300, online=True, color=False)
    imgs = imd()
    y = imd.y

    # define the architecture
    n_atoms = 1024
    patch_shape = (16, 16)
    step_size = 6
    n_descriptors = int(1e6)
    # the l1 penalty parameter
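    # (lasso coding solves min_x 0.5*||y - D x||_2^2 + lambda*||x||_1,
    # so a larger penalty yields sparser codes)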
    _lambda = 0.15
    normalizer = l2_normalizer
    metadata = {'name': "ScSPM_caltech101",
                'desc': "ScSPM using sparse coding on top of SIFT features",
                'n_atoms': n_atoms,
                'lambda': _lambda,
                'patch_shape': patch_shape,
                'step_size': step_size,
                'n_descriptors': n_descriptors,
                'pooling': 'max pooling',
                'normalization': 'l2'}

    wm = workspace_manager(metadata=metadata)
    wm.set_metadata(metadata)
    wm.save("labels.npy", y)
    sp = spatial_pyramid("ScSPM", workspace=wm, mmap=False)
    n_jobs = 8
    # the SIFT extractor used in the initial dictionary learning phase
    feature_extractor = sift_extractor(n_descriptors=n_descriptors, patch_shape=patch_shape,
                                       mmap=False, n_jobs=n_jobs)
    se = sparse_encoder(algorithm='lasso', params={'lambda': _lambda}, n_jobs=n_jobs)
    odc = online_dictionary_coder(n_atoms=n_atoms, sparse_coder=se,
                                  batch_size=int(1e3), verbose=True, mmap=False, n_jobs=n_jobs)

    # learn the dictionary
    sp.dict_learn(imgs, feature_extractor=feature_extractor, dict_learner=odc)
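    # run the encoder single-threaded from here on; sp.extract below parallelizes over images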
    se.n_jobs = 1
    # extract ScSPM features
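    # levels (1, 2, 4): the sparse codes of dense SIFT descriptors are max-pooled
    # over 1x1, 2x2 and 4x4 spatial grids and the pooled vectors are concatenated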
    sc_spm = sc_spm_extractor(feature_extractor=dsift_extractor(step_size=step_size, patch_size=patch_shape[0]),
                              levels=(1, 2, 4), sparse_coder=se, pooling_operator=sc_max_pooling(),
                              normalizer=normalizer)

    sp.extract(imgs, pyramid_feat_extractor=sc_spm, n_jobs=n_jobs)
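
The ScSPM vectors produced above are typically fed to a linear classifier, as in the original ScSPM setup. A minimal sketch with scikit-learn, assuming the pooled features have been collected into a NumPy array; the file name features.npy is hypothetical, only labels.npy is saved by the code above:

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

X = np.load("features.npy")  # hypothetical: one l2-normalized ScSPM vector per image
y = np.load("labels.npy")    # class labels saved by the pipeline above

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
clf = LinearSVC(C=1.0).fit(X_train, y_train)
print("test accuracy: %.3f" % clf.score(X_test, y_test))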

Example 2
Build an HMP (hierarchical matching pursuit) network on Caltech101 from a parameter dictionary.
def build_hmp_net(param_set, workspace=None):

    # path to the Caltech101 dataset
    data_path = "Caltech101"
    imd = img_dataset(data_path, maxdim=300, online=True, color=param_set.get("color"))
    imgs = imd()
    y = imd.y
    workspace.save("labels.npy", y)
    param_set["dataset_path"] = data_path

    hmp_net = hmp_network(dict_learners=param_set.get("dict_learners"), n_atoms=param_set.get("n_atoms"),
                          filter_sizes=param_set.get("filter_sizes"), n_nonzero_coefs=param_set.get("n_nonzero_coefs"),
                          step_sizes=param_set.get("step_sizes"), pooling_sizes=param_set.get("pooling_sizes"),
                          spp_pooler=param_set.get("spp_pooler"), pooling_steps=param_set.get("pooling_steps"),
                          spm_levels=param_set.get("spm_levels"), rebuild_spp=param_set.get("rebuild_spp"),
                          sparse_coders=param_set.get("sparse_coders"), feature_encoders=param_set.get("feature_encoders"),
                          workspace=workspace, pretrained_dicts=param_set.get("pretrained_dicts"),
                          n_training_patches=param_set.get("n_training_patches"), imgs=imgs, mmap=False,
                          n_jobs=param_set.get("n_jobs"))

    hmp_net.build_hierarchy()
    print "finished!"