Code Example #1
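(Note: these snippets are excerpts from experiment scripts that share one codebase; the identifiers (pipeline.ConvLayer, mathutil.mpi_cov, visiondata.CifarDataset) and the /u/vis/ttmp/jiayq/... paths point to the iceberk library. None of the excerpts is self-contained. As a rough sketch, with the package layout inferred from usage rather than confirmed, the shared imports would look like this:

import logging
import os
import pickle
import numpy as np
# Assumed package layout, inferred from the identifiers used in the snippets.
from iceberk import classifier, mathutil, mpi, pipeline, visiondata
# FLAGS.* are command-line flags; python-gflags is a guess at the flag library.
import gflags
FLAGS = gflags.FLAGS

Individual snippets additionally reference code_ap, pcfs, pinker, and an AffinityPropagation class; those are noted where they appear.)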
def prune_conv(conv, dataset, num_patches, num_features):
    if not isinstance(conv[-1], pipeline.Pooler):
        raise TypeError("The last layer should be a pooler.")
    if not isinstance(conv[-2], pipeline.FeatureEncoder):
        raise TypeError("The second to last layer should be an encoder.")
    logging.debug('Randomly sampling pooled features...')
    features = conv.sample(dataset, num_patches, True)
    if features.shape[1] != conv[-2].dictionary.shape[0]:
        raise ValueError("Huh, I can't figure out the encoding method.\n"
                         "Feature shape: %d, dictionary size: %d" %
                         (features.shape[1], conv[-2].dictionary.shape[0]))
    logging.debug('Performing feature selection...')
    covmat = mathutil.mpi_cov(features)
    if mpi.is_root():
        selected_idx = max_variance_feature_selection(covmat, num_features)
    else:
        selected_idx = None
    selected_idx = mpi.COMM.bcast(selected_idx)
    conv[-2].dictionary = conv[-2].dictionary[selected_idx]
    return covmat
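A hypothetical call of prune_conv, for orientation only; the trained pipeline `conv`, the dataset `cifar`, and both counts are placeholders rather than values from the original script:

# Hypothetical usage: sample 100000 pooled feature vectors from the dataset,
# keep the 256 highest-variance encoder features, and return the covariance.
covmat = prune_conv(conv, cifar, 100000, 256)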
Code Example #3
def stl_demo():
    """Performs a demo classification on stl
    """
    logging.info('Loading stl data...')
    stl = visiondata.STL10Dataset(FLAGS.root, 'unlabeled', target_size=32)
    stl_train = visiondata.STL10Dataset(FLAGS.root, 'train', target_size=32)
    stl_test = visiondata.STL10Dataset(FLAGS.root, 'test', target_size=32)

    conv = pipeline.ConvLayer([
            pipeline.PatchExtractor([6, 6], 1), # extracts patches
            pipeline.MeanvarNormalizer({'reg': 10}), # normalizes the patches
            pipeline.LinearEncoder({},
                    trainer = pipeline.ZcaTrainer({'reg': 0.1})),
            pipeline.ThresholdEncoder({'alpha': 0.25, 'twoside': False},
                    trainer = pipeline.NormalizedKmeansTrainer(
                         {'k': FLAGS.fromdim, 'max_iter':100})),
            pipeline.SpatialPooler({'grid': (FLAGS.grid, FLAGS.grid), 'method': FLAGS.method}) # average pool
            ])
    logging.info('Training the pipeline...')
    conv.train(stl, 400000, exhaustive = True)
    
    logging.info('Extracting features...')
    X = conv.process_dataset(stl, as_2d = False)
    Xtrain = conv.process_dataset(stl_train, as_2d = False)
    Ytrain = stl_train.labels().astype(int)
    Xtest = conv.process_dataset(stl_test, as_2d = False)
    Ytest = stl_test.labels().astype(int)
    
    # before we do feature computation, try to do dimensionality reduction
    X.resize(np.prod(X.shape[:-1]), X.shape[-1])
    Xtrain.resize(np.prod(Xtrain.shape[:-1]), Xtrain.shape[-1])
    Xtest.resize(np.prod(Xtest.shape[:-1]), Xtest.shape[-1])
    
    m, std = classifier.feature_meanstd(X, 0.01)
    X -= m
    X /= std
    Xtrain -= m
    Xtrain /= std
    Xtest -= m
    Xtest /= std
    
    covmat = mathutil.mpi_cov(X)
    
    current_dim = FLAGS.fromdim
    if FLAGS.svd == 1:
        eigval, eigvec = np.linalg.eigh(covmat)
    while current_dim >= 100:
        if current_dim < FLAGS.fromdim:
            if FLAGS.svd == 1:
                # directly do dimensionality reduction
                U = eigvec[:, -current_dim:]
                Xtrain_red = np.dot(Xtrain, U)
                Xtest_red = np.dot(Xtest, U)
            else:
                # do subsampling
                temp = code_ap.code_af(X, current_dim, tol=current_dim * 0.01)
                logging.info("selected %d dims" % len(temp[0]))
                sel = temp[0]
                sel = mpi.COMM.bcast(sel)
                Cpred = covmat[sel]
                Csel = Cpred[:,sel]
                W = np.linalg.solve(Csel, Cpred)
                # perform svd
                U, D, _ = np.linalg.svd(W, full_matrices = 0)
                U *= D
                Xtrain_red = np.dot(Xtrain[:, sel], U)
                Xtest_red = np.dot(Xtest[:, sel], U)
            Xtrain_red.resize(Ytrain.shape[0], Xtrain_red.size // Ytrain.shape[0])
            Xtest_red.resize(Ytest.shape[0], Xtest_red.size // Ytest.shape[0])
        else:
            Xtrain_red = Xtrain.copy()
            Xtest_red = Xtest.copy()
            Xtrain_red.resize(Ytrain.shape[0], Xtrain_red.size // Ytrain.shape[0])
            Xtest_red.resize(Ytest.shape[0], Xtest_red.size // Ytest.shape[0])
            
        w, b = classifier.l2svm_onevsall(Xtrain_red, Ytrain, 0.005,
                                         fminargs={'disp': 0, 'maxfun': 1000})
        accu_train = classifier.Evaluator.accuracy(Ytrain, np.dot(Xtrain_red, w) + b)
        accu_test = classifier.Evaluator.accuracy(Ytest, np.dot(Xtest_red, w) + b)
        logging.info('%d - %d, Training accuracy: %f' % (FLAGS.fromdim, current_dim, accu_train))
        logging.info('%d - %d, Testing accuracy: %f' % (FLAGS.fromdim, current_dim, accu_test))
        current_dim //= 2
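The subsampling branch above is a Nystrom-style reduction: with feature covariance C and selected index set S, it solves W = C[S,S]^{-1} C[S,:], takes the SVD of W, and projects the selected columns so that they approximate the decorrelated full feature set. A self-contained sketch of that step on random data; nystrom_project is an illustrative name, and plain np.cov stands in for mathutil.mpi_cov:

import numpy as np

def nystrom_project(X, sel):
    """Approximate a decorrelating projection of X from the columns in sel."""
    covmat = np.cov(X, rowvar=False)
    Cpred = covmat[sel]                  # covariance rows of the selected features
    Csel = Cpred[:, sel]                 # selected-by-selected block
    W = np.linalg.solve(Csel, Cpred)     # W = C[S,S]^{-1} C[S,:]
    U, D, _ = np.linalg.svd(W, full_matrices=False)
    U *= D                               # same scaling as in the snippet above
    return np.dot(X[:, sel], U)

rng = np.random.RandomState(0)
X = rng.randn(1000, 64)
X_red = nystrom_project(X, np.arange(0, 64, 4))  # keep every 4th feature
print(X_red.shape)                       # -> (1000, 16)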
Code Example #4
# (the excerpt opens inside a commented-out block that trained the pipeline:)
# ])
# logging.info('Training the pipeline...')
# conv.train(cifar, 400000, exhaustive = True)
conv = pickle.load(open('cvpr_exemplar_centroids_conv.pickle', 'rb'))
_, ap_result = pickle.load(open('cvpr_exemplar_centroids.pickle', 'rb'))

logging.info('Extracting features...')
Xtrain = conv.process_dataset(cifar, as_2d = False)
# we simply use all the features to compute the covmat
Xtrain.resize(np.prod(Xtrain.shape[:-1]), Xtrain.shape[-1])

m, std = classifier.feature_meanstd(Xtrain, 0.01)
Xtrain -= m
Xtrain /= std
covmat = mathutil.mpi_cov(Xtrain)

# do subsampling
"""
ap_result = code_ap.code_af(Xtrain, todim)
"""
sel = ap_result[0]
sel = mpi.COMM.bcast(sel)
Cpred = covmat[sel]
Csel = Cpred[:,sel]
Crecon = np.dot(Cpred.T, np.dot(np.linalg.pinv(Csel), Cpred))
Crecon = (Crecon + Crecon.T) / 2
eigval = np.linalg.eigvals(covmat)
eigval_recon = np.linalg.eigvals(Crecon)

# random
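The excerpt breaks off above (the trailing '# random' comment suggests a random-selection baseline followed). A purely illustrative continuation, not from the original script, would compare the two spectra:

# Illustrative continuation: compare the spectrum of the full covariance with
# that of the Nystrom reconstruction. np.real guards against the tiny imaginary
# parts np.linalg.eigvals can return on nearly-symmetric input.
eigval = np.sort(np.real(eigval))[::-1]
eigval_recon = np.sort(np.real(eigval_recon))[::-1]
if mpi.is_root():
    from matplotlib import pyplot
    pyplot.semilogy(eigval, label='full covariance')
    pyplot.semilogy(eigval_recon, label='Nystrom reconstruction')
    pyplot.legend()
    pyplot.show()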
Code Example #5
def cifar_demo():
    """Performs a demo classification on cifar
    """

    mpi.mkdir(FLAGS.output_dir)
    logging.info('Loading cifar data...')
    cifar = visiondata.CifarDataset(FLAGS.root, is_training=True)
    cifar_test = visiondata.CifarDataset(FLAGS.root, is_training=False)

    if FLAGS.trainer == "pink":
        trainer = pinker.SpatialPinkTrainer({
            'size': (FLAGS.patch, FLAGS.patch),
            'reg': 0.1
        })
    else:
        trainer = pipeline.ZcaTrainer({'reg': 0.1})

    conv = pipeline.ConvLayer([
        pipeline.PatchExtractor([FLAGS.patch, FLAGS.patch], 1),  # extracts patches
        pipeline.MeanvarNormalizer({'reg': 10}),  # normalizes the patches
        pipeline.LinearEncoder({}, trainer=trainer),
        pipeline.ThresholdEncoder(
            {'alpha': 0.0, 'twoside': False},
            trainer=pipeline.OMPTrainer({'k': FLAGS.fromdim, 'max_iter': 100})),
        pipeline.SpatialPooler(
            {'grid': (FLAGS.grid, FLAGS.grid), 'method': FLAGS.method})  # average pool
    ])
    logging.info('Training the pipeline...')
    conv.train(cifar, 400000, exhaustive=True)

    logging.info('Extracting features...')
    Xtrain = conv.process_dataset(cifar, as_2d=False)
    Ytrain = cifar.labels().astype(int)
    Xtest = conv.process_dataset(cifar_test, as_2d=False)
    Ytest = cifar_test.labels().astype(int)

    # before we do feature computation, try to do dimensionality reduction
    Xtrain.resize(np.prod(Xtrain.shape[:-1]), Xtrain.shape[-1])
    Xtest.resize(np.prod(Xtest.shape[:-1]), Xtest.shape[-1])

    m, std = classifier.feature_meanstd(Xtrain, 0.01)
    Xtrain -= m
    Xtrain /= std
    Xtest -= m
    Xtest /= std

    covmat = mathutil.mpi_cov(Xtrain)
    if False:  # set to True to use the direct eigendecomposition (PCA) branch instead of subsampling
        # directly do dimensionality reduction
        eigval, eigvec = np.linalg.eigh(covmat)
        U = eigvec[:, -FLAGS.todim:]
        Xtrain = np.dot(Xtrain, U)
        Xtest = np.dot(Xtest, U)
    else:
        # do subsampling
        import code_ap
        temp = code_ap.code_af(Xtrain, FLAGS.todim)
        sel = temp[0]
        sel = mpi.COMM.bcast(sel)
        Cpred = covmat[sel]
        Csel = Cpred[:, sel]
        W = np.linalg.solve(Csel, Cpred)
        # perform svd
        U, D, _ = np.linalg.svd(W, full_matrices=0)
        U *= D
        Xtrain = np.dot(Xtrain[:, sel], U)
        Xtest = np.dot(Xtest[:, sel], U)
    Xtrain.resize(Ytrain.shape[0], Xtrain.size // Ytrain.shape[0])
    Xtest.resize(Ytest.shape[0], Xtest.size // Ytest.shape[0])
    """
    # This part is used to do post-pooling over all features nystrom subsampling
    # normalization
    Xtrain.resize(Xtrain.shape[0], np.prod(Xtrain.shape[1:]))
    Xtest.resize(Xtest.shape[0], np.prod(Xtest.shape[1:]))
    m, std = classifier.feature_meanstd(Xtrain, reg = 0.01)
    # to match Adam Coates' pipeline
    Xtrain -= m
    Xtrain /= std
    Xtest -= m
    Xtest /= std
    
    covmat = mathutil.mpi_cov(Xtrain)
    eigval, eigvec = np.linalg.eigh(covmat)
    U = eigvec[:, -(200*FLAGS.grid*FLAGS.grid):]
    #U = eigvec[:,-400:] * np.sqrt(eigval[-400:])
    Xtrain = np.dot(Xtrain, U)
    Xtest = np.dot(Xtest, U)
    """

    w, b = classifier.l2svm_onevsall(Xtrain,
                                     Ytrain,
                                     0.002,
                                     fminargs={
                                         'disp': 0,
                                         'maxfun': 1000
                                     })
    accu_train = classifier.Evaluator.accuracy(Ytrain, np.dot(Xtrain, w) + b)
    accu_test = classifier.Evaluator.accuracy(Ytest, np.dot(Xtest, w) + b)
    logging.info('Training accuracy: %f' % accu_train)
    logging.info('Testing accuracy: %f' % accu_test)
Code Example #6
    
# (the excerpt opens inside a block that trained and saved the pipeline)
logging.info('Training the pipeline...')
conv.train(cifar, 400000, exhaustive=True)
mpi.root_pickle(conv, 'cifar_conv.pickle')

# do pruning
try:
    selected_idx = pickle.load(open('cifar_selected_idx.pickle', 'rb'))
    logging.info('Skipping first layer pruning')
except Exception as e:
    features = conv.sample(cifar, 200000, True)
    mpi.dump_matrix_multi(features, '/u/vis/ttmp/jiayq/cifar/cifar_feature_pooled_sample')
    m, std = mathutil.mpi_meanstd(features)
    features -= m
    features /= std
    covmat = mathutil.mpi_cov(features, reg = 0.01)
    if mpi.is_root():
        selected_idx = pcfs.max_variance_feature_selection(covmat, 800)
    else:
        selected_idx = None
    selected_idx = mpi.COMM.bcast(selected_idx)
    mpi.root_pickle((m, std, covmat), 'cifar_squared_correlation.pickle')
    mpi.root_pickle(selected_idx, 'cifar_selected_idx.pickle')
    
dictionary_all = conv[-2].dictionary

for i in [25,50,100,200,400,800,1600]:
    logging.info('Training with dictionary size %d' % i)
    #conv[-2].dictionary = np.ascontiguousarray(dictionary_all[selected_idx[:i]])
    conv[-2].dictionary = np.ascontiguousarray(dictionary_all[:i])
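The compute-on-root-then-broadcast idiom above (also used in Code Example #1) keeps every MPI rank's copy of the result identical after a root-only computation. A minimal standalone illustration, assuming mpi.COMM wraps an mpi4py communicator:

# Root computes a Python object; bcast replicates it to all ranks.
from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_rank() == 0:
    selected_idx = [3, 1, 4]   # placeholder for the root-only computation
else:
    selected_idx = None
selected_idx = comm.bcast(selected_idx)  # identical on every rank afterwards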
Code Example #8
def apcluster_k(feature, num_centers, corr = True, tol = 0):
    """perform the affinity propagation algorithm for the input codes.
    """
    logging.debug("ap: preparing similarity matrix")
    covmat = mathutil.mpi_cov(feature)
    std = np.diag(covmat)
    # normalize
    std = np.sqrt(std**2 + 0.01)
    if corr:
        # compute correlation. If corr is False, we will use the covariance
        # directly
        covmat /= std
        covmat /= std[:, np.newaxis]
    # compute the similarity matrix
    norm = np.diag(covmat) / 2
    covmat -= norm
    covmat -= norm[:, np.newaxis]
    # add a small noise to covmat
    noise = (covmat + np.finfo(np.float64).eps) * \
            np.random.rand(covmat.shape[0], covmat.shape[1])
    mpi.COMM.Bcast(noise)
    covmat += noise
    # The remaining part can just be carried out on root
    if mpi.is_root():
        # set preference
        pmax = covmat.max()
        #af = AffinityPropagation().fit(covmat, pmax)
        #num_max = len(af.cluster_centers_indices_)
        # in fact, num_max would always be covmat.shape[0] so we don't really
        # run ap
        num_max = covmat.shape[0]
        logging.debug("ap: pmax = %s, num = %d" % (pmax, num_max))
        pmin = covmat.min()
        af = AffinityPropagation().fit(covmat, pmin)
        # num_min is the theoretical minimum, but the Python code seems to have bugs...
        num_min = len(af.cluster_centers_indices_)
        logging.debug("ap: pmin = %s, num = %d" % (pmin, num_min))
        
        if num_centers < num_min:
            logging.warning("num_centers too small, will return %d centers" % (num_min,))
            return af.cluster_centers_indices_, af.labels_, covmat
    
        if num_centers > num_max:
            logging.warning("num_centers too large, will return everything.")
            return np.arange(covmat.shape[0], dtype=int), \
                   np.arange(covmat.shape[0], dtype=int), covmat
        
        logging.debug("ap: start affinity propagation")
        
        # We will simply use bisection search to find the right number of centroids.
        for i in range(_AP_MAX_ITERATION):
            pref = (pmax + pmin) / 2
            af = AffinityPropagation().fit(covmat, pref)
            num = len(af.cluster_centers_indices_)
            logging.debug("ap try %d: pref = %s, num = %s" % (i + 1, pref, num))
            if num >= num_centers - tol and num <= num_centers + tol:
                break
            elif num < num_centers:
                pmin = pref
                num_min = num
            else:
                pmax = pref
                num_max = num
    else:
        af = None
    mpi.barrier()
    af = mpi.COMM.bcast(af)
    return af.cluster_centers_indices_, af.labels_, covmat
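AffinityPropagation().fit(covmat, pref) above is the old scikits.learn calling convention, where fit took the preference as its second argument. A sketch of the equivalent call in current scikit-learn, which takes the preference in the constructor and the precomputed similarity matrix in fit:

# Current scikit-learn equivalent of AffinityPropagation().fit(covmat, pref).
from sklearn.cluster import AffinityPropagation

af = AffinityPropagation(affinity='precomputed', preference=pref).fit(covmat)
centers = af.cluster_centers_indices_   # indices of the exemplar features
labels = af.labels_                     # cluster assignment for each feature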
Code Example #9
def cifar_demo():
    """Performs a demo classification on CIFAR-10.
    """

    mpi.mkdir(FLAGS.output_dir)
    logging.info('Loading cifar data...')
    cifar = visiondata.CifarDataset(FLAGS.root, is_training=True)
    cifar_test = visiondata.CifarDataset(FLAGS.root, is_training=False)

    conv = pipeline.ConvLayer([
            pipeline.PatchExtractor([6, 6], 1), # extracts patches
            pipeline.MeanvarNormalizer({'reg': 10}), # normalizes the patches
            pipeline.LinearEncoder({},
                    trainer = pipeline.ZcaTrainer({'reg': 0.1})),
            pipeline.ThresholdEncoder({'alpha': 0.25, 'twoside': False},
                    trainer = pipeline.NormalizedKmeansTrainer(
                         {'k': FLAGS.fromdim, 'max_iter':100})),
            pipeline.SpatialPooler({'grid': (FLAGS.grid, FLAGS.grid), 'method': FLAGS.method}) # average pool
            ])
    logging.info('Training the pipeline...')
    conv.train(cifar, 400000, exhaustive = True)
    
    logging.info('Extracting features...')
    Xtrain = conv.process_dataset(cifar, as_2d = False)
    Ytrain = cifar.labels().astype(int)
    Xtest = conv.process_dataset(cifar_test, as_2d = False)
    Ytest = cifar_test.labels().astype(int)
    
    # before we do feature computation, try to do dimensionality reduction
    Xtrain.resize(np.prod(Xtrain.shape[:-1]), Xtrain.shape[-1])
    Xtest.resize(np.prod(Xtest.shape[:-1]), Xtest.shape[-1])
    
    m, std = classifier.feature_meanstd(Xtrain, 0.01)
    Xtrain -= m
    Xtrain /= std
    Xtest -= m
    Xtest /= std
    
    covmat = mathutil.mpi_cov(Xtrain)
    
    current_dim = FLAGS.fromdim
    if FLAGS.svd == 1:
        eigval, eigvec = np.linalg.eigh(covmat)
    while current_dim >= 100:
        if current_dim < FLAGS.fromdim:
            if FLAGS.svd == 1:
                # directly do dimensionality reduction
                U = eigvec[:, -current_dim:]
                Xtrain_red = np.dot(Xtrain, U)
                Xtest_red = np.dot(Xtest, U)
            else:
                # do subsampling
                temp = code_ap.code_af(Xtrain, current_dim)
                logging.info("selected %d dims" % len(temp[0]))
                sel = temp[0]
                Xtrain_red = np.ascontiguousarray(Xtrain[:, sel])
                Xtest_red = np.ascontiguousarray(Xtest[:, sel])
            Xtrain_red.resize(Ytrain.shape[0], Xtrain_red.size // Ytrain.shape[0])
            Xtest_red.resize(Ytest.shape[0], Xtest_red.size // Ytest.shape[0])
        else:
            Xtrain_red = Xtrain.copy()
            Xtest_red = Xtest.copy()
            Xtrain_red.resize(Ytrain.shape[0], Xtrain_red.size // Ytrain.shape[0])
            Xtest_red.resize(Ytest.shape[0], Xtest_red.size // Ytest.shape[0])
            
        w, b = classifier.l2svm_onevsall(Xtrain_red, Ytrain, 0.005,
                                         fminargs={'disp': 0, 'maxfun': 1000})
        accu_train = classifier.Evaluator.accuracy(Ytrain, np.dot(Xtrain_red, w) + b)
        accu_test = classifier.Evaluator.accuracy(Ytest, np.dot(Xtest_red, w) + b)
        logging.info('%d - %d, Training accuracy: %f' % (FLAGS.fromdim, current_dim, accu_train))
        logging.info('%d - %d, Testing accuracy: %f' % (FLAGS.fromdim, current_dim, accu_test))
        current_dim //= 2
Code Example #12
# (the excerpt opens inside a root-only block that saved the first-layer model)
fid = open(model_file_first, 'wb')  # binary mode for pickle
pickle.dump(conv, fid)
fid.close()
mpi.barrier()

################################################################################
# Obtains statistics from the first layer
################################################################################
if os.path.exists(order_file):
    logging.info('skipping the feature selection layer...')
    order = np.load(order_file)
else:
    # now, since we cannot possibly store the stl intermediate features, we do
    # computation online and discard them on the fly
    feat = conv.sample(stl, 400000)
    covmat = mathutil.mpi_cov(feat)
    if mpi.is_root():
        # do greedy feature scoring
        order = pcfs.principal_component_feature_selection(
            covmat, feat.shape[1])
        np.save(order_file, order)
        np.save(covmat_file, covmat)
        residual = [np.diag(pcfs.conditional_covariance(covmat, order[:i])).sum() \
                    for i in range(1, feat.shape[1], 10)]
        try:
            from matplotlib import pyplot
            pyplot.plot(range(1, feat.shape[1], 10), residual)
            pyplot.show()
        except Exception as e:
            pass
    else:
        order = None