Example #1
def compute_caltech_features():
    """Extracts dense-SIFT + LLC features with pyramid pooling from the
    Caltech images, then dumps the trained pipeline, the features, and the
    labels under FLAGS.feature_dir.
    """
    caltech = datasets.TwoLayerDataset(FLAGS.root, ['jpg'], max_size=300)
    conv = pipeline.ConvLayer([
        dsift.DsiftExtractor(FLAGS.sift_size, FLAGS.sift_stride),
        pipeline.LLCEncoder({'k': FLAGS.llc_k},
                            trainer=pipeline.KmeansTrainer(
                                {'k': FLAGS.dict_size})),
        pipeline.PyramidPooler({
            'level': 3,
            'method': 'max'
        })
    ])
    conv.train(caltech, 400000)
    feat = conv.process_dataset(caltech, as_2d=True)

    mpi.mkdir(FLAGS.feature_dir)
    if mpi.is_root():
        with open(os.path.join(FLAGS.feature_dir, FLAGS.model_file),
                  'w') as fid:
            pickle.dump(conv, fid)

    mpi.dump_matrix_multi(feat,
                          os.path.join(FLAGS.feature_dir, FLAGS.feature_file))
    mpi.dump_matrix_multi(caltech.labels(),
                          os.path.join(FLAGS.feature_dir, FLAGS.label_file))
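
The dumped features can be loaded back and fed to a classifier. A minimal sketch, assuming the same FLAGS values and reusing classifier.l2svm_onevsall from the CIFAR demo below (the helper name train_caltech_svm and the regularization value 0.01 are ours):

def train_caltech_svm():
    # Load the features and labels dumped by compute_caltech_features().
    feat = mpi.load_matrix_multi(
        os.path.join(FLAGS.feature_dir, FLAGS.feature_file))
    labels = mpi.load_matrix_multi(
        os.path.join(FLAGS.feature_dir, FLAGS.label_file)).astype(np.int)
    # Standardize the features, then train a one-vs-all linear SVM.
    m, std = classifier.feature_meanstd(feat)
    feat -= m
    feat /= std
    w, b = classifier.l2svm_onevsall(feat, labels, 0.01)
    accu = np.sum(labels == (np.dot(feat, w) + b).argmax(axis=1)) \
            / float(len(labels))
    logging.info('Training accuracy: %f' % accu)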
Example #2
def cifar_demo():
    """Performs a demo classification on cifar
    """
    mpi.mkdir(FLAGS.output_dir)
    logging.info('Loading cifar data...')
    cifar = visiondata.CifarDataset(FLAGS.root, is_training=True)
    cifar_test = visiondata.CifarDataset(FLAGS.root, is_training=False)
    conv = pipeline.ConvLayer([
        pipeline.PatchExtractor([6, 6], 1),  # extracts patches
        pipeline.MeanvarNormalizer({'reg': 10}),  # normalizes the patches
        pipeline.LinearEncoder({},
                               trainer=pipeline.ZcaTrainer({'reg': 0.1})),  # does whitening
        pipeline.ThresholdEncoder({'alpha': 0.25, 'twoside': True},
                                  trainer=pipeline.OMPTrainer(
                                      {'k': 800, 'max_iter': 100})),  # does encoding
        pipeline.SpatialPooler({'grid': (2, 2), 'method': 'ave'})  # average pooling
    ])
    logging.info('Training the pipeline...')
    conv.train(cifar, 50000)
    logging.info('Dumping the pipeline...')
    if mpi.is_root():
        with open(os.path.join(FLAGS.output_dir, FLAGS.model_file), 'w') as fid:
            pickle.dump(conv, fid)
    # reload the pipeline so every rank uses the copy the root just dumped
    with open(os.path.join(FLAGS.output_dir, FLAGS.model_file), 'r') as fid:
        conv = pickle.load(fid)
    logging.info('Extracting features...')
    Xtrain = conv.process_dataset(cifar, as_2d=True)
    mpi.dump_matrix_multi(Xtrain,
                          os.path.join(FLAGS.output_dir,
                                       FLAGS.feature_file + '_train'))
    Ytrain = cifar.labels().astype(np.int)
    Xtest = conv.process_dataset(cifar_test, as_2d=True)
    mpi.dump_matrix_multi(Xtest,
                          os.path.join(FLAGS.output_dir,
                                       FLAGS.feature_file + '_test'))
    Ytest = cifar_test.labels().astype(np.int)

    # normalization
    m, std = classifier.feature_meanstd(Xtrain)
    Xtrain -= m
    Xtrain /= std
    Xtest -= m
    Xtest /= std
    
    w, b = classifier.l2svm_onevsall(Xtrain, Ytrain, 0.01)
    if mpi.is_root():
        with open(os.path.join(FLAGS.output_dir, FLAGS.svm_file), 'w') as fid:
            pickle.dump({'m': m, 'std': std, 'w': w, 'b': b}, fid)
    accu = np.sum(Ytrain == (np.dot(Xtrain,w)+b).argmax(axis=1)) \
            / float(len(Ytrain))
    accu_test = np.sum(Ytest == (np.dot(Xtest,w)+b).argmax(axis=1)) \
            / float(len(Ytest))
    
    logging.info('Training accuracy: %f' % accu)
    logging.info('Testing accuracy: %f' % accu_test)
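
For later reuse, the saved SVM can be unpickled and applied to newly extracted features. A minimal sketch (the helper name classify_with_saved_svm is ours; it assumes the same flags as above):

def classify_with_saved_svm(X):
    # Load the normalization statistics and SVM parameters saved above.
    with open(os.path.join(FLAGS.output_dir, FLAGS.svm_file), 'r') as fid:
        model = pickle.load(fid)
    X = (X - model['m']) / model['std']
    # One-vs-all prediction: pick the class with the highest score.
    return (np.dot(X, model['w']) + model['b']).argmax(axis=1)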
Example #3
def cifar_demo():
    """Performs a demo classification on CIFAR. A variant of Example #2 with
    a larger dictionary (k=1600), 4x4 max pooling, more training patches, and
    a smaller SVM regularizer.
    """
    mpi.mkdir(FLAGS.output_dir)
    logging.info("Loading cifar data...")
    cifar = visiondata.CifarDataset(FLAGS.root, is_training=True)
    cifar_test = visiondata.CifarDataset(FLAGS.root, is_training=False)

    # try: use sub images
    # cifar = datasets.SubImageSet(cifar, [28,28], 1)
    # cifar_test = datasets.CenterRegionSet(cifar_test, [28,28])

    conv = pipeline.ConvLayer(
        [
            pipeline.PatchExtractor([6, 6], 1),  # extracts patches
            pipeline.MeanvarNormalizer({"reg": 10}),  # normalizes the patches
            pipeline.LinearEncoder({}, trainer=pipeline.ZcaTrainer({"reg": 0.1})),  # Does whitening
            pipeline.ThresholdEncoder(
                {"alpha": 0.25, "twoside": True}, trainer=pipeline.OMPTrainer({"k": 1600, "max_iter": 100})
            ),  # does encoding
            pipeline.SpatialPooler({"grid": (4, 4), "method": "max"}),  # average pool
        ]
    )
    logging.info("Training the pipeline...")
    conv.train(cifar, 400000)
    logging.info("Dumping the pipeline...")
    if mpi.is_root():
        with open(os.path.join(FLAGS.output_dir, FLAGS.model_file), "w") as fid:
            pickle.dump(conv, fid)
            fid.close()
    logging.info("Extracting features...")
    Xtrain = conv.process_dataset(cifar, as_2d=True)
    mpi.dump_matrix_multi(Xtrain, os.path.join(FLAGS.output_dir, FLAGS.feature_file + "_train"))
    Ytrain = cifar.labels().astype(np.int)
    Xtest = conv.process_dataset(cifar_test, as_2d=True)
    mpi.dump_matrix_multi(Xtest, os.path.join(FLAGS.output_dir, FLAGS.feature_file + "_test"))
    Ytest = cifar_test.labels().astype(np.int)
    # normalization
    m, std = classifier.feature_meanstd(Xtrain)
    Xtrain -= m
    Xtrain /= std
    Xtest -= m
    Xtest /= std

    w, b = classifier.l2svm_onevsall(Xtrain, Ytrain, 0.005)
    if mpi.is_root():
        with open(os.path.join(FLAGS.output_dir, FLAGS.svm_file), "w") as fid:
            pickle.dump({"m": m, "std": std, "w": w, "b": b}, fid)
    accu = np.sum(Ytrain == (np.dot(Xtrain, w) + b).argmax(axis=1)) / float(len(Ytrain))
    accu_test = np.sum(Ytest == (np.dot(Xtest, w) + b).argmax(axis=1)) / float(len(Ytest))

    logging.info("Training accuracy: %f" % accu)
    logging.info("Testing accuracy: %f" % accu_test)
Example #4

# NOTE: this snippet is truncated at the top in the source; it picks up
# mid-way through a pipeline.ConvLayer([...]) construction, inside a
# LinearEncoder whose trainer is the ZcaTrainer on the next line.
                        trainer = pipeline.ZcaTrainer({'reg': 0.1})),
                #pipeline.SpatialMeanNormalizer({'channels': 3}),
                pipeline.ThresholdEncoder({'alpha': 0.25, 'twoside': False},
                        trainer = pipeline.OMPTrainer(
                                {'k': 3200, 'max_iter':100})),
                pipeline.KernelPooler(
                        {'kernel': pipeline.KernelPooler.kernel_uniform(15),
                         'method': 'max',
                         'stride': 1})
                ],
                fixed_size = True)
        conv.train(regions_data, 400000)
        mpi.root_pickle(conv, "conv.pickle")
    # so let's get the regions' features after pooling.
    regions_pooled = conv.process_dataset(regions_data)
    mpi.dump_matrix_multi(regions_pooled,
                          '/tscratch/tmp/jiayq/pooled_lda/regions_pooled')

logging.info("Feature shape:" + str(regions_pooled.shape[1:]))
std = mathutil.mpi_std(regions_pooled.reshape(regions_pooled.shape[0], \
        np.prod(regions_pooled.shape[1:])))
# average the std over spatial locations, keeping one value per channel
std.resize(np.prod(regions_pooled.shape[1:-1]), regions_pooled.shape[-1])
std = std.mean(axis=0)
std_order = np.argsort(std)

# now, compute the within-region std (variance over spatial locations)
regions_pooled_view = regions_pooled.reshape(regions_pooled.shape[0],
        np.prod(regions_pooled.shape[1:-1]), regions_pooled.shape[-1])
within_std_local = regions_pooled_view.var(axis=1)
print within_std_local.shape
within_std = np.sqrt(mathutil.mpi_mean(within_std_local))
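
The snippet ends after computing the overall and the within-region standard deviations. A plausible follow-up, not present in the original, would rank channels by the ratio of the two (channels whose variance lies mostly across regions rather than within a region):

# Hypothetical follow-up, not in the original source: rank channels by the
# ratio of overall std to mean within-region std.
ratio = std / (within_std + 1e-8)
ratio_order = np.argsort(ratio)
logging.info("Top 10 channels by std ratio: " + str(ratio_order[-10:]))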
Example #5

# NOTE: this snippet is also truncated; it is the feature-extraction branch
# of an if/else whose else branch (below) reloads cached features.
    train_data = visiondata.CUBDataset(ROOT, True, crop = CROP,
            target_size = TARGET_SIZE, prefetch = True)
    test_data = visiondata.CUBDataset(ROOT, False, crop = CROP,
            target_size = TARGET_SIZE, prefetch = True)
    if MIRRORED:
        train_data = datasets.MirrorSet(train_data)
    CONV.train(train_data, 400000, exhaustive = True)
    mpi.root_pickle(CONV, __file__ + ".conv.pickle")
    Xtrain = CONV.process_dataset(train_data, as_2d = True)
    Xtest = CONV.process_dataset(test_data, as_2d = True)
    Ytrain = train_data.labels()
    Ytest = test_data.labels()
    m, std = classifier.feature_meanstd(Xtrain)
    Xtrain -= m
    Xtrain /= std
    Xtest -= m
    Xtest /= std
    mpi.dump_matrix_multi(Xtrain, os.path.join(FEATDIR,'Xtrain'))
    mpi.dump_matrix_multi(Xtest, os.path.join(FEATDIR,'Xtest'))
    mpi.dump_matrix_multi(Ytrain, os.path.join(FEATDIR,'Ytrain'))
    mpi.dump_matrix_multi(Ytest, os.path.join(FEATDIR,'Ytest'))
else:
    Xtrain = mpi.load_matrix_multi(os.path.join(FEATDIR,'Xtrain'))
    Xtest = mpi.load_matrix_multi(os.path.join(FEATDIR,'Xtest'))
    Ytrain = mpi.load_matrix_multi(os.path.join(FEATDIR,'Ytrain'))
    Ytest = mpi.load_matrix_multi(os.path.join(FEATDIR,'Ytest'))

if FLAGS.flat:
    logging.info("Performing flat classification")
    solver = classifier.SolverMC(FLAGS.reg,
                                 classifier.Loss.loss_multiclass_logistic,
                                 classifier.Reg.reg_l2,
                                 fminargs = {'maxfun': 1000})
    w, b = solver.solve(Xtrain, classifier.to_one_of_k_coding(Ytrain, fill=0))
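    # Evaluation is not shown in this fragment; following the accuracy
    # computation used in the CIFAR demos above (a sketch, not from the
    # original source), it would continue:
    accu = np.sum(Ytrain == (np.dot(Xtrain, w) + b).argmax(axis=1)) \
            / float(len(Ytrain))
    accu_test = np.sum(Ytest == (np.dot(Xtest, w) + b).argmax(axis=1)) \
            / float(len(Ytest))
    logging.info('Training accuracy: %f' % accu)
    logging.info('Testing accuracy: %f' % accu_test)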
Example #6

# NOTE: truncated at the top in the source; it picks up inside a
# pipeline.ConvLayer([...]) construction, at the encoder trained by the
# NormalizedKmeansTrainer below.
                trainer = pipeline.NormalizedKmeansTrainer(
                        {'k': 1600, 'max_iter': 100})),  # does encoding
        pipeline.SpatialPooler({'grid': (2, 2), 'method': 'ave'})
    ])
    
    logging.info('Training the pipeline...')
    conv.train(cifar, 400000, exhaustive=True)
    mpi.root_pickle(conv, 'cifar_conv.pickle')

# do pruning
try:
    selected_idx = pickle.load(open('cifar_selected_idx.pickle'))
    logging.info('Skipping first layer pruning')
except Exception, e:
    features = conv.sample(cifar, 200000, True)
    mpi.dump_matrix_multi(features, '/u/vis/ttmp/jiayq/cifar/cifar_feature_pooled_sample')
    m, std = mathutil.mpi_meanstd(features)
    features -= m
    features /= std
    covmat = mathutil.mpi_cov(features, reg = 0.01)
    if mpi.is_root():
        selected_idx = pcfs.max_variance_feature_selection(covmat, 800)
    else:
        selected_idx = None
    selected_idx = mpi.COMM.bcast(selected_idx)
    mpi.root_pickle((m, std, covmat), 'cifar_squared_correlation.pickle')
    mpi.root_pickle(selected_idx, 'cifar_selected_idx.pickle')
    
dictionary_all = conv[-2].dictionary

for i in [25,50,100,200,400,800,1600]:
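    # The loop body is truncated in the source. A hypothetical continuation
    # (the slicing and the pickle filename are guesses, not from the source):
    # evaluate progressively larger sub-dictionaries.
    dictionary_sub = dictionary_all[:i]
    mpi.root_pickle(dictionary_sub, 'cifar_dictionary_%d.pickle' % i)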