def get_representations_for_joint_layer(yaml_file_path, save_path, batch_size):
    """
    The purpose of doing this is to test the compatibility of DBM with StackBlocks and TransformerDatasets
    Of course one can instead take use the "get_represenations.py" for data preparation for the next step.
    """
    
    hyper_params = {'save_path':save_path}
    yaml = open("{0}/stacked_image_unimodaliy.yaml".format(yaml_file_path), 'r').read()
    yaml = yaml % (hyper_params)
    image_stacked_blocks = yaml_parse.load(yaml)
    yaml = open("{0}/stacked_text_unimodaliy.yaml".format(yaml_file_path), 'r').read()
    yaml = yaml % (hyper_params)
    text_stacked_blocks = yaml_parse.load(yaml)
      
    image_raw = Flickr_Image_Toronto(which_cat = 'unlabelled',which_sub='nnz', using_statisfile = True)
    image_rep = TransformerDataset( raw = image_raw, transformer = image_stacked_blocks )
    m, n = image_raw.get_data().shape
    dw = data_writer.DataWriter(['image_h2_rep'], save_path + 'image/', '10G', [n], m)
    image_iterator = image_rep.iterator(batch_size= batch_size)
    
    for data in image_iterator:
        dw.Submit(data)
    dw.Commit()
    text_raw = Flickr_Text_Toronto(which_cat='unlabelled')
    text_rep = TransformerDataset( raw = text_raw, transformer = text_stacked_blocks )
    m, n = text_raw.get_data().shape
    dw = data_writer.DataWriter(['text_h2_rep'], save_path + 'text/', '10G', [n], m)
    text_iterator = text_rep.iterator(batch_size= batch_size)
    
    for data in text_iterator:
        dw.Submit(data)
    dw.Commit()
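
For reference, here is a minimal standalone sketch of the wrapping pattern this function relies on, using a single untrained pylearn2 Autoencoder and random data as placeholders (the script above instead loads stacks of pretrained blocks from YAML files):

import numpy as np
import theano
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets.transformer_dataset import TransformerDataset
from pylearn2.models.autoencoder import Autoencoder

# placeholder raw data: 100 examples with 50 features
raw = DenseDesignMatrix(X=np.random.randn(100, 50).astype(theano.config.floatX))

# a single block standing in for the stacked, pretrained blocks used above
block = Autoencoder(nvis=50, nhid=20, act_enc='sigmoid', act_dec=None)

rep = TransformerDataset(raw=raw, transformer=block)

# each batch is the hidden representation produced by the block
for batch in rep.iterator(mode='sequential', batch_size=25):
    print(batch.shape)  # (25, 20): batch_size x nhid
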
Example 2
def main():
    parser = OptionParser()
    parser.add_option("-d",
                      "--data",
                      dest="dataset",
                      default="toy",
                      help="specify the dataset, either cifar10 or toy")
    (options, args) = parser.parse_args()

    global SAVE_MODEL

    if options.dataset == 'toy':
        trainset, testset = get_dataset_toy()
        SAVE_MODEL = False
    elif options.dataset == 'cifar10':
        trainset, testset = get_dataset_cifar10()
        SAVE_MODEL = True

    design_matrix = trainset.get_design_matrix()
    n_input = design_matrix.shape[1]

    # build layers
    layers = []
    structure = [[n_input, 400], [400, 50], [50, 100], [100, 2]]
    # layer 0: gaussianRBM
    layers.append(get_grbm(structure[0]))
    # layer 1: denoising AE
    layers.append(get_denoising_autoencoder(structure[1]))
    # layer 2: AE
    layers.append(get_autoencoder(structure[2]))
    # layer 3: logistic regression used in supervised training
    layers.append(get_logistic_regressor(structure[3]))

    #construct training sets for different layers
    trainset = [
        trainset,
        TransformerDataset(raw=trainset, transformer=layers[0]),
        TransformerDataset(raw=trainset,
                           transformer=StackedBlocks(layers[0:2])),
        TransformerDataset(raw=trainset,
                           transformer=StackedBlocks(layers[0:3]))
    ]

    # construct layer trainers
    layer_trainers = []
    layer_trainers.append(get_layer_trainer_sgd_rbm(layers[0], trainset[0]))
    layer_trainers.append(
        get_layer_trainer_sgd_autoencoder(layers[1], trainset[1]))
    layer_trainers.append(
        get_layer_trainer_sgd_autoencoder(layers[2], trainset[2]))
    layer_trainers.append(get_layer_trainer_logistic(layers[3], trainset[3]))

    #unsupervised pretraining
    for layer_trainer in layer_trainers[0:3]:
        layer_trainer.main_loop()

    #supervised training
    layer_trainers[-1].main_loop()
Example 3
    def __init__(self, raw, transformer, cpu_only = False):
        TransformerDataset.__init__(self,raw, transformer, cpu_only = cpu_only)

        N = self.transformer.nhid

        r = int(np.sqrt(N))
        c = N // r

        if N == r * c:
            shape = (r,c,1)
        else:
            shape = (N,1,1)

        self.view_converter = DefaultViewConverter(shape=shape)
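
To make the shape logic above concrete, here is a small standalone sketch (N is a placeholder hidden-layer size, not taken from any particular model):

import numpy as np
from pylearn2.datasets.dense_design_matrix import DefaultViewConverter

N = 144                 # placeholder number of hidden units
r = int(np.sqrt(N))
c = N // r
# square-ish (r, c, 1) view when N factors exactly, else a flat (N, 1, 1) column
shape = (r, c, 1) if N == r * c else (N, 1, 1)

converter = DefaultViewConverter(shape=shape)
batch = np.random.randn(2, N)
print(converter.design_mat_to_topo_view(batch).shape)  # (2, 12, 12, 1) for N = 144
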
Example 4
def test_transformer_iterator():
    """
    Tests whether TransformerIterator is iterable
    """

    test_path = os.path.join(pylearn2.__path__[0], 'datasets', 'tests',
                             'test.csv')
    raw = CSVDataset(path=test_path, expect_headers=False)
    block = Block()
    dataset = TransformerDataset(raw, block)
    iterator = dataset.iterator('shuffled_sequential', 3)
    try:
        iter(iterator)
    except TypeError:
        assert False, "TransformerIterator isn't iterable"
Example 6
    def __iter__(self):
        """
        Construct a Transformer dataset for each partition.
        """
        for k, datasets in enumerate(self.dataset_iterator):
            if isinstance(self.transformers, list):
                transformer = self.transformers[k]
            elif isinstance(self.transformers, StackedBlocksCV):
                transformer = self.transformers.select_fold(k)
            else:
                transformer = self.transformers
            if isinstance(datasets, list):
                for i, dataset in enumerate(datasets):
                    datasets[i] = TransformerDataset(dataset, transformer)
            else:
                for key, dataset in datasets.items():
                    datasets[key] = TransformerDataset(dataset, transformer)
            yield datasets
Example 7
def main():
    test = len(sys.argv)>1 and sys.argv[1]=='test'


    print "loading pretraining sets"
    pretrainset,prevalidset =  load_predatasets(test)
#    print "loading training sets"
#    trainset,validset = load_traindatasets()
#    print "loading test sets"
#    testset = load_testdatasets()

#    for lr in [1e-4,1e-5,1e-6,1e-7]:
    if test:
        layer_trainers = build_conv(1e-5,pretrainset,prevalidset,None,None)#trainset,validset)
        print '-----------------------------------'
        print ' Supervised pretraining'
        print '-----------------------------------'
        layer_trainers[0].main_loop()

        sys.exit(0)

    for lr in [1e-4,1e-5,1e-6,1e-7,1e-8,1e-9]:
    #    layer_trainers = build_mlp(pretrainset,prevalidset,trainset,validset)
        try:
            layer_trainers = build_conv(lr,pretrainset,prevalidset,None,None)#trainset,validset)
    #    import theano
    #    theano.config.compute_test_value = 'warn'

            print '-----------------------------------'
            print ' Supervised pretraining'
            print '-----------------------------------'
            layer_trainers[0].main_loop()
        #    layer_trainers[0] = serial.load('pretrained.pkl')

#        X = model.get_input_space().make_theano_batch(name="%s[X]" % self.__class__.__name__)
#        self.f = function([X],model.fprop(X),on_unused_input='ignore')

#        dX = dataset.adjust_for_viewer(dataset.get_batch_topo(2))
#        y_hat = self.f(dX[0:1].astype('float32'))
#        y_hat = y_hat.reshape([1]+dataset.output_shape)
#        y = dataset.y[0:1].reshape([1]+dataset.output_shape)

#        pylab.imshow(y_hat[0],cmap="gray")
#        pylab.show()
        except BaseException as e:
            print e

    sys.exit(0)

    print '-----------------------------------'
    print ' Supervised training'
    print '-----------------------------------'
    layer_trainers[-1].main_loop()

#    premodel = serial.load('pretrained.pkl')
#    model = serial.load('best_full.pkl')
    premodel = layer_trainers[0].model
    model = layer_trainers[1].model

    premodel._params = []
    model._params = []

    X = tensor.matrix()
    y = model.fprop(X)

    f = function([X],y)

#    base_path = "/home/xavier/data/ContestDataset"
#    testset = FacialKeypointDataset(
#        base_path = base_path,
#        which_set = 'train',
#        start = 1001,
#        stop = 1500)

    T_testset = TransformerDataset( raw = testset, transformer = StackedMLPs([premodel]) )
    
    Xs = np.concatenate([batch for batch in T_testset.iterator(mode='sequential',batch_size=50,targets=False)])
    print Xs.shape
#    Xs = testset.X

    Y_hat = f(np.float32(Xs))
    Y = testset.y

#    Y_hat += 100.

    for i in range(10):
        print "Y:  ",Y[i]
        print "Yh: ",Y_hat[i]

    # per-example Euclidean distance: same as np.sqrt(np.sum((Y_hat - Y)**2, axis=1))
    D = np.sqrt(np.sum(Y_hat**2 - 2*Y_hat*Y + Y**2, axis=1))
    print D.shape
    print D.mean(0), D.std(0)

    stacked = StackedMLPs([premodel,model])
    serial.save("stacked.pkl", stacked, on_overwrite = 'backup')
def main():

    trainset, validset, testset, extraset = get_dataset_icml()
    #trainset,testset = get_dataset_mnist()
    
    design_matrix = trainset.get_design_matrix()
    n_input = design_matrix.shape[1]
    
    n_output = 9 #10

    # build layers
    layers = []
    structure = [[n_input, 1000], [1000,1000],[1000,1000], [1000, n_output]]
    
    #layers.append(get_grbm(structure[0]))
    # layer 0: gaussian RBM
    layers.append(get_grbm(structure[0]))
    # layer 1: gaussian RBM
    layers.append(get_grbm(structure[1]))
    # layer 2: gaussian RBM
    layers.append(get_grbm(structure[2]))
    # layer 3: logistic regression used in supervised training (commented out)
    #layers.append(get_logistic_regressor(structure[3]))


    #construct training sets for different layers
    traindata = [ extraset ,
                TransformerDataset( raw = extraset, transformer = layers[0] ),
                TransformerDataset( raw = extraset, transformer = StackedBlocks( layers[0:2] )),
                TransformerDataset( raw = extraset, transformer = StackedBlocks( layers[0:3] )) ]
    
    #valid =  TransformerDataset( raw = validset, transformer = StackedBlocks( layers[0:2] ))
    
    #valid = trainset

    # construct layer trainers
    layer_trainers = []
    #layer_trainers.append(get_layer_trainer_sgd_rbm(layers[0], trainset[0]))
    layer_trainers.append(get_layer_trainer_sgd_rbm(layers[0], traindata[0],'db1.pkl'))
    layer_trainers.append(get_layer_trainer_sgd_rbm(layers[1], traindata[1],'db2.pkl'))
    layer_trainers.append(get_layer_trainer_sgd_rbm(layers[2], traindata[2],'db3.pkl'))
    #layer_trainers.append(get_layer_trainer_logistic(layers[2], trainset[2], valid))

    #unsupervised pretraining
    for i, layer_trainer in enumerate(layer_trainers[0:3]):
        print '-----------------------------------'
        print ' Unsupervised training (pretraining) layer %d, %s'%(i, layers[i].__class__)
        print '-----------------------------------'
        layer_trainer.main_loop()


    print '\n'
    print '------------------------------------------------------'
    print ' Unsupervised training done! Start supervised training (fine-tuning)...'
    print '------------------------------------------------------'
    print '\n'
    
    mlp_layers = []
    mlp_layers.append(PretrainedLayer(layer_name = 'h0', layer_content = serial.load('db1.pkl')))
    mlp_layers.append(PretrainedLayer(layer_name = 'h1', layer_content = serial.load('db2.pkl')))
    mlp_layers.append(PretrainedLayer(layer_name = 'h2', layer_content = serial.load('db3.pkl')))

    #supervised training
    #layer_trainers[-1].main_loop()
    mlp_model = get_layer_MLP(mlp_layers,trainset,validset)
    mlp_model.main_loop()
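
get_layer_MLP is a project-specific helper that is not shown here. A minimal sketch of how such a fine-tuning Train object might be assembled from the pretrained layers, assuming the pickles saved above and with placeholder hyperparameters:

from pylearn2.models.mlp import MLP, PretrainedLayer, Softmax
from pylearn2.training_algorithms.sgd import SGD
from pylearn2.termination_criteria import EpochCounter
from pylearn2.train import Train
from pylearn2.utils import serial

def build_finetune_trainer(trainset, validset, n_input, n_classes=9):
    layers = [
        PretrainedLayer(layer_name='h0', layer_content=serial.load('db1.pkl')),
        PretrainedLayer(layer_name='h1', layer_content=serial.load('db2.pkl')),
        PretrainedLayer(layer_name='h2', layer_content=serial.load('db3.pkl')),
        Softmax(n_classes=n_classes, layer_name='y', irange=0.05),
    ]
    model = MLP(layers=layers, nvis=n_input)
    algorithm = SGD(learning_rate=0.05,
                    batch_size=100,
                    monitoring_dataset={'valid': validset},
                    termination_criterion=EpochCounter(50))
    return Train(dataset=trainset, model=model, algorithm=algorithm)
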
Example 9
#if __name__ == '__main__':

#    _, fold_file, model_file = sys.argv
fold_file = 'GTZAN_1024-fold-1_of_4.pkl'
model_file = './saved-rlu-505050/mlp_rlu_fold1_best.pkl'

# get model
model = serial.load(model_file)

# get standardized dictionary
which_set = 'test'
with open(fold_file) as f:
    config = cPickle.load(f)

dataset = TransformerDataset(
    raw=GTZAN_dataset.GTZAN_dataset(config, which_set),
    transformer=GTZAN_dataset.GTZAN_standardizer(config))

# test error
#err, conf = frame_misclass_error(model, dataset)

hist = class_histogram(model, dataset)
hist = np.vstack(hist)

test_files = np.array(config['test_files'])
test_labels = test_files // 100

most_votes = np.argmax(hist, axis=0)
most_rep_files = test_files[most_votes]
most_rep_hist = hist[most_votes, :]
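
As a toy illustration of the argmax step above, with a made-up 3-file by 4-class vote histogram: for each column (class), pick the row (file) with the largest count.

import numpy as np

test_files = np.array([101, 203, 305])
hist = np.array([[40,  5,  3,  2],
                 [10, 30,  5,  5],
                 [ 2,  2, 44,  2]])
most_votes = np.argmax(hist, axis=0)     # row index with the most votes per class
most_rep_files = test_files[most_votes]  # -> [101, 203, 305, 203]
print(most_rep_files)
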
Example 10
                        help='Full path and prefix for saving output models')
    parser.add_argument('--use_autoencoder', action='store_true')
    args = parser.parse_args()

    if args.epochs is None:
        args.epochs = 5

    arch = [(i, j) for i, j in zip(args.arch[:-1], args.arch[1:])]

    with open(args.fold_config) as f:
        config = cPickle.load(f)

    preproc_layer = PreprocLayer(config=config, proc_type='standardize')

    dataset = TransformerDataset(raw=AudioDataset(which_set='train',
                                                  config=config),
                                 transformer=preproc_layer.layer_content)

    # transformer_yaml = '''!obj:pylearn2.datasets.transformer_dataset.TransformerDataset {
    #     raw : %(raw)s,
    #     transformer : %(transformer)s
    # }'''
    #
    # dataset_yaml = transformer_yaml % {
    #     'raw' : '''!obj:audio_dataset.AudioDataset {
    #         which_set : 'train',
    #         config : !pkl: "%(fold_config)s"
    #     }''' % {'fold_config' : args.fold_config},
    #     'transformer' : '''!obj:pylearn2.models.mlp.MLP {
    #         nvis : %(nvis)i,
    #         layers :
Example 11
for debugging purposes
for learning pylearn2
"""

from dataclassraw import CLICK4DAY
from transformer import Transformer
from pylearn2.datasets.transformer_dataset import TransformerDataset

from pylearn2.models import mlp
from pylearn2.training_algorithms import sgd
from pylearn2.termination_criteria import EpochCounter


raw_ds = CLICK4DAY(which_set='train', which_day=21)
transformer = Transformer(raw=raw_ds, nfeatures=1024,  rng=None)
ds = TransformerDataset(raw=raw_ds, transformer=transformer, cpu_only=False, \
                 space_preserving=False)


hidden_layer = mlp.Sigmoid(layer_name='hidden', dim=256, irange=.1, init_bias=1.)

output_layer = mlp.Softmax(2, 'output', irange=.1)

trainer = sgd.SGD(learning_rate=.05, batch_size=1024, \
train_iteration_mode='even_sequential',termination_criterion=EpochCounter(400))

layers = [hidden_layer, output_layer]

ann = mlp.MLP(layers, nvis=1024)

trainer.setup(ann, ds)
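
# trainer.setup() only prepares the SGD state; nothing is trained yet.
# A rough sketch of the loop that pylearn2's Train.main_loop() would run
# (monitoring channels, callbacks and model saving omitted):
while trainer.continue_learning(ann):
    trainer.train(dataset=ds)
    ann.monitor.report_epoch()

# Equivalently, one can let pylearn2 drive the loop:
#   from pylearn2.train import Train
#   Train(dataset=ds, model=ann, algorithm=trainer).main_loop()
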
def main(args=None):
    """
    args is the list of arguments that will be passed to the option parser.
    The default (None) means use sys.argv[1:].
    """
    # parser = OptionParser()
    # parser.add_option("-d", "--data", dest="dataset", default="toy",
    #                   help="specify the dataset, either cifar10, mnist or toy")
    # (options, args) = parser.parse_args(args=args)
    #
    # if options.dataset == 'toy':
    #     trainset, testset = get_dataset_toy()
    #     n_output = 2
    # elif options.dataset == 'cifar10':
    #     trainset, testset, = get_dataset_cifar10()
    #     n_output = 10
    #
    # elif options.dataset == 'mnist':
    #     trainset, testset, = get_dataset_mnist()
    #     n_output = 10
    #
    # else:
    #     NotImplementedError()
    trainset = get_dataset_timitCons()
    n_output = 32

    design_matrix = trainset.get_design_matrix()
    n_input = design_matrix.shape[1]

    # build layers
    layers = []
    structure = [[n_input, 500], [500, 500], [500, 500], [500, n_output]]
    # layer 0: gaussianRBM
    layers.append(get_grbm(structure[0]))
    # # layer 1: denoising AE
    # layers.append(get_denoising_autoencoder(structure[1]))
    # # layer 2: AE
    # layers.append(get_autoencoder(structure[2]))
    # # layer 3: logistic regression used in supervised training
    # layers.append(get_logistic_regressor(structure[3]))

    # layer 1: gaussianRBM
    layers.append(get_grbm(structure[1]))
    # layer 2: gaussianRBM
    layers.append(get_grbm(structure[2]))
    # layer 3: logistic regression used in supervised training
    # layers.append(get_logistic_regressor(structure[3]))
    layers.append(get_mlp_softmax(structure[3]))

    # construct training sets for different layers
    trainset = [
        trainset,
        TransformerDataset(raw=trainset, transformer=layers[0]),
        TransformerDataset(raw=trainset,
                           transformer=StackedBlocks(layers[0:2])),
        TransformerDataset(raw=trainset,
                           transformer=StackedBlocks(layers[0:3]))
    ]

    # construct layer trainers
    layer_trainers = []
    layer_trainers.append(get_layer_trainer_sgd_rbm0(layers[0], trainset[0]))
    # layer_trainers.append(get_layer_trainer_sgd_autoencoder(layers[1], trainset[1]))
    # layer_trainers.append(get_layer_trainer_sgd_autoencoder(layers[2], trainset[2]))
    layer_trainers.append(get_layer_trainer_sgd_rbm1(layers[1], trainset[1]))
    layer_trainers.append(get_layer_trainer_sgd_rbm2(layers[2], trainset[2]))
    # layer_trainers.append(get_layer_trainer_logistic(layers[3], trainset[3]))
    layer_trainers.append(get_layer_trainer_softmax(layers[3], trainset[3]))

    # unsupervised pretraining
    for i, layer_trainer in enumerate(layer_trainers[0:3]):
        print('-----------------------------------')
        print(' Unsupervised training layer %d, %s' % (i, layers[i].__class__))
        print('-----------------------------------')
        layer_trainer.main_loop()

    print('\n')
    print('------------------------------------------------------')
    print(' Unsupervised training done! Start supervised training...')
    print('------------------------------------------------------')
    print('\n')

    # supervised training
    # layer_trainers[-1].main_loop()
    layer1_yaml = open('MachineLearning.yaml', 'r').read()
    train = yaml_parse.load(layer1_yaml)
    train.main_loop()
def train_SdA(config, dataset):
    ## load config
    hidden_layers_sizes = config.get('hidden_layers_sizes', [10, 10])
    corruption_levels = config.get('corruption_levels', [0.1, 0.2])
    stage2_corruption_levels = config.get('stage2_corruption_levels',
                                          [0.1, 0.1])

    pretrain_epochs = config.get('pretrain_epochs', 10)
    pretrain_lr = config.get('pretrain_learning_rate', 0.001)

    finetune_epochs = config.get('finetune_epochs', 10)
    finetune_lr = config.get('finetune_learning_rate', 0.01)

    batch_size = config.get('batch_size', 10)
    monitoring_batches = config.get('monitoring_batches', 5)

    output_path = config.get('output_path', './')

    input_trainset = dataset
    design_matrix = input_trainset.get_design_matrix()
    #     print design_matrix.shape;
    n_input = design_matrix.shape[1]
    log.info('done')

    log.debug('input dimensions : {0}'.format(n_input))
    log.debug('training examples: {0}'.format(design_matrix.shape[0]))

    # numpy random generator
    #     numpy_rng = numpy.random.RandomState(89677)

    log.info('... building the model')

    # build layers
    layer_dims = [n_input]
    layer_dims.extend(hidden_layers_sizes)

    layers = []
    for i in xrange(1, len(layer_dims)):
        structure = [layer_dims[i - 1], layer_dims[i]]
        layers.append(
            create_denoising_autoencoder(structure,
                                         corruption=corruption_levels[i - 1]))

    # unsupervised pre-training
    log.info('... pre-training the model')
    start_time = time.clock()

    for i in xrange(len(layers)):
        # reset corruption to make sure input is not corrupted
        for layer in layers:
            layer.set_corruption_level(0)

        if i == 0:
            trainset = input_trainset
        elif i == 1:
            trainset = TransformerDataset(raw=input_trainset,
                                          transformer=layers[0])
        else:
            trainset = TransformerDataset(raw=input_trainset,
                                          transformer=StackedBlocks(
                                              layers[0:i]))

        # set corruption for layer to train
        layers[i].set_corruption_level(corruption_levels[i])

        trainer = get_layer_trainer_sgd_autoencoder(
            layers[i],
            trainset,
            learning_rate=pretrain_lr,
            max_epochs=pretrain_epochs,
            batch_size=batch_size,
            monitoring_batches=monitoring_batches,
            name='pre-train' + str(i))

        log.info('unsupervised training layer %d, %s ' %
                 (i, layers[i].__class__))
        trainer.main_loop()

#         theano.printing.pydotprint_variables(
#                                      layer_trainer.algorithm.sgd_update.maker.fgraph.outputs[0],
#                                      outfile='pylearn2-sgd_update.png',
#                                      var_with_name_simple=True);

    end_time = time.clock()
    log.info('pre-training code ran for {0:.2f}m'.format(
        (end_time - start_time) / 60.))

    # now untie the decoder weights
    log.info('untying decoder weights')
    for layer in layers:
        layer.untie_weights()

    # construct multi-layer training functions

    # unsupervised training
    log.info('... training the model')

    sdae = None
    for depth in xrange(1, len(layers) + 1):
        first_layer_i = len(layers) - depth
        log.debug('training layers {}..{}'.format(first_layer_i,
                                                  len(layers) - 1))

        group = layers[first_layer_i:len(layers)]
        #         log.debug(group);

        # reset corruption
        for layer in layers:
            layer.set_corruption_level(0)

        if first_layer_i == 0:
            trainset = input_trainset
        elif first_layer_i == 1:
            trainset = TransformerDataset(raw=input_trainset,
                                          transformer=layers[0])
        else:
            trainset = TransformerDataset(raw=input_trainset,
                                          transformer=StackedBlocks(
                                              layers[0:first_layer_i]))

        # set corruption for input layer of stack to train
#         layers[first_layer_i].set_corruption_level(stage2_corruption_levels[first_layer_i]);

        corruptor = LoggingCorruptor(BinomialCorruptor(
            corruption_level=stage2_corruption_levels[first_layer_i]),
                                     name='depth {}'.format(depth))
        sdae = StackedDenoisingAutoencoder(group, corruptor)

        trainer = get_layer_trainer_sgd_autoencoder(
            sdae,
            trainset,
            learning_rate=finetune_lr,
            max_epochs=finetune_epochs,
            batch_size=batch_size,
            monitoring_batches=monitoring_batches,
            name='multi-train' + str(depth))

        log.info('unsupervised multi-layer training, depth %d' % depth)
        trainer.main_loop()

    end_time = time.clock()
    log.info('full training code ran for {0:.2f}m'.format(
        (end_time - start_time) / 60.))

    # save the model
    model_file = os.path.join(output_path, 'sdae-model.pkl')
    with log_timing(log, 'saving SDA model to {}'.format(model_file)):
        serial.save(model_file, sdae)

    # TODO: pylearn2.train_extensions.best_params.KeepBestParams(model, cost, monitoring_dataset, batch_size)
    # pylearn2.train_extensions.best_params.MonitorBasedSaveBest

    log.info('done')

    return sdae
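
For completeness, a minimal usage sketch for train_SdA, assuming the function and its helpers are importable and using a random DenseDesignMatrix as a stand-in dataset; every value below is a placeholder mirroring the config.get(...) defaults read above:

import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix

config = {
    'hidden_layers_sizes': [100, 50],
    'corruption_levels': [0.1, 0.2],
    'stage2_corruption_levels': [0.1, 0.1],
    'pretrain_epochs': 5,
    'pretrain_learning_rate': 0.001,
    'finetune_epochs': 5,
    'finetune_learning_rate': 0.01,
    'batch_size': 10,
    'monitoring_batches': 5,
    'output_path': './',
}

# stand-in dataset: 500 examples with 64 features
dataset = DenseDesignMatrix(X=np.random.randn(500, 64).astype('float32'))
sdae = train_SdA(config, dataset)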