Example #1
def load_model(model_type='sae', input=None, params_dir=None):
    # Load pickled parameters and wrap them in the requested model class.
    with open(params_dir, 'rb') as f:
        params = cPickle.load(f)
    if model_type == 'rbm':
        model = RBM(input=input, params=params)
    else:
        model = SparseAutoencoder(input=input, params=params)
    return model
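A minimal usage sketch (not part of the original listing); the Theano symbolic input and the parameter-file paths below are assumptions:

import theano.tensor as T

x = T.matrix('x')
# 'output/STEP1/*.pkl' are hypothetical paths to pickled parameter files.
sae = load_model(model_type='sae', input=x, params_dir='output/STEP1/sae_params.pkl')
rbm = load_model(model_type='rbm', input=x, params_dir='output/STEP1/rbm_params.pkl')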
Example #2
def build_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['experiment_type'],
                     brandcode=params['STEP3']['brandcode'])
    # pdb.set_trace()
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = RBM(input=x,
                    n_visible=dataset.phase1_input_size,
                    n_hidden=params['STEP1']['n_hidden'],
                    reg_weight=params['STEP1']['beta'])
        train_rbm(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'])
    else:
        model = SparseAutoencoder(input=x,
                                  n_visible=dataset.phase1_input_size,
                                  n_hidden=params['STEP1']['n_hidden'],
                                  beta=params['STEP1']['beta'])
        train_sae(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'])
Example #3
def build_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['dataset_type'],
                     brandcode=params['STEP3']['brandcode'])
    # pdb.set_trace()
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = RBM(input=x,
                    n_visible=dataset.phase1_input_size,
                    n_hidden=params['STEP1']['n_hidden'],
                    reg_weight=params['STEP1']['reg_weight'],
                    corruption_level=params['STEP1']['corruption_level'])
        train_rbm(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'],
                  batch_size=params['STEP1']['batch_size'])
    elif params['STEP1']['model'] == 'sda':
        sda_params = {
            'dataset': dataset,
            'hidden_layers_sizes': [params['STEP1']['n_hidden'],
                                    params['STEP1']['n_hidden'] / 2],
            'pretrain_lr': params['STEP1']['learning_rate'],
            'pretrain_batch_size': params['STEP1']['batch_size'],
            'pretrain_epochs': 5,
            'corruption_levels': [0.5, 0.5],
            'k': None,
            'y_type': 0,
            'sparse_weight': params['STEP1']['reg_weight']
        }
        model = SdA.compress(sda_params)
        pre_params = get_model_params(model)

        # Persist the pre-trained SdA model; drop into the debugger if pickling fails.
        while True:
            try:
                f_out = open(model_dirs['STEP1'], 'wb')
                f_out.write(cPickle.dumps(model, 1))
                f_out.close()
                break
            except:
                pdb.set_trace()
    else:
        model = SparseAutoencoder(
            input=x,
            n_visible=dataset.phase1_input_size,
            n_hidden=params['STEP1']['n_hidden'],
            reg_weight=params['STEP1']['reg_weight'],
            corruption_level=params['STEP1']['corruption_level'])
        train_sae(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'],
                  batch_size=params['STEP1']['batch_size'])
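build_CompressModel reads the module-level params and model_dirs dictionaries rather than taking arguments. Below is a hedged sketch of the shape they appear to have, inferred only from the keys referenced above; every concrete value is an assumption:

# Illustrative configuration; only the key names come from the examples above.
params = {
    'experiment_type': 'chi2_selected',   # read by Example #2 (assumed value)
    'dataset_type': 'chi2_selected',      # read by Example #3 (assumed value)
    'STEP1': {
        'model': 'sae',                   # 'rbm', 'sda', or anything else for SparseAutoencoder
        'n_hidden': 1000,
        'learning_rate': 0.05,
        'batch_size': 30,
        'reg_weight': 0.01,
        'corruption_level': 0.3,
        'beta': 0.01,                     # only referenced in Example #2
    },
    'STEP3': {'brandcode': '0101'},
}
model_dirs = {'STEP1': 'output/STEP1/model.pkl'}  # assumed output path for the trained model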