Example #1
def build_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['experiment_type'],
                     brandcode=params['STEP3']['brandcode'])
    # pdb.set_trace()
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = RBM(input=x,
                    n_visible=dataset.phase1_input_size,
                    n_hidden=params['STEP1']['n_hidden'],
                    reg_weight=params['STEP1']['beta'])
        train_rbm(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'])
    else:
        model = SparseAutoencoder(input=x,
                                  n_visible=dataset.phase1_input_size,
                                  n_hidden=params['STEP1']['n_hidden'],
                                  beta=params['STEP1']['beta'])
        train_sae(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'])
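Note: every example on this page references module-level `params` and `model_dirs` dictionaries that are defined elsewhere in the source project and are not shown here (some variants read `params['experiment_type']`, others `params['dataset_type']`). The sketch below is a hypothetical reconstruction of their shape, inferred only from the dictionary lookups in the snippets; every concrete key and value is an assumption, not the project's actual configuration.

# Hypothetical configuration sketch -- inferred from the lookups in the examples;
# all values here are illustrative assumptions, not the project's real defaults.
params = {
    'experiment_type': 'all',              # assumed label; passed to Nikkei(dataset_type=...)
    'STEP1': {
        'model': 'rbm',                    # 'rbm', 'sda', or anything else for SparseAutoencoder
        'n_hidden': 1000,
        'learning_rate': 0.05,
        'beta': 0.001,                     # some variants name this 'reg_weight'
        'reg_weight': 0.001,
        'corruption_level': 0.0,
        'batch_size': 30,
    },
    'STEP3': {
        'brandcode': '0101',               # assumed brand code for the Nikkei dataset
    },
}
model_dirs = {
    'STEP1': 'models/STEP1/compress_model.pkl',   # assumed output path for the STEP 1 model
}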
Example #2
def build_CompressModel():
    print "STEP 1 start..."
    dataset = Nikkei(dataset_type=params["experiment_type"], brandcode=params["STEP3"]["brandcode"])
    # pdb.set_trace()
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix("x")  # the data is presented as rasterized images
    if params["STEP1"]["model"] == "rbm":
        model = RBM(
            input=x,
            n_visible=dataset.phase1_input_size,
            n_hidden=params["STEP1"]["n_hidden"],
            reg_weight=params["STEP1"]["beta"],
        )
        train_rbm(
            input=x,
            model=model,
            dataset=dataset,
            learning_rate=params["STEP1"]["learning_rate"],
            outdir=model_dirs["STEP1"],
        )
    else:
        model = SparseAutoencoder(
            input=x,
            n_visible=dataset.phase1_input_size,
            n_hidden=params["STEP1"]["n_hidden"],
            beta=params["STEP1"]["beta"],
        )
        train_sae(
            input=x,
            model=model,
            dataset=dataset,
            learning_rate=params["STEP1"]["learning_rate"],
            outdir=model_dirs["STEP1"],
        )
Example #3
def build_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['dataset_type'], brandcode=params['STEP3']['brandcode'])
    # pdb.set_trace()
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = RBM(input=x, n_visible=dataset.phase1_input_size, n_hidden=params['STEP1']['n_hidden'], reg_weight=params['STEP1']['reg_weight'], corruption_level=params['STEP1']['corruption_level'])
        train_rbm(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'], batch_size=params['STEP1']['batch_size'])
    elif params['STEP1']['model'] == 'sda':
        sda_params = {
            'dataset' : dataset, 
            'hidden_layers_sizes' : [params['STEP1']['n_hidden'], params['STEP1']['n_hidden'] / 2],
            'pretrain_lr' : params['STEP1']['learning_rate'],
            'pretrain_batch_size' : params['STEP1']['batch_size'],
            'pretrain_epochs' : 5,
            'corruption_levels' : [0.5, 0.5],
            'k' : None,
            'y_type' : 0,
            'sparse_weight' : params['STEP1']['reg_weight']
        }
        model = SdA.compress(sda_params)
        pre_params = get_model_params(model)

        while(True):
            try:
                f_out = open(model_dirs['STEP1'], 'w')
                f_out.write(cPickle.dumps(model, 1))
                f_out.close()
                break
            except:
                pdb.set_trace()
    else:
        model = SparseAutoencoder(input=x, n_visible=dataset.phase1_input_size, n_hidden=params['STEP1']['n_hidden'], reg_weight=params['STEP1']['reg_weight'], corruption_level=params['STEP1']['corruption_level'])
        train_sae(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'], batch_size=params['STEP1']['batch_size'])
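In the 'sda' branch above, the trained model is pickled inside a while/try loop whose bare except drops into pdb. A minimal sketch of an equivalent save step, assuming the same cPickle serialization and that model_dirs['STEP1'] points at a writable file path (the helper name save_step1_model is made up for this sketch):

import cPickle

def save_step1_model(model, path):
    # Persist the compressed model with pickle protocol 1, as in the example above.
    # Binary mode and a context manager keep the write simple and portable.
    with open(path, 'wb') as f_out:
        cPickle.dump(model, f_out, 1)

# save_step1_model(model, model_dirs['STEP1'])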
Example #4
def retrain_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['dataset_type'],
                     brandcode=params['STEP3']['brandcode'])
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = load_model(model_type='rbm',
                           input=x,
                           params_dir=model_dirs['STEP1'])
        train_rbm(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  batch_size=params['STEP1']['batch_size'],
                  outdir=model_dirs['STEP1'])
    # elif params['STEP1']['model'] == 'sda':
    #     presae_dir = '%s/%s/h%d_lr%s_b%s_c%s.%s' % (default_model_dir, 'STEP1', params['STEP1']['n_hidden'], str(params['STEP1']['learning_rate']), str(params['STEP1']['reg_weight']), str(params['STEP1']['corruption_level']), 'sae')
    #     x2 = T.matrix('x')
    #     pre_model = load_model(model_type='sae', input=x, params_dir=presae_dir)
    #     model = SparseAutoencoder(input=x2, n_visible=params['STEP1']['n_hidden'], n_hidden=params['STEP1']['n_hidden'], reg_weight=params['STEP1']['reg_weight'], corruption_level=params['STEP1']['corruption_level'])
    #     train_sae2(input=x, model=model, pre_model=pre_model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'])
    else:
        model = load_model(model_type='sae',
                           input=x,
                           params_dir=model_dirs['STEP1'])
        train_sae(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  batch_size=params['STEP1']['batch_size'],
                  outdir=model_dirs['STEP1'])
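The retrain variant differs from build_CompressModel only in how it obtains the model: instead of constructing a fresh RBM or SparseAutoencoder, it restores a previously saved one via load_model and continues training it on the same dataset. A hypothetical driver, assuming both functions are defined in the same module and that model_dirs['STEP1'] is the saved-model path, might dispatch between them like this:

import os

if __name__ == '__main__':
    if os.path.exists(model_dirs['STEP1']):
        # A saved STEP 1 model exists: resume training from it.
        retrain_CompressModel()
    else:
        # No saved model yet: build and train one from scratch.
        build_CompressModel()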
Example #5
def retrain_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['experiment_type'], brandcode=params['STEP3']['brandcode'])
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = load_model(model_type='rbm', input=x, params_dir=model_dirs['STEP1'])
        train_rbm(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'])
    else:
        model = load_model(model_type='sae', input=x, params_dir=model_dirs['STEP1'])
        train_sae(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'])
Example #6
def build_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['experiment_type'], brandcode=params['STEP3']['brandcode'])
    # pdb.set_trace()
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = RBM(input=x, n_visible=dataset.phase1_input_size, n_hidden=params['STEP1']['n_hidden'], reg_weight=params['STEP1']['beta'])
        train_rbm(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'])
    else:
        model = SparseAutoencoder(input=x, n_visible=dataset.phase1_input_size, n_hidden=params['STEP1']['n_hidden'], beta=params['STEP1']['beta'])
        train_sae(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'])
Example #7
def retrain_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['dataset_type'], brandcode=params['STEP3']['brandcode'])
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = load_model(model_type='rbm', input=x, params_dir=model_dirs['STEP1'])
        train_rbm(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], batch_size=params['STEP1']['batch_size'], outdir=model_dirs['STEP1'])
    # elif params['STEP1']['model'] == 'sda':
    #     presae_dir = '%s/%s/h%d_lr%s_b%s_c%s.%s' % (default_model_dir, 'STEP1', params['STEP1']['n_hidden'], str(params['STEP1']['learning_rate']), str(params['STEP1']['reg_weight']), str(params['STEP1']['corruption_level']), 'sae')
    #     x2 = T.matrix('x')
    #     pre_model = load_model(model_type='sae', input=x, params_dir=presae_dir)
    #     model = SparseAutoencoder(input=x2, n_visible=params['STEP1']['n_hidden'], n_hidden=params['STEP1']['n_hidden'], reg_weight=params['STEP1']['reg_weight'], corruption_level=params['STEP1']['corruption_level'])
    #     train_sae2(input=x, model=model, pre_model=pre_model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], outdir=model_dirs['STEP1'])
    else:
        model = load_model(model_type='sae', input=x, params_dir=model_dirs['STEP1'])
        train_sae(input=x, model=model, dataset=dataset, learning_rate=params['STEP1']['learning_rate'], batch_size=params['STEP1']['batch_size'], outdir=model_dirs['STEP1'])
Example #8
def retrain_CompressModel():
    print "STEP 1 start..."
    dataset = Nikkei(dataset_type=params["experiment_type"], brandcode=params["STEP3"]["brandcode"])
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix("x")  # the data is presented as rasterized images
    if params["STEP1"]["model"] == "rbm":
        model = load_model(model_type="rbm", input=x, params_dir=model_dirs["STEP1"])
        train_rbm(
            input=x,
            model=model,
            dataset=dataset,
            learning_rate=params["STEP1"]["learning_rate"],
            outdir=model_dirs["STEP1"],
        )
    else:
        model = load_model(model_type="sae", input=x, params_dir=model_dirs["STEP1"])
        train_sae(
            input=x,
            model=model,
            dataset=dataset,
            learning_rate=params["STEP1"]["learning_rate"],
            outdir=model_dirs["STEP1"],
        )
Example #9
def build_CompressModel():
    print 'STEP 1 start...'
    dataset = Nikkei(dataset_type=params['dataset_type'],
                     brandcode=params['STEP3']['brandcode'])
    # pdb.set_trace()
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    if params['STEP1']['model'] == 'rbm':
        model = RBM(input=x,
                    n_visible=dataset.phase1_input_size,
                    n_hidden=params['STEP1']['n_hidden'],
                    reg_weight=params['STEP1']['reg_weight'],
                    corruption_level=params['STEP1']['corruption_level'])
        train_rbm(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'],
                  batch_size=params['STEP1']['batch_size'])
    elif params['STEP1']['model'] == 'sda':
        sda_params = {
            'dataset': dataset,
            'hidden_layers_sizes': [params['STEP1']['n_hidden'],
                                    params['STEP1']['n_hidden'] / 2],
            'pretrain_lr': params['STEP1']['learning_rate'],
            'pretrain_batch_size': params['STEP1']['batch_size'],
            'pretrain_epochs': 5,
            'corruption_levels': [0.5, 0.5],
            'k': None,
            'y_type': 0,
            'sparse_weight': params['STEP1']['reg_weight']
        }
        model = SdA.compress(sda_params)
        pre_params = get_model_params(model)

        while (True):
            try:
                f_out = open(model_dirs['STEP1'], 'w')
                f_out.write(cPickle.dumps(model, 1))
                f_out.close()
                break
            except:
                pdb.set_trace()
    else:
        model = SparseAutoencoder(
            input=x,
            n_visible=dataset.phase1_input_size,
            n_hidden=params['STEP1']['n_hidden'],
            reg_weight=params['STEP1']['reg_weight'],
            corruption_level=params['STEP1']['corruption_level'])
        train_sae(input=x,
                  model=model,
                  dataset=dataset,
                  learning_rate=params['STEP1']['learning_rate'],
                  outdir=model_dirs['STEP1'],
                  batch_size=params['STEP1']['batch_size'])