Code example #1
def train_spectrum_coding():
        
    output_folder = 'dim_L9'
    data_file = 'TIMIT_train_(N1)_split.mat'
#     data_file = 'TIMIT_train_(N8)_split.mat'
#     if output_folder.find('300') != -1:    
#         data_file = '300bps/TIMIT_train_split.mat'
#     elif output_folder.find('600') != -1:
#         data_file = '600bps/TIMIT_train_dr1_dr4_split.mat'
#     elif output_folder.find('1200') != -1:
#         data_file = '1200bps/TIMIT_train_dr1_dr2_split.mat'
#     elif output_folder.find('2400') != -1:
#         data_file = 'TIMIT_train_(N8)_split.mat'
        
    datasets = load_TIMIT(data_file)
    train_set, valid_set, test_set = datasets
    p_list = [0]
    sigma_list = [0]
    input_dim = train_set.get_value(borrow=True).shape[1]
#     layers_sizes = [input_dim,500,70,500,input_dim]
#     layers_sizes = [input_dim,1000,500,250,70,250,500,1000,input_dim]
#     layers_sizes = [input_dim,2000,1000,500,250,70,250,500,1000,2000,input_dim]    
    layers_sizes = [input_dim,2000,1000,500,54,500,1000,2000,input_dim]
#     layers_sizes = [input_dim,2000,2000,54,2000,2000,input_dim]
#     layers_sizes = [input_dim,2000,576,2000,input_dim]
    for p in p_list:
        for sigma in sigma_list:
            p_dict = {'p_list': [0, 0, 0, 0], 'p': p}
            sigma_dict = {'sigma_list':[0, 0, 0, 0], 'sigma': sigma}
            train_SAE(datasets, layers_sizes, output_folder, p_dict, sigma_dict)
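
All of these examples rely on a load_TIMIT helper that is not shown on this page. The sketch below is only an assumption of what it does: it loads three (n_samples, n_features) matrices from a .mat file (the key names train_set / valid_set / test_set are hypothetical) and wraps each one in a Theano shared variable, so that get_value(borrow=True) and the givens-based mini-batch slicing in the later examples work.

import numpy
import theano
from scipy.io import loadmat

def load_TIMIT(data_file='TIMIT_train_split.mat'):
    # Sketch only: the real file layout is defined elsewhere in the project.
    def shared(data):
        # keep everything in floatX so it can live on the GPU
        return theano.shared(numpy.asarray(data, dtype=theano.config.floatX), borrow=True)
    mat = loadmat(data_file)
    return shared(mat['train_set']), shared(mat['valid_set']), shared(mat['test_set'])
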
Code example #2
File: mfcc_coding_rmb.py  Project: jiangkid/workspace
def train_codec():
        
    output_folder = '600_mfs'
    data_file = 'mfs_train_(N4).mat'    
#     if output_folder.find('300') != -1:
#         if output_folder.find('rbm') != -1:
#             data_file = '300bps/rbm_TIMIT_train_split.mat'
#         elif output_folder.find('mfs') != -1:
#             data_file = 'mfs_train_(N8).mat'
#         else:
#             data_file = '300bps/TIMIT_train_split.mat'        
#     elif output_folder.find('600') != -1:
#         data_file = '600bps/TIMIT_train_dr1_dr4_split.mat'
#     elif output_folder.find('1200') != -1:
#         data_file = '1200bps/TIMIT_train_dr1_dr2_split.mat'
#     elif output_folder.find('2400') != -1:
#         data_file = '2400bps/TIMIT_train_dr1_dr2_split.mat'
        
    datasets = load_TIMIT(data_file)
    train_set, valid_set, test_set = datasets
    p_list = [-1]
    sigma_list = [0]
    input_dim = train_set.get_value(borrow=True).shape[1]    
    layers_sizes = [input_dim, 2000, 1000, 500, 108, 500, 1000, 2000, input_dim]
#     layers_sizes = [input_dim,2000,2000,54,2000,2000,input_dim]
#     layers_sizes = [input_dim,500,54,500,input_dim]
    for p in p_list:
        for sigma in sigma_list:
            train_DAE(datasets, layers_sizes, output_folder, p, sigma)    
Code example #3
def train_gb_rbm(batch_size=100,epochs=50):
    output_folder = 'gb_rbm'
    data_file = 'rbm_TIMIT_dr2_(N1)_split.mat'
    
    datasets = load_TIMIT(data_file)
    train_set, valid_set, test_set = datasets
    numpy_rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        
    input_dim = train_set.get_value(borrow=True).shape[1]
    layers_sizes = [input_dim,70,input_dim]
    input_x = T.matrix(name='x')
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    logger = mylogger(output_folder + '/log.log')
    gb_rbm_layer = GBRBM(numpy_rng=numpy_rng,
                              theano_rng=theano_rng,
                              input=input_x,
                              n_visible=layers_sizes[0],
                              n_hidden=layers_sizes[1])
    
    index = T.lscalar('index')  
    momentum = T.scalar('momentum')
    learning_rate = T.scalar('lr') 
    # number of mini-batches
    n_batches = train_set.get_value(borrow=True).shape[0] / batch_size
    # start and end index of this mini-batch
    batch_begin = index * batch_size
    batch_end = batch_begin + batch_size
        
    r_cost, fe_cost, updates = gb_rbm_layer.get_cost_updates(batch_size, learning_rate,
                                                            momentum, weight_cost=0.0002,
                                                            persistent=None, k = 1)

    # compile the theano function
    fn = theano.function(inputs=[index,
                              theano.Param(learning_rate, default=0.0001),
                              theano.Param(momentum, default=0.5)],
                              outputs= [r_cost, fe_cost],
                              updates=updates,
                              givens={input_x: train_set[batch_begin:batch_end]})
    r_c, fe_c = [], []  # keep record of reconstruction and free-energy cost
    for epoch in range(epochs):
        for batch_index in xrange(n_batches):  # loop over mini-batches
            [reconstruction_cost, free_energy_cost] = fn(index=batch_index)
            r_c.append(reconstruction_cost)
            fe_c.append(free_energy_cost)
        logger.log('pre-training, epoch %d, r_cost %f, fe_cost %f' % (epoch, numpy.mean(r_c), numpy.mean(fe_c)))
    
    # collect the trained parameters after pre-training and save them below
    params = []
    for item in gb_rbm_layer.params:
        params.append(item.get_value(borrow=True))
    savemat(output_folder+'/gb_rbm.mat', {'params':params})
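
The index / batch_begin / batch_end / givens construction above is the standard Theano idiom for streaming mini-batches out of a shared dataset without copying data into the compiled function on every call. A self-contained sketch of the same idea, using an illustrative stand-in cost rather than the RBM objective:

import numpy
import theano
import theano.tensor as T

batch_size = 100
# toy shared dataset standing in for train_set
data = theano.shared(numpy.random.randn(1000, 129).astype(theano.config.floatX), borrow=True)

index = T.lscalar('index')                    # mini-batch index
x = T.matrix('x')                             # symbolic input
cost = T.mean((x - T.mean(x, axis=0)) ** 2)   # stand-in cost, not the RBM objective

step = theano.function(
    inputs=[index],
    outputs=cost,
    givens={x: data[index * batch_size:(index + 1) * batch_size]})

n_batches = data.get_value(borrow=True).shape[0] // batch_size
costs = [step(i) for i in range(n_batches)]
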
Code example #4
File: auto_encoder.py  Project: jiangkid/workspace
def test_model(batch_size=100, file_name='da.pkl'):
    
#     datasets = load_data(dataset)
    print '...loading data'
    datasets = load_TIMIT()
    train_set, valid_set, test_set = datasets

    print '...building model'
    
    pickle_lst = [1000]  # , 500, 1000
#     pickle_lst = [1, 10]
    for epoch in pickle_lst:
        print 'epoch: ', epoch
        file_name = "da_epoch_%d" % (epoch)        
        w, b, b_prime = load_model_mat(file_name)
        # allocate symbolic variables for the data
        index = T.lscalar()  # index to a [mini]batch
    
        # generate symbolic variables for input (x and y represent a
        # minibatch)
        x = T.matrix('x')  # data, presented as rasterized images        
        
        rng = numpy.random.RandomState(123)
        theano_rng = RandomStreams(rng.randint(2 ** 30))
        da = dA(
            numpy_rng=rng,
            theano_rng=theano_rng,
            input=x,
            n_visible=129,
            n_hidden=500,
            W=w,
            bhid=b,
            bvis=b_prime
        )
            
#         test_fun = theano.function(
#             inputs=[index],
#             outputs=da.get_reconstructed_out(),
#             givens={
#                 x: test_set_x[index * batch_size:(index + 1) * batch_size]
#             }
#         )
        get_outputs = theano.function(
            inputs=[index],
            outputs=da.get_active(),
            givens={
                x: test_set[index * batch_size:(index + 1) * batch_size]
            }
        )
        
        index = 1
        hidden_value = get_outputs(index)
        plot_data = test_set.get_value(borrow=True)[index * batch_size:(index + 1) * batch_size]
        pylab.figure(); pylab.hist(plot_data.reshape(plot_data.size, 1), 50);
        pylab.figure();pylab.plot(numpy.mean(plot_data, axis=0), '*');pylab.xlim(0, 128);pylab.ylim(0, 1);
        pylab.figure();pylab.hist(hidden_value.reshape(hidden_value.size, 1), 50);
        pylab.figure();pylab.plot(numpy.mean(hidden_value, axis=0), '*');pylab.ylim(0, 1);
        pylab.show()
        set_trace()
#         pylab.title(epoch)
    pylab.show()
Code example #5
File: auto_encoder.py  Project: jiangkid/workspace
def auto_encoder_finetune(datasets=None, p=0, sigma=1, param=None, training_epochs=1000):
    if datasets is None:
        datasets = load_TIMIT()
    train_set, valid_set, test_set = datasets
         
    def get_shared(x):
        return theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=False)            
    
    layers_sizes = param['layers_sizes']
    L1_file = param['L1_file']
    L2_file = param['L2_file']
    L3_file = param['L3_file']
    L4_file = param['L4_file']
    output_folder = param['output_folder']
    item_str = param['item_str']
    
    valid_flag = 0
    finetune_lr = 0.1
    batch_size = 100
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set.get_value(borrow=True).shape[0] / batch_size
    
    # allocate symbolic variables for the data
    index = T.lscalar('index') 
    learning_rate = T.scalar('lr') 
    x = T.matrix('x')

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
         
#     pickle_lst = [100,200,300,400,500,600,700,800,900,1000]
    L1_w, L1_b, L1_b_prime = load_model_mat(file_name=L1_file, shared=0)    
    L2_w, L2_b, L2_b_prime = load_model_mat(file_name=L2_file, shared=0)
    L3_w, L3_b, L3_b_prime = load_model_mat(file_name=L3_file, shared=0)
    L4_w, L4_b, L4_b_prime = load_model_mat(file_name=L4_file, shared=0)
    w_list = [get_shared(L1_w), get_shared(L2_w), get_shared(L3_w), get_shared(L4_w), 
              get_shared(L4_w.T), get_shared(L3_w.T), get_shared(L2_w.T), get_shared(L1_w.T)]    
    b_list = [get_shared(L1_b), get_shared(L2_b), get_shared(L3_b), get_shared(L4_b), 
              get_shared(L4_b_prime), get_shared(L3_b_prime), get_shared(L2_b_prime), get_shared(L1_b_prime)]
      
    rng = numpy.random.RandomState(123)
    sae = SAE(numpy_rng=rng,
              input=x,
              layers_sizes=layers_sizes,
              w_list=w_list,
              b_list=b_list)
    print '... building the model'
    train_fn, valid_model, test_model = sae.build_finetune_functions(datasets, batch_size, learning_rate, p, sigma)
    print '... training'    

    logger = mylogger(output_folder + '/' + item_str + '.log')
    logger.log('p:%g, sigma:%g, learning rate:%g' % (p, sigma, finetune_lr))
    
    #===========================================================================
    # start training
    #===========================================================================

    patience = 100 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is found
    improvement_threshold = 0.999  # a relative improvement of this much is considered significant
    validation_frequency = min(n_train_batches, patience / 2)
        
    best_validation_loss = numpy.inf
    
    test_score = 0.
    start_time = strict_time()

    done_looping = False
    epoch = 0
    best_epoch = 0
        
    while (epoch < training_epochs):
        epoch = epoch + 1
        epoch_time_s = strict_time()
        c = []
        for minibatch_index in xrange(n_train_batches):
            err = train_fn(minibatch_index,finetune_lr)
#             err = 0
            c.append(err)
        logger.log('Training epoch %d, cost %.5f, took %f seconds ' % (epoch, numpy.mean(c), (strict_time() - epoch_time_s)))        
        if epoch % 100 == 0:
            finetune_lr = 0.8 * finetune_lr            
            logger.log('learning rate: %g' % (finetune_lr))
        if valid_flag == 0:
            continue
        validation_losses = numpy.mean(valid_model())
        logger.log('valid %.5f' % (validation_losses))
        if validation_losses < best_validation_loss:
            best_validation_loss = validation_losses
            best_epoch = epoch
            # test it on the test set
            test_losses = numpy.mean(test_model())
            logger.log('test %.5f' % (test_losses))
            sae.save_model_mat(output_folder + '/' + item_str + '.mat')
#     logger.log('best validation %.5f, test error %.5f, on epoch %d'%(best_validation_loss, test_losses, best_epoch))
    sae.save_model_mat(output_folder + '/' + item_str + '.mat')
    logger.log('ran for %.2f m ' % ((strict_time() - start_time) / 60.))
    return

    # NOTE: unreachable because of the return above; this is an alternative
    # fixed-epoch training loop without validation.
    for epoch in xrange(1, training_epochs + 1):
        # go through the training set
        c = []
        epoch_time_s = strict_time()
        for batch_index in xrange(n_train_batches):            
            err = train_fn(batch_index, finetune_lr)
#             err = 0
            c.append(err)
        logger.log('Training epoch %d, cost %.5f, took %f seconds ' % (epoch, numpy.mean(c), (strict_time() - epoch_time_s)))
        if epoch % 100 == 0:
            finetune_lr = 0.8 * finetune_lr            
            logger.log('learning rate: %g' % (finetune_lr)) 

    sae.save_model_mat(output_folder + '/' + item_str + '.mat')
    logger.log('ran for %.2f m ' % ((strict_time() - start_time) / 60.))
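
The patience, patience_increase, improvement_threshold and validation_frequency variables above follow the early-stopping recipe from the Theano deep-learning tutorials, but the loop as written only logs the validation loss and never actually stops early. For reference, a sketch of the check those variables are normally meant to drive (the tutorial pattern, not code from this project):

# inside the epoch loop, after validation_losses has been computed
iter_num = epoch * n_train_batches  # mini-batch updates seen so far
if validation_losses < best_validation_loss * improvement_threshold:
    patience = max(patience, iter_num * patience_increase)
if validation_losses < best_validation_loss:
    best_validation_loss = validation_losses
    best_epoch = epoch
if patience <= iter_num:
    done_looping = True  # break out of the epoch loop
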
Code example #6
File: auto_encoder.py  Project: jiangkid/workspace
def train_auto_encoder(L=1, N=1):    
#     N = 1 #frame number
#     L = -1 #layer
    param = {}
    
#     if N == 1:
#         data_file = '600bps/TIMIT_train_dr1_dr4_split.mat'
#         param['pretrain_lr'] = 0.05
#         param['down_epoch'] = 100
#         param['layers_sizes'] = [280, 2000, 500, 54, 500, 500, 280]
#     elif N == 5:
#         param['pretrain_lr'] = 0.05
#         param['down_epoch'] = 50
#         param['layers_sizes'] = [645, 1000, 512, 1000, 512, 645]     
#     elif N == 11:      
#         param['pretrain_lr'] = 0.02
#         param['down_epoch'] = 30
#         param['layers_sizes'] = [1419, 2000, 1024, 2000, 1419]        
    
    data_file = '300bps/TIMIT_train_split.mat'
    param['output_folder'] = '300bps'
    param['pretrain_lr'] = 0.05
    param['down_epoch'] = 100
    param['layers_sizes'] = [560,2000,1000,500,54,500,1000,2000,560]
    param['n_hidden'] = param['layers_sizes'][L]
    
    datasets = load_TIMIT(data_file)
    train_set, valid_set, test_set = datasets
    
    p_list = [0]
    sigma_list = [1]
    if L == 1:
        for p in p_list:      
            for sigma in sigma_list:
                param['item_str'] = 'L1_p%g_s%g' % (p, sigma)
                auto_encoder_Lx(train_set, p, sigma, param)
    elif L == 2:
        L1_p = 0; L1_s = 1
        model_str = 'L1_p%g_s%g'%(L1_p, L1_s)
        model_file = '%s/%s.mat' %(param['output_folder'], model_str)
        train_set = get_hidden(train_set, model_file)        
        for p in p_list:
            for sigma in sigma_list:
#                 param['item_str'] = 'L2_p%g_s%g_(%s)' % (p, sigma, model_str)
                param['item_str'] = 'L2_p%g_s%g' % (p, sigma)
                auto_encoder_Lx(train_set, p, sigma, param)    
    elif L == 3:
        L1_p = 0; L1_s = 1
        model_str = 'L1_p%g_s%g'%(L1_p, L1_s)
        model_file = '%s/%s.mat' %(param['output_folder'], model_str)
        train_set = get_hidden(train_set, model_file)        
        L2_p = 0; L2_s = 1        
        model_str = 'L2_p%g_s%g'%(L2_p, L2_s)
        model_file = '%s/%s.mat' %(param['output_folder'], model_str)
        train_set = get_hidden(train_set, model_file)        
        for p in p_list:
            for sigma in sigma_list:
                param['item_str'] = 'L3_p%g_s%g' % (p, sigma)
                auto_encoder_Lx(train_set, p, sigma, param)
    elif L == 4:
        L1_p = 0; L1_s = 1
        model_str = 'L1_p%g_s%g'%(L1_p, L1_s)
        model_file = '%s/%s.mat' %(param['output_folder'], model_str)
        train_set = get_hidden(train_set, model_file)
        L2_p = 0; L2_s = 1        
        model_str = 'L2_p%g_s%g'%(L2_p, L2_s)
        model_file = '%s/%s.mat' %(param['output_folder'], model_str)
        train_set = get_hidden(train_set, model_file)
        L3_p = 0; L3_s = 1        
        model_str = 'L3_p%g_s%g'%(L3_p, L3_s)
        model_file = '%s/%s.mat' %(param['output_folder'], model_str)
        train_set = get_hidden(train_set, model_file)          
        for p in p_list:
            for sigma in sigma_list:
                param['item_str'] = 'L4_p%g_s%g' % (p, sigma)
                auto_encoder_Lx(train_set, p, sigma, param)
    elif L == -1: #finetune
        for p in p_list:      
            for sigma in sigma_list:
                L1_str = 'L1_p0_s1'
                L2_str = 'L2_p0_s1'
                L3_str = 'L3_p0_s1'
                L4_str = 'L4_p0_s1'
                param['item_str'] = 'SAE_p0_s1'
                param['L1_file'] = '%s/%s.mat' %(param['output_folder'], L1_str)
                param['L2_file'] = '%s/%s.mat' %(param['output_folder'], L2_str)
                param['L3_file'] = '%s/%s.mat' %(param['output_folder'], L3_str)
                param['L4_file'] = '%s/%s.mat' %(param['output_folder'], L4_str)  
                auto_encoder_finetune(datasets, 0, 1, param)
    return
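
Taken together with auto_encoder_finetune above, train_auto_encoder is meant to be driven layer by layer: L selects which autoencoder layer to pretrain on the previous layer's hidden codes, and L = -1 finetunes the whole stack from the saved L1..L4 models. A short, assumed usage sketch:

# greedy layer-wise pretraining, then finetuning of the full stack
for layer in [1, 2, 3, 4]:
    train_auto_encoder(L=layer)
train_auto_encoder(L=-1)  # finetune using the L1..L4 models saved above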