Example #1
def runSP():
    ds = StockPrice()
    
    # create hidden layer with 10000 nodes, init weights in range -0.1 to 0.1 and add
    # a bias with value 1
    hidden_layer = mlp.Sigmoid(layer_name='hidden', dim=10000, irange=.1, init_bias=1.)
    # create linear output layer
    output_layer = mlp.Linear(layer_name='output', dim=1, irange=.1, init_bias=1.)
    # create Stochastic Gradient Descent trainer that runs for 10 epochs
    trainer = sgd.SGD(learning_rate=.005, batch_size=500, termination_criterion=EpochCounter(10))
    layers = [hidden_layer, output_layer]
    # create neural net with 1000 inputs
    ann = mlp.MLP(layers, nvis=1000)
    trainer.setup(ann, ds)
    # train neural net until the termination criterion is true
    while True:
        trainer.train(dataset=ds)
        ann.monitor.report_epoch()
        ann.monitor()
        if not trainer.continue_learning(ann):
            break
    #accuracy = Accuracy()
    acc = Accuracy()
    for i, predict in enumerate(ann.fprop(theano.shared(ds.valid[0], name='inputs')).eval()):
        print predict, ds.valid[1][i]
        acc.evaluatePN(predict[0], ds.valid[1][i][0])
    acc.printResult()
Example #2
def runAutoencoder():
    ds = StockPrice()
    #print ds.train[0][0]
    data = np.random.randn(10, 5).astype(config.floatX)
    #print data
    print BinomialCorruptor(.2)
    ae = DenoisingAutoencoder(BinomialCorruptor(corruption_level=.2), 1000, 100, act_enc='sigmoid', act_dec='linear',
                     tied_weights=False)
    trainer = sgd.SGD(learning_rate=.005, batch_size=5, termination_criterion=EpochCounter(3), cost=cost_ae.MeanSquaredReconstructionError(), monitoring_batches=5, monitoring_dataset=ds)
    trainer.setup(ae, ds)
    while True:
        trainer.train(dataset=ds)
        ae.monitor()
        ae.monitor.report_epoch()
        if not trainer.continue_learning(ae):
            break
    #print ds.train[0][0]
    #print ae.reconstruct(ds.train[0][0])

    w = ae.weights.get_value()
    #ae.hidbias.set_value(np.random.randn(1000).astype(config.floatX))
    hb = ae.hidbias.get_value()
    #ae.visbias.set_value(np.random.randn(100).astype(config.floatX))
    vb = ae.visbias.get_value()
    d = tensor.matrix()
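    # manually compute a reconstruction of the first training example: sigmoid encoder
    # (weights w, bias hb), then a linear decode using w.T and the visible bias vb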
    result = np.dot(1. / (1 + np.exp(-hb - np.dot(ds.train[0][0],  w))), w.T) + vb
Example #3
    def _create_trainer(self, dataset):
        sgd.log.setLevel(logging.WARNING)

        # Aggregate all the dropout parameters into shared dictionaries.
        probs, scales = {}, {}
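        # incl is the probability of keeping a layer's inputs; the kept activations are
        # scaled by 1/incl so their expected magnitude is unchanged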
        for l in [l for l in self.layers if l.dropout is not None]:
            incl = 1.0 - l.dropout
            probs[l.name] = incl
            scales[l.name] = 1.0 / incl

        if self.cost == "Dropout" or len(probs) > 0:
            # Use the globally specified dropout rate when there are no layer-specific ones.
            incl = 1.0 - self.dropout
            default_prob, default_scale = incl, 1.0 / incl

            # Pass all the parameters to pylearn2 as a custom cost function.
            self.cost = Dropout(default_input_include_prob=default_prob,
                                default_input_scale=default_scale,
                                input_include_probs=probs,
                                input_scales=scales)

        logging.getLogger('pylearn2.monitor').setLevel(logging.WARNING)
        if dataset is not None:
            termination_criterion = MonitorBased(channel_name='objective',
                                                 N=self.n_stable,
                                                 prop_decrease=self.f_stable)
        else:
            termination_criterion = None

        return sgd.SGD(cost=self.cost,
                       batch_size=self.batch_size,
                       learning_rule=self._learning_rule,
                       learning_rate=self.learning_rate,
                       termination_criterion=termination_criterion,
                       monitoring_dataset=dataset)
Example #4
def cnn_run_dropout_maxout(data_path, num_rows, num_cols, num_channels,
                           input_path, pred_path):
    t = time.time()
    sub_window = gen_center_sub_window(76, num_cols)
    trn = SarDataset(ds[0][0], ds[0][1], sub_window)
    vld = SarDataset(ds[1][0], ds[1][1], sub_window)
    tst = SarDataset(ds[2][0], ds[2][1], sub_window)
    print 'Take {}s to read data'.format(time.time() - t)
    t = time.time()
    batch_size = 100
    h1 = maxout.Maxout(layer_name='h2', num_units=1, num_pieces=100, irange=.1)
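    # note: the maxout layer above is defined but never added to `layers` below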
    hidden_layer = mlp.ConvRectifiedLinear(layer_name='h2',
                                           output_channels=8,
                                           irange=0.05,
                                           kernel_shape=[5, 5],
                                           pool_shape=[2, 2],
                                           pool_stride=[2, 2],
                                           max_kernel_norm=1.9365)
    hidden_layer2 = mlp.ConvRectifiedLinear(layer_name='h3',
                                            output_channels=8,
                                            irange=0.05,
                                            kernel_shape=[5, 5],
                                            pool_shape=[2, 2],
                                            pool_stride=[2, 2],
                                            max_kernel_norm=1.9365)
    #output_layer = mlp.Softplus(dim=1,layer_name='output',irange=0.1)
    output_layer = mlp.Linear(dim=1, layer_name='output', irange=0.05)
    trainer = sgd.SGD(learning_rate=0.001,
                      batch_size=100,
                      termination_criterion=EpochCounter(2000),
                      cost=dropout.Dropout(),
                      train_iteration_mode='even_shuffled_sequential',
                      monitor_iteration_mode='even_shuffled_sequential',
                      monitoring_dataset={
                          'test': tst,
                          'valid': vld,
                          'train': trn
                      })
    layers = [hidden_layer, hidden_layer2, output_layer]
    input_space = space.Conv2DSpace(shape=[num_rows, num_cols],
                                    num_channels=num_channels)

    ann = mlp.MLP(layers, input_space=input_space, batch_size=batch_size)
    watcher = best_params.MonitorBasedSaveBest(channel_name='valid_objective',
                                               save_path='sar_cnn_mlp.pkl')
    experiment = Train(dataset=trn,
                       model=ann,
                       algorithm=trainer,
                       extensions=[watcher])
    print 'Take {}s to compile code'.format(time.time() - t)
    t = time.time()
    experiment.main_loop()
    print 'Training time: {}s'.format(time.time() - t)
    serial.save('cnn_hhv_{0}_{1}.pkl'.format(num_rows, num_cols),
                ann,
                on_overwrite='backup')

    # read the HH and HV channels into a 3-D numpy array
    image = read_hhv(input_path)
    return ann, sar_predict(ann, image, pred_path)
Example #5
    def __init__(self,
                 layers,
                 dropout=False,
                 input_scaler=None,
                 output_scaler=None,
                 learning_rate=0.01,
                 verbose=0):
        """

        :param layers: List of tuples of types of layers alongside the number of neurons
        :param learning_rate: The learning rate for all layers
        :param verbose: Verbosity level
        :return:
        """
        self.layers = layers
        self.ds = None
        self.f = None
        self.verbose = verbose
        cost = None
        if dropout:
            cost = Dropout()
        self.trainer = sgd.SGD(learning_rate=learning_rate,
                               cost=cost,
                               batch_size=100)

        self.input_normaliser = input_scaler
        self.output_normaliser = output_scaler
Example #6
def get_ae_pretrainer(layer, data, batch_size):
    init_lr = 0.1
    dec_fac = 1.0001

    train_algo = sgd.SGD(
        batch_size=batch_size,
        learning_rate=init_lr,
        init_momentum=0.5,
        monitoring_batches=100 / batch_size,
        monitoring_dataset={'train': data},
        #cost = MeanSquaredReconstructionError(),
        cost=CAE_cost(),
        termination_criterion=EpochCounter(20),
        update_callbacks=sgd.ExponentialDecay(decay_factor=dec_fac,
                                              min_lr=0.02))
    return Train(model=layer, algorithm=train_algo, dataset=data, \
      extensions=[sgd.MomentumAdjustor(final_momentum=0.9, start=0, saturate=15), ])
Example #7
def get_finetuner(model,
                  cost,
                  trainset,
                  validset=None,
                  batch_size=100,
                  iters=100):
    train_algo = sgd.SGD(
        batch_size=batch_size,
        init_momentum=0.5,
        learning_rate=0.5,
        #monitoring_batches = 100/batch_size,
        #monitoring_dataset = {'train': trainset, 'valid': validset},
        cost=cost,
        termination_criterion=EpochCounter(iters),
        update_callbacks=sgd.ExponentialDecay(decay_factor=1.005, min_lr=0.05))
    return Train(model=model, algorithm=train_algo, dataset=trainset, save_freq=0, \
      extensions=[sgd.MomentumAdjustor(final_momentum=0.9, start=0, saturate=int(0.8*iters)), ])
Example #8
 def __init__(self, data):
     self.N = 5 * 5
     self.predictionLength = 2
     # create hidden layer with 25 nodes, init weights in range -0.1 to 0.1 and add
     # a bias with value 1
     hidden_layer = mlp.Sigmoid(layer_name='hidden',
                                dim=25,
                                irange=.1,
                                init_bias=1.)
     # create Linear output layer
     output_layer = mlp.Linear(1, 'output', irange=.1, init_bias=1.)
     # create Stochastic Gradient Descent trainer that runs for 100 epochs
     trainer = sgd.SGD(learning_rate=.005,
                       batch_size=10,
                       termination_criterion=EpochCounter(100))
     layers = [hidden_layer, output_layer]
     # create neural net with self.N = 25 inputs
     nn = mlp.MLP(layers, nvis=self.N)
     NeuralNetwork.__init__(self, data, nn, trainer)
Example #9
def runXOR():
    ds = XOR()
    hidden_layer = mlp.Sigmoid(layer_name='hidden', dim=10, irange=.1, init_bias=1.)
    output_layer = mlp.Linear(layer_name='output', dim=1, irange=.1, init_bias=1.)
    trainer = sgd.SGD(learning_rate=.05, batch_size=1, termination_criterion=EpochCounter(1000))
    layers = [hidden_layer, output_layer]
    # create neural net with four inputs
    ann = mlp.MLP(layers, nvis=4)
    trainer.setup(ann, ds)
    # train neural net until the termination criterion is true
    while True:
        trainer.train(dataset=ds)
        #ann.monitor.report_epoch()
        #ann.monitor()
        if not trainer.continue_learning(ann):
            break
    inputs = np.array([[0, 0, 0, 1]])
    print ann.fprop(theano.shared(inputs, name='inputs')).eval()
    inputs = np.array([[0, 1, 0, 1]])
    print ann.fprop(theano.shared(inputs, name='inputs')).eval()
    inputs = np.array([[1, 1, 1, 1]])
    print ann.fprop(theano.shared(inputs, name='inputs')).eval()
    inputs = np.array([[1, 1, 0, 0]])
    print ann.fprop(theano.shared(inputs, name='inputs')).eval()
Example #10
def main():
    base_name = sys.argv[1]  # first command-line argument (sys.argv is the list of command-line parameters)
    n_epoch = int(sys.argv[2])  # second argument: number of training epochs
    n_hidden = int(sys.argv[3])  # third argument: number of hidden units
    include_rate = float(sys.argv[4])

    in_size = 1001  # number of input units (number of landmark genes)
    out_size = 1  # number of output units
    b_size = 200  # batch size
    l_rate = 5e-4  # learning rate
    l_rate_min = 1e-5  # minimum learning rate
    decay_factor = 0.9  # learning-rate decay factor
    lr_scale = 3.0
    momentum = 0.5
    init_vals = np.sqrt(6.0 / (np.array([in_size, n_hidden]) +
                               np.array([n_hidden, out_size])))  # Glorot-style init ranges, sqrt(6/(fan_in+fan_out))

    print 'loading data...'

    X_tr = np.load('geno_X_tr_float64.npy')  # training set ('tr'), stored in NumPy's binary .npy format
    Y_tr = np.load('pheno_Y_tr_0-4760_float64.npy')
    Y_tr_target = np.array(Y_tr)
    X_va = np.load('geno_X_va_float64.npy')  # validation set: used for model selection (pick the model with the smallest validation error)
    Y_va = np.load('pheno_Y_va_0-4760_float64.npy')
    Y_va_target = np.array(Y_va)
    X_te = np.load('geno_te_float64.npy')  # test set: used to evaluate the learned model
    Y_te = np.load('pheno_Y_te_0-4760_float64.npy')
    Y_te_target = np.array(Y_te)

    random.seed(0)  # seed the random number generator before calling any other random-module functions
    monitor_idx_tr = random.sample(range(88807), 5000)  # indices of training examples used for monitoring
    # cast the training data to float32; DenseDesignMatrix represents data as a dense matrix
    # with one example per row and one feature per column
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'),
                                         y=Y_tr.astype('float32'))
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[
        monitor_idx_tr, :]
    # one hidden layer with Tanh activation; the output layer uses a linear activation
    h1_layer = p2_md_mlp.Tanh(layer_name='h1',
                              dim=n_hidden,
                              irange=init_vals[0],
                              W_lr_scale=1.0,
                              b_lr_scale=1.0)
    o_layer = p2_md_mlp.Linear(layer_name='y',
                               dim=out_size,
                               irange=0.0001,
                               W_lr_scale=lr_scale,
                               b_lr_scale=1.0)
    # Multilayer Perceptron: nvis is the number of visible (input) units; layers is a list of
    # layer objects, the last of which defines the MLP's output space
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, o_layer], seed=1)
    dropout_cost = p2_ct_mlp_dropout.Dropout(
        input_include_probs={'h1': 1.0, 'y': include_rate},
        input_scales={'h1': 1.0, 'y': np.float32(1.0 / include_rate)})
    # stochastic gradient descent training algorithm
    algorithm = p2_alg_sgd.SGD(
        batch_size=b_size,
        learning_rate=l_rate,
        learning_rule=p2_alg_lr.Momentum(momentum),
        termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
        cost=dropout_cost)
    # Train object: dataset is the DenseDesignMatrix above, model is the MLP, algorithm is SGD
    train = pylearn2.train.Train(dataset=data_tr,
                                 model=model,
                                 algorithm=algorithm)
    train.setup()

    x = T.matrix()  # symbolic 2-D input matrix
    #fprop(state_below) does the forward prop transformation
    y = model.fprop(x)
    f = theano.function([x], y)  # compile a function mapping inputs x to predictions y

    MAE_va_old = 10.0  # mean absolute error on the validation set
    MAE_va_best = 10.0
    MAE_tr_old = 10.0  # mean absolute error on the monitored training subset
    MAE_te_old = 10.0
    MAE_1000G_old = 10.0
    MAE_1000G_best = 10.0
    MAE_GTEx_old = 10.0
    #base_name = sys.argv[1]
    outlog = open(base_name + '.log', 'w')
    log_str = '\t'.join(
        map(str, [
            'epoch', 'MAE_va', 'MAE_va_change', 'MAE_te', 'MAE_te_change',
            'MAE_tr', 'MAE_tr_change', 'learning_rate', 'time(sec)'
        ]))
    print log_str  # print the log header
    outlog.write(log_str + '\n')
    # Python buffers stdout; sys.stdout.flush() forces the buffered log line out to the terminal immediately
    sys.stdout.flush()

    for epoch in range(0, n_epoch):
        t_old = time.time()
        train.algorithm.train(train.dataset)

        Y_va_hat = f(X_va.astype('float32')).astype('float64')
        Y_te_hat = f(X_te.astype('float32')).astype('float64')
        Y_tr_hat_monitor = f(X_tr_monitor.astype('float32')).astype('float64')

        # mean absolute error on each dataset
        MAE_va = np.abs(Y_va_target - Y_va_hat).mean()
        MAE_te = np.abs(Y_te_target - Y_te_hat).mean()
        MAE_tr = np.abs(Y_tr_monitor_target - Y_tr_hat_monitor).mean()

        # relative change in error from the previous epoch
        MAE_va_change = (MAE_va - MAE_va_old) / MAE_va_old
        MAE_te_change = (MAE_te - MAE_te_old) / MAE_te_old
        MAE_tr_change = (MAE_tr - MAE_tr_old) / MAE_tr_old

        # store the current errors for the next epoch's comparison
        MAE_va_old = MAE_va
        MAE_te_old = MAE_te
        MAE_tr_old = MAE_tr

        # current timestamp (seconds since the Unix epoch)
        t_new = time.time()
        l_rate = train.algorithm.learning_rate.get_value()
        log_str = '\t'.join(
            map(str, [
                epoch + 1,
                '%.6f' % MAE_va,
                '%.6f' % MAE_va_change,
                '%.6f' % MAE_te,
                '%.6f' % MAE_te_change,
                '%.6f' % MAE_tr,
                '%.6f' % MAE_tr_change,
                '%.5f' % l_rate,
                int(t_new - t_old)
            ]))
        print log_str
        outlog.write(log_str + '\n')
        sys.stdout.flush()

        if MAE_tr_change > 0:  # if the training error increased, multiply the learning rate by the decay factor
            l_rate = l_rate * decay_factor
        if l_rate < l_rate_min:  # never let the learning rate fall below its minimum
            l_rate = l_rate_min

        train.algorithm.learning_rate.set_value(np.float32(l_rate))

        if MAE_va < MAE_va_best:
            MAE_va_best = MAE_va
            outmodel = open(base_name + '_bestva_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()
            np.save(base_name + '_bestva_Y_te_hat.npy', Y_te_hat)
            np.save(base_name + '_bestva_Y_va_hat.npy', Y_va_hat)

    print 'MAE_va_best : %.6f' % (MAE_va_best)
    outlog.write('MAE_va_best : %.6f' % (MAE_va_best) + '\n')
    outlog.close()
Example #11
def main():
    base_name = sys.argv[1]
    n_epoch = int(sys.argv[2])
    n_hidden = int(sys.argv[3])
    include_rate = float(sys.argv[4])

    in_size = 943
    out_size = 4760
    b_size = 200
    l_rate = 3e-4
    l_rate_min = 1e-5
    decay_factor = 0.9
    lr_scale = 3.0
    momentum = 0.5
    init_vals = np.sqrt(6.0/(np.array([in_size, n_hidden, n_hidden, n_hidden])+np.array([n_hidden, n_hidden, n_hidden, out_size])))
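    # Glorot-style uniform init ranges, sqrt(6 / (fan_in + fan_out)), one per weight matrix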
    
    print 'loading data...'
    
    X_tr = np.load('bgedv2_X_tr_float64.npy')
    Y_tr = np.load('bgedv2_Y_tr_4760-9520_float64.npy')
    Y_tr_target = np.array(Y_tr)
    X_va = np.load('bgedv2_X_va_float64.npy')
    Y_va = np.load('bgedv2_Y_va_4760-9520_float64.npy')
    Y_va_target = np.array(Y_va)
    X_te = np.load('bgedv2_X_te_float64.npy')
    Y_te = np.load('bgedv2_Y_te_4760-9520_float64.npy')
    Y_te_target = np.array(Y_te)

    X_1000G = np.load('1000G_X_float64.npy')
    Y_1000G = np.load('1000G_Y_4760-9520_float64.npy')
    Y_1000G_target = np.array(Y_1000G)
    X_GTEx = np.load('GTEx_X_float64.npy')
    Y_GTEx = np.load('GTEx_Y_4760-9520_float64.npy')
    Y_GTEx_target = np.array(Y_GTEx)

    
    random.seed(0)
    monitor_idx_tr = random.sample(range(88807), 5000)
    
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'), y=Y_tr.astype('float32'))
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[monitor_idx_tr, :]
    h1_layer = p2_md_mlp.Tanh(layer_name='h1', dim=n_hidden, irange=init_vals[0], W_lr_scale=1.0, b_lr_scale=1.0)
    h2_layer = p2_md_mlp.Tanh(layer_name='h2', dim=n_hidden, irange=init_vals[1], W_lr_scale=lr_scale, b_lr_scale=1.0)
    h3_layer = p2_md_mlp.Tanh(layer_name='h3', dim=n_hidden, irange=init_vals[2], W_lr_scale=lr_scale, b_lr_scale=1.0)
    o_layer = p2_md_mlp.Linear(layer_name='y', dim=out_size, irange=0.0001, W_lr_scale=lr_scale, b_lr_scale=1.0)
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, h2_layer, h3_layer, o_layer], seed=1)
    dropout_cost = p2_ct_mlp_dropout.Dropout(input_include_probs={'h1':1.0, 'h2':include_rate, 'h3':include_rate,
                                                                   'y':include_rate}, 
                                             input_scales={'h1':1.0, 'h2':np.float32(1.0/include_rate),
                                                           'h3':np.float32(1.0/include_rate),
                                                           'y':np.float32(1.0/include_rate)})
    
    algorithm = p2_alg_sgd.SGD(batch_size=b_size, learning_rate=l_rate, 
                               learning_rule = p2_alg_lr.Momentum(momentum),
                               termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
                               cost=dropout_cost)

    train = pylearn2.train.Train(dataset=data_tr, model=model, algorithm=algorithm)
    train.setup()

    x = T.matrix()
    y = model.fprop(x)
    f = theano.function([x], y)

    MAE_va_old = 10.0
    MAE_va_best = 10.0
    MAE_tr_old = 10.0
    MAE_te_old = 10.0
    MAE_1000G_old = 10.0
    MAE_1000G_best = 10.0
    MAE_GTEx_old = 10.0

    outlog = open(base_name + '.log', 'w')
    log_str = '\t'.join(map(str, ['epoch', 'MAE_va', 'MAE_va_change', 'MAE_te', 'MAE_te_change', 
                              'MAE_1000G', 'MAE_1000G_change', 'MAE_GTEx', 'MAE_GTEx_change',
                              'MAE_tr', 'MAE_tr_change', 'learning_rate', 'time(sec)']))
    print log_str
    outlog.write(log_str + '\n')
    sys.stdout.flush()

    for epoch in range(0, n_epoch):
        t_old = time.time()
        train.algorithm.train(train.dataset)
        
        Y_va_hat = f(X_va.astype('float32')).astype('float64')
        Y_te_hat = f(X_te.astype('float32')).astype('float64')
        Y_tr_hat_monitor = f(X_tr_monitor.astype('float32')).astype('float64')
        Y_1000G_hat = f(X_1000G.astype('float32')).astype('float64')
        Y_GTEx_hat = f(X_GTEx.astype('float32')).astype('float64')

        MAE_va = np.abs(Y_va_target - Y_va_hat).mean()
        MAE_te = np.abs(Y_te_target - Y_te_hat).mean()
        MAE_tr = np.abs(Y_tr_monitor_target - Y_tr_hat_monitor).mean()
        MAE_1000G = np.abs(Y_1000G_target - Y_1000G_hat).mean()
        MAE_GTEx = np.abs(Y_GTEx_target - Y_GTEx_hat).mean()
        
        MAE_va_change = (MAE_va - MAE_va_old)/MAE_va_old
        MAE_te_change = (MAE_te - MAE_te_old)/MAE_te_old
        MAE_tr_change = (MAE_tr - MAE_tr_old)/MAE_tr_old
        MAE_1000G_change = (MAE_1000G - MAE_1000G_old)/MAE_1000G_old
        MAE_GTEx_change = (MAE_GTEx - MAE_GTEx_old)/MAE_GTEx_old

        
        MAE_va_old = MAE_va
        MAE_te_old = MAE_te
        MAE_tr_old = MAE_tr
        MAE_1000G_old = MAE_1000G
        MAE_GTEx_old = MAE_GTEx

        
        t_new = time.time()
        l_rate = train.algorithm.learning_rate.get_value()
        log_str = '\t'.join(map(str, [epoch+1, '%.6f'%MAE_va, '%.6f'%MAE_va_change, '%.6f'%MAE_te, '%.6f'%MAE_te_change,
                                  '%.6f'%MAE_1000G, '%.6f'%MAE_1000G_change, '%.6f'%MAE_GTEx, '%.6f'%MAE_GTEx_change,
                                  '%.6f'%MAE_tr, '%.6f'%MAE_tr_change, '%.5f'%l_rate, int(t_new-t_old)]))
        print log_str
        outlog.write(log_str + '\n')
        sys.stdout.flush()
        
        if MAE_tr_change > 0:
            l_rate = l_rate*decay_factor
        if l_rate < l_rate_min:
            l_rate = l_rate_min

        train.algorithm.learning_rate.set_value(np.float32(l_rate))

        if MAE_va < MAE_va_best:
            MAE_va_best = MAE_va
            outmodel = open(base_name + '_bestva_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_bestva_Y_te_hat.npy', Y_te_hat)
            np.save(base_name + '_bestva_Y_va_hat.npy', Y_va_hat)
        
        if MAE_1000G < MAE_1000G_best:
            MAE_1000G_best = MAE_1000G
            outmodel = open(base_name + '_best1000G_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_best1000G_Y_1000G_hat.npy', Y_1000G_hat)
            np.save(base_name + '_best1000G_Y_GTEx_hat.npy', Y_GTEx_hat)

    print 'MAE_va_best : %.6f' % (MAE_va_best)
    print 'MAE_1000G_best : %.6f' % (MAE_1000G_best)
    outlog.write('MAE_va_best : %.6f' % (MAE_va_best) + '\n')
    outlog.write('MAE_1000G_best : %.6f' % (MAE_1000G_best) + '\n')
    outlog.close()
Example #12
def main():
    training_data, validation_data, test_data, std_scale = load_training_data()
    kaggle_test_features = load_test_data(std_scale)

    ###############
    # pylearn2 ML
    hl1 = mlp.Sigmoid(layer_name='hl1', dim=200, irange=.1, init_bias=1.)
    hl2 = mlp.Sigmoid(layer_name='hl2', dim=100, irange=.1, init_bias=1.)

    # create Softmax output layer
    output_layer = mlp.Softmax(9, 'output', irange=.1)
    # create Stochastic Gradient Descent trainer with monitor-based early stopping
    # (stop after 10 epochs with no improvement on valid_objective)
    trainer = sgd.SGD(learning_rate=.05,
                      batch_size=300,
                      learning_rule=learning_rule.Momentum(.5),
                      termination_criterion=MonitorBased(
                          channel_name='valid_objective',
                          prop_decrease=0.,
                          N=10),
                      monitoring_dataset={
                          'valid': validation_data,
                          'train': training_data
                      })

    layers = [hl1, hl2, output_layer]
    # create neural net
    model = mlp.MLP(layers, nvis=93)

    watcher = best_params.MonitorBasedSaveBest(
        channel_name='valid_objective',
        save_path='pylearn2_results/pylearn2_test.pkl')

    velocity = learning_rule.MomentumAdjustor(final_momentum=.6,
                                              start=1,
                                              saturate=250)
    decay = sgd.LinearDecayOverEpoch(start=1, saturate=250, decay_factor=.01)
    ######################

    experiment = Train(dataset=training_data,
                       model=model,
                       algorithm=trainer,
                       extensions=[watcher, velocity, decay])

    experiment.main_loop()

    #load best model and test
    ################
    model = serial.load('pylearn2_results/pylearn2_test.pkl')
    # get predictions for the held-out test data to estimate accuracy
    test_results = model.fprop(theano.shared(test_data[0],
                                             name='test_data')).eval()

    print test_results.shape
    loss = multiclass_log_loss(test_data[1], test_results)

    print 'Test multiclass log loss:', loss

    out_file = 'pylearn2_results/' + str(loss) + 'ann'
    #exp.save(out_file + '.pkl')

    #save the kaggle results

    results = model.fprop(
        theano.shared(kaggle_test_features, name='kaggle_test_data')).eval()
    save_results(out_file + '.csv', kaggle_test_features, results)
Example #13
#output = mlp.HingeLoss(layer_name='y',n_classes=2,irange=.05)

#layers = [l5, l6, output]
layers = [l1, l2, l3, l4, l5, output]

ann = mlp.MLP(layers, nvis=X[0].reshape(-1).shape[0])

lr = 0.1
epochs = 400
trainer = sgd.SGD(
    learning_rate=lr,
    batch_size=100,
    learning_rule=learning_rule.Momentum(.05),
    # Remember, default dropout is .5
    #cost=Dropout(input_include_probs={'l1': .5},
    #             input_scales={'l1': 1.}),
    termination_criterion=EpochCounter(epochs),
    monitoring_dataset={
        'train': ds,
        'valid': ds_test
    })

watcher = best_params.MonitorBasedSaveBest(channel_name='valid_roc_auc',
                                           save_path='saved_clf.pkl')

velocity = learning_rule.MomentumAdjustor(final_momentum=.9,
                                          start=1,
                                          saturate=250)

decay = sgd.LinearDecayOverEpoch(start=1, saturate=250, decay_factor=lr * .05)
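# monitoring extension that adds an ROC AUC channel (presumably providing the 'valid_roc_auc'
# channel watched by the save-best extension above)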
rocauc = roc_auc.RocAucChannel()
Example #14
    images_train = images[train_index]
    y_train = y[train_index]
    images_train, y_train = shuffle(images_train, y_train, random_state=7)
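    # DenseDesignMatrix stores one flattened example per row; view_converter maps each row back to its image topology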
    X_train = DenseDesignMatrix(X=images_train, y=y_train,view_converter=view_converter)
    
    images_test = images[test_index]
    y_test = y[test_index]
    X_test = DenseDesignMatrix(X=images_test, y=y_test,view_converter=view_converter)
            
    if retrain:
        print "training on", X_train.X.shape, 'testing on', X_test.X.shape
        trainer = sgd.SGD(learning_rate=learn_rate, batch_size=batch_size,
                          learning_rule=learning_rule.Momentum(momentum_start),
                          cost=Dropout(
                                       input_include_probs={'l1':1., 'l2':1., 'l3':1., 'l4':1., 'l5':1., 'l6':1.},
                                       input_scales={'l1':1., 'l2':1., 'l3':1., 'l4':1., 'l5':1., 'l6':1.}
                                       ),
                          termination_criterion=EpochCounter(max_epochs=max_epochs),
                          monitoring_dataset={'train':X_train, 'valid':X_test},
                          )
        
        
        input_space = Conv2DSpace(shape=(central_window_shape, central_window_shape),
                    axes = axes,
                    num_channels = 1)
                    
        ann = mlp.MLP(layers, input_space=input_space)

        velocity = learning_rule.MomentumAdjustor(final_momentum=momentum_end,
                                          start=1,
                                          saturate=momentum_saturate)
Example #15
layerh3 = mlp.ConvRectifiedLinear(layer_name='h3',
                                  output_channels=64,
                                  irange=.05,
                                  kernel_shape=[5, 5],
                                  pool_shape=[4, 4],
                                  pool_stride=[2, 2],
                                  max_kernel_norm=1.9365)
''' Note: changed the number of classes '''
layery = mlp.Softmax(max_col_norm=1.9365,
                     layer_name='y',
                     n_classes=121,
                     istdev=.05)
print 'Setting up trainers'
trainer = sgd.SGD(learning_rate=0.5,
                  batch_size=50,
                  termination_criterion=EpochCounter(200),
                  learning_rule=Momentum(init_momentum=0.5))
layers = [layerh2, layerh3, layery]
ann = mlp.MLP(layers, input_space=Conv2DSpace(shape=[28, 28], num_channels=1))
trainer.setup(ann, ds)
print 'Start Training'
while True:
    trainer.train(dataset=ds)
    ann.monitor.report_epoch()
    ann.monitor()
    if not trainer.continue_learning(ann):
        break

# 3. Predict
XReport, Y_info = plankton.loadReportData()
probMatrix = ann.fprop(theano.shared(XReport, name='XReport')).eval()
Example #16
                      irange=ir,
                      dim=dim,
                      max_col_norm=1.)
 l3 = RectifiedLinear(layer_name='l3',
                      irange=ir,
                      dim=dim,
                      max_col_norm=1.)
 output = Softmax(layer_name='y',
                  n_classes=9,
                  irange=ir,
                  max_col_norm=mcn_out)
 mdl = MLP([l1, l2, l3, output], nvis=X2.shape[1])
 trainer = sgd.SGD(learning_rate=lr,
                   batch_size=bs,
                   learning_rule=learning_rule.Momentum(mm),
                   cost=Dropout(default_input_include_prob=ip,
                                default_input_scale=1 / ip),
                   termination_criterion=EpochCounter(epochs),
                   seed=seed)
 decay = sgd.LinearDecayOverEpoch(start=2, saturate=20, decay_factor=.1)
 experiment = Train(dataset=training,
                    model=mdl,
                    algorithm=trainer,
                    extensions=[decay])
 experiment.main_loop()
 epochs_current = epochs
 for s in range(n_add):
     trainer = sgd.SGD(learning_rate=lr * .1,
                       batch_size=bs,
                       learning_rule=learning_rule.Momentum(mm),
                       cost=Dropout(default_input_include_prob=ip,
Example #17
from csv_data import CSVData
import numpy as np


class MLPData(DenseDesignMatrix):
    def __init__(self, X, y):
        super(MLPData, self).__init__(X=X, y=y.astype(int), y_labels=2)


threshold = 0.95
hidden_layer = mlp.Sigmoid(layer_name='h0', dim=10, sparse_init=10)
output_layer = mlp.Softmax(layer_name='y', n_classes=2, irange=0.05)
layers = [hidden_layer, output_layer]
neural_net = mlp.MLP(layers, nvis=10)
trainer = sgd.SGD(batch_size=5,
                  learning_rate=.1,
                  termination_criterion=EpochCounter(100))

first = True
learning = True
correct = 0
incorrect = 0
total = 0
data = CSVData("results2.csv")
while True:
    X, y = data.get_data()
    if X is None:
        break

    if learning:
        ds = MLPData(X, np.array([[0]]))
Example #18
def cnn_train(
    train_path,
    test_path,
    valid_path,
    save_path,
    predict_path,
    image_path,
    num_rows=28,
    num_cols=28,
    num_channels=2,
    batch_size=128,
    output_channels=[64, 64],
    kernel_shape=[[12, 12], [5, 5]],
    pool_shape=[[4, 4], [2, 2]],
    pool_stride=[[2, 2], [2, 2]],
    irange=[0.05, 0.05, 0.05],
    max_kernel_norm=[1.9365, 1.9365],
    learning_rate=0.001,
    init_momentum=0.9,
    weight_decay=[0.0002, 0.0002, 0.0002],
    n_epoch=1000,
):
    #load data
    #t = time.time()
    ds = load_data(valid_path, num_rows, num_cols, num_channels)
    vld = SarDataset(np.array(ds[0]), ds[1])
    ds = load_data(train_path, num_rows, num_cols, num_channels)
    trn = SarDataset(np.array(ds[0]), ds[1])
    ds = load_data(test_path, num_rows, num_cols, num_channels)
    tst = SarDataset(np.array(ds[0]), ds[1])
    #load balanced data
    #ds = load_data_balance_under_sample(train_path, num_rows,num_cols, num_channels)
    #trn = SarDataset(np.array(ds[0]),ds[1])
    #ds = load_data_balance(valid_path, num_rows,num_cols, num_channels)
    #vld = SarDataset(np.array(ds[0]),ds[1])
    #ds = load_data_balance(test_path, num_rows,num_cols, num_channels)
    #tst = SarDataset(np.array(ds[0]),ds[1])
    #print 'Take {}s to read data'.format( time.time()-t)
    # use gaussian convolution on the original image to see if it can concentrate in the center
    #trn,tst,vld = load_data_lidar()

    #mytransformer = transformer.TransformationPipeline(input_space=space.Conv2DSpace(shape=[num_rows,num_cols],num_channels=num_channels),transformations=[transformer.Rotation(),transformer.Flipping()])
    #trn = contestTransformerDataset.TransformerDataset(trn,mytransformer,space_preserving=True)
    #tst = contestTransformerDataset.TransformerDataset(tst,mytransformer,space_preserving=True)
    #vld = contestTransformerDataset.TransformerDataset(vld,mytransformer,space_preserving=True)

    #trn = transformer_dataset.TransformerDataset(trn,mytransformer,space_preserving=True)
    #tst = transformer_dataset.TransformerDataset(tst,mytransformer,space_preserving=True)
    #vld = transformer_dataset.TransformerDataset(vld,mytransformer,space_preserving=True)

    #setup the network
    t = time.time()
    layers = []
    for i in range(len(output_channels)):
        layer_name = 'h{}'.format(i + 1)
        convlayer = mlp.ConvRectifiedLinear(layer_name=layer_name,
                                            output_channels=output_channels[i],
                                            irange=irange[i],
                                            kernel_shape=kernel_shape[i],
                                            pool_shape=pool_shape[i],
                                            pool_stride=pool_stride[i],
                                            max_kernel_norm=max_kernel_norm[i])
        layers.append(convlayer)

    output_mlp = mlp.Linear(dim=1,
                            layer_name='output',
                            irange=irange[-1],
                            use_abs_loss=True)
    #output_mlp = mlp.linear_mlp_ace(dim=1,layer_name='output',irange=irange[-1])
    layers.append(output_mlp)

    #ann = cPickle.load(open('../output/train_with_2010_2l_40_64/original_500/f/f0.pkl'))
    #layers = []
    #for layer in ann.layers:
    #    layer.set_mlp_force(None)
    #    layers.append(layer)

    trainer = sgd.SGD(
        learning_rate=learning_rate,
        batch_size=batch_size,
        termination_criterion=EpochCounter(n_epoch),
        #termination_criterion = termination_criteria.And([termination_criteria.MonitorBased(channel_name = 'train_objective', prop_decrease=0.01,N=10),EpochCounter(n_epoch)]),
        #cost = dropout.Dropout(),
        cost=cost.SumOfCosts(
            [cost.MethodCost('cost_from_X'),
             WeightDecay(weight_decay)]),
        init_momentum=init_momentum,
        train_iteration_mode='even_shuffled_sequential',
        monitor_iteration_mode='even_shuffled_sequential',
        monitoring_dataset={
            'test': tst,
            'valid': vld,
            'train': trn
        })

    input_space = space.Conv2DSpace(shape=[num_rows, num_cols],
                                    num_channels=num_channels)
    #ann = mlp.MLP(layers,input_space=input_space,batch_size=batch_size)
    ann = serial.load(
        '../output/train_with_2010_2l_40_64/original_500/f/f0.pkl')
    ann = monitor.push_monitor(ann, 'stage_0')
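    # push_monitor stashes the loaded model's old monitor under the name 'stage_0' and
    # attaches a fresh monitor so training can resume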
    watcher = best_params.MonitorBasedSaveBest(channel_name='valid_objective',
                                               save_path=predict_path +
                                               save_path)
    flip = window_flip.WindowAndFlip((num_rows, num_cols),
                                     randomize=[tst, vld, trn])
    experiment = Train(dataset=trn,
                       model=ann,
                       algorithm=trainer,
                       extensions=[watcher, flip])
    print 'Take {}s to compile code'.format(time.time() - t)

    #train the network
    t = time.time()
    experiment.main_loop()
    print 'Training time: {}h'.format((time.time() - t) / 3600)
    utils.sms_notice('Training time:{}'.format((time.time() - t) / 3600))

    return ann
Example #19
def cnn_train_tranformer(train_path,
                         test_path,
                         valid_path,
                         save_path,
                         predict_path,
                         num_rows=28,
                         num_cols=28,
                         num_channels=2,
                         batch_size=128,
                         output_channels=[64, 64],
                         kernel_shape=[[12, 12], [5, 5]],
                         pool_shape=[[4, 4], [2, 2]],
                         pool_stride=[[2, 2], [2, 2]],
                         irange=[0.05, 0.05, 0.05],
                         max_kernel_norm=[1.9365, 1.9365],
                         learning_rate=0.001,
                         init_momentum=0.9,
                         weight_decay=[0.0002, 0.0002, 0.0002],
                         n_epoch=1000,
                         image_path=''):

    ds = load_data_transformed(train_path, num_cols, batch_size)
    ds = (np.transpose(ds[0], axes=[0, 3, 1, 2]), ds[1])
    trn = SarDataset(np.array(ds[0]), ds[1])
    ds = load_data_transformed(valid_path, num_cols, batch_size)
    ds = (np.transpose(ds[0], axes=[0, 3, 1, 2]), ds[1])
    vld = SarDataset(np.array(ds[0]), ds[1])
    ds = load_data_transformed(test_path, num_cols, batch_size)
    ds = (np.transpose(ds[0], axes=[0, 3, 1, 2]), ds[1])
    tst = SarDataset(np.array(ds[0]), ds[1])
    #setup the network
    #X = np.random.random([400000,2,41,41])
    #y = np.random.random([400000,1])
    #trn = SarDataset(X,y)
    #X = np.random.random([60000,2,41,41])
    #y = np.random.random([60000,1])
    #tst = SarDataset(X,y)
    #X = np.random.random([60000,2,41,41])
    #y = np.random.random([60000,1])
    #vld = SarDataset(X,y)
    t = time.time()
    layers = []
    for i in range(len(output_channels)):
        layer_name = 'h{}'.format(i + 1)
        convlayer = mlp.ConvRectifiedLinear(layer_name=layer_name,
                                            output_channels=output_channels[i],
                                            irange=irange[i],
                                            kernel_shape=kernel_shape[i],
                                            pool_shape=pool_shape[i],
                                            pool_stride=pool_stride[i],
                                            max_kernel_norm=max_kernel_norm[i])
        layers.append(convlayer)

    output_mlp = mlp.Linear(dim=1, layer_name='output', irange=irange[-1])
    #output_mlp = mlp.linear_mlp_bayesian_cost(dim=1,layer_name='output',irange=irange[-1])
    layers.append(output_mlp)

    trainer = sgd.SGD(
        learning_rate=learning_rate,
        batch_size=batch_size,
        termination_criterion=EpochCounter(n_epoch),
        #termination_criterion = termination_criteria.And([termination_criteria.MonitorBased(channel_name = 'train_objective', prop_decrease=0.01,N=10),EpochCounter(n_epoch)]),
        #cost = dropout.Dropout(),
        cost=cost.SumOfCosts(
            [cost.MethodCost('cost_from_X'),
             WeightDecay(weight_decay)]),
        init_momentum=init_momentum,
        train_iteration_mode='even_shuffled_sequential',
        monitor_iteration_mode='even_shuffled_sequential',
        monitoring_dataset={
            'test': tst,
            'valid': vld,
            'train': trn
        })

    input_space = space.Conv2DSpace(shape=[num_rows, num_cols],
                                    num_channels=num_channels)
    ann = mlp.MLP(layers, input_space=input_space, batch_size=batch_size)
    watcher = best_params.MonitorBasedSaveBest(channel_name='valid_objective',
                                               save_path=predict_path +
                                               save_path)
    #flip = window_flip.WindowAndFlip((num_rows,num_cols),randomize=[tst,vld,trn])
    experiment = Train(dataset=trn,
                       model=ann,
                       algorithm=trainer,
                       extensions=[watcher])
    print 'Take {}s to compile code'.format(time.time() - t)

    #train the network
    t = time.time()
    experiment.main_loop()
    print 'Training time: {}h'.format((time.time() - t) / 3600)
    utils.sms_notice('Training time:{}'.format((time.time() - t) / 3600))

    return ann
Example #20
                      irange=ir,
                      dim=dim,
                      max_col_norm=1.)
 l3 = RectifiedLinear(layer_name='l3',
                      irange=ir,
                      dim=dim,
                      max_col_norm=1.)
 output = Softmax(layer_name='y',
                  n_classes=9,
                  irange=ir,
                  max_col_norm=mcn_out)
 mdl = MLP([l1, l2, l3, output], nvis=X2.shape[1])
 trainer = sgd.SGD(learning_rate=lr,
                   batch_size=bs,
                   learning_rule=learning_rule.Momentum(mm),
                   cost=Dropout(default_input_include_prob=ip,
                                default_input_scale=1 / ip),
                   termination_criterion=EpochCounter(epochs),
                   seed=seed)
 decay = sgd.LinearDecayOverEpoch(start=2, saturate=20, decay_factor=.1)
 #fname = path + 'model/TRI_' + 'kmax_'+ str(k_max) + '_seed_' + str(seed) + '.pkl'
 experiment = Train(dataset=training,
                    model=mdl,
                    algorithm=trainer,
                    extensions=[decay])
 #                   save_path = fname, save_freq = epochs)
 experiment.main_loop()
 pred_train = predict(mdl, X2[:num_train].astype(np.float32))
 pred_test = predict(mdl, X2[num_train:].astype(np.float32))
 predAll_train += pred_train
 predAll_test += pred_test
Example #21
def train(d):
    print 'Creating dataset'
    # load mnist here
    # X = d.train_X
    # y = d.train_Y
    # test_X = d.test_X
    # test_Y = d.test_Y
    # nb_classes = len(np.unique(y))
    # train_y = convert_one_hot(y)
    # train_set = DenseDesignMatrix(X=X, y=y)
    train = DenseDesignMatrix(X=d.train_X, y=convert_one_hot(d.train_Y))
    valid = DenseDesignMatrix(X=d.valid_X, y=convert_one_hot(d.valid_Y))
    test = DenseDesignMatrix(X=d.test_X, y=convert_one_hot(d.test_Y))

    print 'Setting up'
    batch_size = 1000
    conv = mlp.ConvRectifiedLinear(
        layer_name='c0',
        output_channels=20,
        irange=.05,
        kernel_shape=[5, 5],
        pool_shape=[4, 4],
        pool_stride=[2, 2],
        # W_lr_scale=0.25,
        max_kernel_norm=1.9365)
    mout = MaxoutConvC01B(layer_name='m0',
                          num_pieces=4,
                          num_channels=96,
                          irange=.05,
                          kernel_shape=[5, 5],
                          pool_shape=[4, 4],
                          pool_stride=[2, 2],
                          W_lr_scale=0.25,
                          max_kernel_norm=1.9365)
    mout2 = MaxoutConvC01B(layer_name='m1',
                           num_pieces=4,
                           num_channels=96,
                           irange=.05,
                           kernel_shape=[5, 5],
                           pool_shape=[4, 4],
                           pool_stride=[2, 2],
                           W_lr_scale=0.25,
                           max_kernel_norm=1.9365)
    sigmoid = mlp.Sigmoid(
        layer_name='Sigmoid',
        dim=500,
        sparse_init=15,
    )
    smax = mlp.Softmax(layer_name='y', n_classes=10, irange=0.)
    in_space = Conv2DSpace(shape=[28, 28],
                           num_channels=1,
                           axes=['c', 0, 1, 'b'])
    net = mlp.MLP(
        layers=[mout, mout2, smax],
        input_space=in_space,
        # nvis=784,
    )
    trainer = bgd.BGD(batch_size=batch_size,
                      line_search_mode='exhaustive',
                      conjugate=1,
                      updates_per_batch=10,
                      monitoring_dataset={
                          'train': train,
                          'valid': valid,
                          'test': test
                      },
                      termination_criterion=termination_criteria.MonitorBased(
                          channel_name='valid_y_misclass'))
    trainer = sgd.SGD(learning_rate=0.15,
                      cost=dropout.Dropout(),
                      batch_size=batch_size,
                      monitoring_dataset={
                          'train': train,
                          'valid': valid,
                          'test': test
                      },
                      termination_criterion=termination_criteria.MonitorBased(
                          channel_name='valid_y_misclass'))
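    # note: this SGD trainer replaces the BGD trainer defined above; only the SGD settings are used from here on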
    trainer.setup(net, train)
    epoch = 0
    while True:
        print 'Training...', epoch
        trainer.train(dataset=train)
        net.monitor()
        epoch += 1
        if not trainer.continue_learning(net):
            break
Example #22
def supervisedLayerwisePRL(trainset, testset):
    '''
	The supervised layerwise training as used in the PRL Paper.
	
	Input
	------
	trainset : A path to an hdf5 file created through h5py.
	testset  : A path to an hdf5 file created through h5py.
	'''
    batch_size = 100

    # Both the train and test h5py files are expected to contain 'topo_view' and 'y'
    # datasets, with 'topo_view' stored in the 'b01c' data format used by pylearn2
    # and 'y' holding the one-hot encoded labels
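    # A minimal sketch (assuming h5py; dataset names and shapes here are illustrative)
    # of how such a file could be produced:
    #     import h5py
    #     with h5py.File('train.h5', 'w') as f:
    #         f.create_dataset('topo_view', data=images_b01c)   # e.g. (N, 70, 70, 1), 'b01c' order
    #         f.create_dataset('y', data=labels_one_hot)        # e.g. (N, 171) one-hot labels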
    trn = HDF5Dataset(filename=trainset,
                      topo_view='topo_view',
                      y='y',
                      load_all=False)
    tst = HDF5Dataset(filename=testset,
                      topo_view='topo_view',
                      y='y',
                      load_all=False)
    # the test set also serves as the 'valid' monitoring set referenced below
    vld = tst
    '''
	The 1st Convolution and Pooling Layers are added below.
	'''
    h1 = mlp.ConvRectifiedLinear(layer_name='h1',
                                 output_channels=64,
                                 irange=0.05,
                                 kernel_shape=[4, 4],
                                 pool_shape=[4, 4],
                                 pool_stride=[2, 2],
                                 max_kernel_norm=1.9365)

    fc = mlp.RectifiedLinear(layer_name='fc', dim=1500, irange=0.05)
    output = mlp.Softmax(layer_name='y',
                         n_classes=171,
                         irange=.005,
                         max_col_norm=1.9365)

    layers = [h1, fc, output]

    mdl = mlp.MLP(layers,
                  input_space=Conv2DSpace(shape=(70, 70), num_channels=1))

    trainer = sgd.SGD(
        learning_rate=0.002,
        batch_size=batch_size,
        learning_rule=learning_rule.RMSProp(),
        cost=SumOfCosts(
            costs=[Default(),
                   WeightDecay(coeffs=[0.0005, 0.0005, 0.0005])]),
        train_iteration_mode='shuffled_sequential',
        monitor_iteration_mode='sequential',
        termination_criterion=EpochCounter(max_epochs=15),
        monitoring_dataset={
            'test': tst,
            'valid': vld
        })

    watcher = best_params.MonitorBasedSaveBest(
        channel_name='valid_y_misclass',
        save_path='./Saved Models/conv_supervised_layerwise_best1.pkl')

    decay = sgd.LinearDecayOverEpoch(start=8, saturate=15, decay_factor=0.1)

    experiment = Train(
        dataset=trn,
        model=mdl,
        algorithm=trainer,
        extensions=[watcher, decay],
    )

    experiment.main_loop()

    del mdl
    mdl = serial.load('./Saved Models/conv_supervised_layerwise_best1.pkl')
    mdl = push_monitor(mdl, 'k')
    '''
	The 2nd Convolution and Pooling Layers are added below.
	'''
    h2 = mlp.ConvRectifiedLinear(layer_name='h2',
                                 output_channels=64,
                                 irange=0.05,
                                 kernel_shape=[4, 4],
                                 pool_shape=[4, 4],
                                 pool_stride=[2, 2],
                                 max_kernel_norm=1.9365)

    fc = mlp.RectifiedLinear(layer_name='fc', dim=1500, irange=0.05)
    output = mlp.Softmax(layer_name='y',
                         n_classes=171,
                         irange=.005,
                         max_col_norm=1.9365)

    del mdl.layers[-1]
    mdl.layer_names.remove('y')
    del mdl.layers[-1]
    mdl.layer_names.remove('fc')
    mdl.add_layers([h2, fc, output])

    trainer = sgd.SGD(learning_rate=0.002,
                      batch_size=batch_size,
                      learning_rule=learning_rule.RMSProp(),
                      cost=SumOfCosts(costs=[
                          Default(),
                          WeightDecay(coeffs=[0.0005, 0.0005, 0.0005, 0.0005])
                      ]),
                      train_iteration_mode='shuffled_sequential',
                      monitor_iteration_mode='sequential',
                      termination_criterion=EpochCounter(max_epochs=15),
                      monitoring_dataset={
                          'test': tst,
                          'valid': vld
                      })

    watcher = best_params.MonitorBasedSaveBest(
        channel_name='valid_y_misclass',
        save_path='./Saved Models/conv_supervised_layerwise_best2.pkl')

    decay = sgd.LinearDecayOverEpoch(start=8, saturate=15, decay_factor=0.1)

    experiment = Train(
        dataset=trn,
        model=mdl,
        algorithm=trainer,
        extensions=[watcher, decay],
    )

    experiment.main_loop()

    del mdl
    mdl = serial.load('./Saved Models/conv_supervised_layerwise_best2.pkl')
    mdl = push_monitor(mdl, 'l')
    '''
	The 3rd Convolution and Pooling Layers are added below.
	'''
    h3 = mlp.ConvRectifiedLinear(layer_name='h3',
                                 output_channels=64,
                                 irange=0.05,
                                 kernel_shape=[4, 4],
                                 pool_shape=[4, 4],
                                 pool_stride=[2, 2],
                                 max_kernel_norm=1.9365)

    fc = mlp.RectifiedLinear(layer_name='h3', dim=1500, irange=0.05)
    output = mlp.Softmax(layer_name='y',
                         n_classes=10,
                         irange=.005,
                         max_col_norm=1.9365)

    del mdl.layers[-1]
    mdl.layer_names.remove('y')
    del mdl.layers[-1]
    mdl.layer_names.remove('fc')
    mdl.add_layers([h3, output])

    trainer = sgd.SGD(
        learning_rate=.002,
        batch_size=batch_size,
        learning_rule=learning_rule.RMSProp(),
        cost=SumOfCosts(costs=[
            Default(),
            WeightDecay(coeffs=[0.0005, 0.0005, 0.0005, 0.0005, 0.0005])
        ]),
        train_iteration_mode='shuffled_sequential',
        monitor_iteration_mode='sequential',
        termination_criterion=EpochCounter(max_epochs=15),
        monitoring_dataset={
            'test': tst,
            'valid': vld
        })

    watcher = best_params.MonitorBasedSaveBest(
        channel_name='valid_y_misclass',
        save_path='./Saved Models/conv_supervised_layerwise_best3.pkl')

    decay = sgd.LinearDecayOverEpoch(start=8, saturate=15, decay_factor=0.1)

    experiment = Train(
        dataset=trn,
        model=mdl,
        algorithm=trainer,
        extensions=[watcher, decay],
    )

    experiment.main_loop()
Example #23
def main():
    base_name = sys.argv[1]  # output file-name prefix
    n_epoch = int(sys.argv[2])  # number of epochs
    n_hidden = int(sys.argv[3])  # number of hidden units
    include_rate = float(sys.argv[4])  # include rate (1 - dropout rate)

    in_size = 943  # number of input units
    out_size = 4760  # number of output units
    b_size = 200  # batch size
    l_rate = 5e-4  # learning rate
    l_rate_min = 1e-5  # minimum learning rate
    decay_factor = 0.9  # learning-rate decay factor
    lr_scale = 3.0
    momentum = 0.5  # momentum coefficient
    init_vals = np.sqrt(6.0/(np.array([in_size, n_hidden])+np.array([n_hidden, out_size])))
    
    print 'loading data...'
    # load the training, validation, and test data
    X_tr = np.load('bgedv2_X_tr_float64.npy')
    Y_tr = np.load('bgedv2_Y_tr_0-4760_float64.npy')
    Y_tr_target = np.array(Y_tr)
    X_va = np.load('bgedv2_X_va_float64.npy')
    Y_va = np.load('bgedv2_Y_va_0-4760_float64.npy')
    Y_va_target = np.array(Y_va)
    X_te = np.load('bgedv2_X_te_float64.npy')
    Y_te = np.load('bgedv2_Y_te_0-4760_float64.npy')
    Y_te_target = np.array(Y_te)

    X_1000G = np.load('1000G_X_float64.npy')
    Y_1000G = np.load('1000G_Y_0-4760_float64.npy')
    Y_1000G_target = np.array(Y_1000G)
    X_GTEx = np.load('GTEx_X_float64.npy')
    Y_GTEx = np.load('GTEx_Y_0-4760_float64.npy')
    Y_GTEx_target = np.array(Y_GTEx)

    # seed the random number generator
    random.seed(0)
    # randomly pick 5000 training samples for monitoring
    monitor_idx_tr = random.sample(range(88807), 5000)
    # wrap X and Y in a DenseDesignMatrix
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'), y=Y_tr.astype('float32'))
    # pull out the 5000 monitored training samples
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[monitor_idx_tr, :]
    # hidden layer of the MLP (Tanh activation)
    h1_layer = p2_md_mlp.Tanh(layer_name='h1', dim=n_hidden, irange=init_vals[0], W_lr_scale=1.0, b_lr_scale=1.0)
    # output layer of the MLP (linear activation)
    o_layer = p2_md_mlp.Linear(layer_name='y', dim=out_size, irange=0.0001, W_lr_scale=lr_scale, b_lr_scale=1.0)
    # assemble the model
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, o_layer], seed=1)
    # dropout settings (include probability and rescaling per layer)
    dropout_cost = p2_ct_mlp_dropout.Dropout(input_include_probs={'h1':1.0, 'y':include_rate}, 
                                             input_scales={'h1':1.0, 
                                                           'y':np.float32(1.0/include_rate)})
    # training algorithm (batch size, learning rate, learning rule, termination criterion, dropout cost)
    algorithm = p2_alg_sgd.SGD(batch_size=b_size, learning_rate=l_rate, 
                               learning_rule = p2_alg_lr.Momentum(momentum),
                               termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
                               cost=dropout_cost)
    # Train object ties together the dataset, the model, and the algorithm
    train = pylearn2.train.Train(dataset=data_tr, model=model, algorithm=algorithm)
    train.setup()

    x = T.matrix()
    y = model.fprop(x)  # fprop gives the trained model's predictions for input x
    f = theano.function([x], y) 

    MAE_va_old = 10.0
    MAE_va_best = 10.0
    MAE_tr_old = 10.0
    MAE_te_old = 10.0
    MAE_1000G_old = 10.0
    MAE_1000G_best = 10.0
    MAE_GTEx_old = 10.0

    outlog = open(base_name + '.log', 'w')
    log_str = '\t'.join(map(str, ['epoch', 'MAE_va', 'MAE_va_change', 'MAE_te', 'MAE_te_change', 
                              'MAE_1000G', 'MAE_1000G_change', 'MAE_GTEx', 'MAE_GTEx_change',
                              'MAE_tr', 'MAE_tr_change', 'learning_rate', 'time(sec)']))
    print log_str
    outlog.write(log_str + '\n')
    sys.stdout.flush()  # flush the output buffer

    for epoch in range(0, n_epoch):
        t_old = time.time() #开始时间
        train.algorithm.train(train.dataset)#训练
        #计算不同数据集预测值
        Y_va_hat = f(X_va.astype('float32')).astype('float64')
        Y_te_hat = f(X_te.astype('float32')).astype('float64')
        Y_tr_hat_monitor = f(X_tr_monitor.astype('float32')).astype('float64')
        Y_1000G_hat = f(X_1000G.astype('float32')).astype('float64')
        Y_GTEx_hat = f(X_GTEx.astype('float32')).astype('float64')
        # MAE between predictions and targets
        MAE_va = np.abs(Y_va_target - Y_va_hat).mean()
        MAE_te = np.abs(Y_te_target - Y_te_hat).mean()
        MAE_tr = np.abs(Y_tr_monitor_target - Y_tr_hat_monitor).mean()
        MAE_1000G = np.abs(Y_1000G_target - Y_1000G_hat).mean()
        MAE_GTEx = np.abs(Y_GTEx_target - Y_GTEx_hat).mean()
        # relative change in MAE since the previous epoch
        MAE_va_change = (MAE_va - MAE_va_old)/MAE_va_old
        MAE_te_change = (MAE_te - MAE_te_old)/MAE_te_old
        MAE_tr_change = (MAE_tr - MAE_tr_old)/MAE_tr_old
        MAE_1000G_change = (MAE_1000G - MAE_1000G_old)/MAE_1000G_old
        MAE_GTEx_change = (MAE_GTEx - MAE_GTEx_old)/MAE_GTEx_old
        
        # carry the current MAEs forward for the next epoch
        MAE_va_old = MAE_va
        MAE_te_old = MAE_te
        MAE_tr_old = MAE_tr
        MAE_1000G_old = MAE_1000G
        MAE_GTEx_old = MAE_GTEx

        
        t_new = time.time() # end time
        l_rate = train.algorithm.learning_rate.get_value()
        log_str = '\t'.join(map(str, [epoch+1, '%.6f'%MAE_va, '%.6f'%MAE_va_change, '%.6f'%MAE_te, '%.6f'%MAE_te_change,
                                  '%.6f'%MAE_1000G, '%.6f'%MAE_1000G_change, '%.6f'%MAE_GTEx, '%.6f'%MAE_GTEx_change,
                                  '%.6f'%MAE_tr, '%.6f'%MAE_tr_change, '%.5f'%l_rate, int(t_new-t_old)]))
        print log_str
        outlog.write(log_str + '\n')
        sys.stdout.flush()
        
        if MAE_tr_change > 0: # if the training MAE increased, decay the learning rate
            l_rate = l_rate*decay_factor
        if l_rate < l_rate_min: # clamp the learning rate at l_rate_min
            l_rate = l_rate_min

        train.algorithm.learning_rate.set_value(np.float32(l_rate)) # push the updated learning rate into the trainer
        # save the model and predictions whenever the validation MAE improves
        if MAE_va < MAE_va_best:
            MAE_va_best = MAE_va
            outmodel = open(base_name + '_bestva_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_bestva_Y_te_hat.npy', Y_te_hat)
            np.save(base_name + '_bestva_Y_va_hat.npy', Y_va_hat)
        # save the model and predictions whenever the 1000G MAE improves
        if MAE_1000G < MAE_1000G_best:
            MAE_1000G_best = MAE_1000G
            outmodel = open(base_name + '_best1000G_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_best1000G_Y_1000G_hat.npy', Y_1000G_hat)
            np.save(base_name + '_best1000G_Y_GTEx_hat.npy', Y_GTEx_hat)

    print 'MAE_va_best : %.6f' % (MAE_va_best)
    print 'MAE_1000G_best : %.6f' % (MAE_1000G_best)
    outlog.write('MAE_va_best : %.6f' % (MAE_va_best) + '\n')
    outlog.write('MAE_1000G_best : %.6f' % (MAE_1000G_best) + '\n')
    outlog.close()
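    # ---- added sketch (not part of the original script) ----
    # the loop above pickles the model with the best validation MAE; assuming the same
    # names (pkl, np, theano, T, base_name, X_va, Y_va_target) are still in scope,
    # reloading that pickle and re-checking its validation MAE might look roughly like:
    best_model = pkl.load(open(base_name + '_bestva_model.pkl', 'rb'))
    x_best = T.matrix()
    f_best = theano.function([x_best], best_model.fprop(x_best))
    Y_va_hat_reload = f_best(X_va.astype('float32')).astype('float64')
    print 'reloaded model MAE_va: %.6f' % np.abs(Y_va_target - Y_va_hat_reload).mean()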
Example #24
0
    random.seed(0)   # seed the random number generator; call this before any other random-module function
    monitor_idx_tr = random.sample(range(88807), 5000)   # indices of training examples used for monitoring
    # cast the data to 32-bit floats and wrap it in a DenseDesignMatrix: a dense matrix in which
    # each row is an example and each column a feature
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'), y=Y_tr.astype('float32'))
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[monitor_idx_tr, :]
    # one hidden layer with a Tanh activation; the output layer uses a linear activation
    h1_layer = p2_md_mlp.Tanh(layer_name='h1', dim=n_hidden, irange=init_vals[0], W_lr_scale=1.0, b_lr_scale=1.0) 
    o_layer = p2_md_mlp.Linear(layer_name='y', dim=out_size, irange=0.0001, W_lr_scale=lr_scale, b_lr_scale=1.0)
    # Multilayer Perceptron; nvis = number of visible (input) units, layers = a list of layer objects (the last layer defines the MLP's output space)
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, o_layer], seed=1)
    dropout_cost = p2_ct_mlp_dropout.Dropout(input_include_probs={'h1':1.0, 'y':include_rate}, 
                                             input_scales={'h1':1.0, 
                                                           'y':np.float32(1.0/include_rate)})
    # stochastic gradient descent
    algorithm = p2_alg_sgd.SGD(batch_size=b_size, learning_rate=l_rate, 
                               learning_rule = p2_alg_lr.Momentum(momentum),
                               termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
                               cost=dropout_cost)
    # training object: dataset is the DenseDesignMatrix above, model is the MLP, algorithm is SGD
    train = pylearn2.train.Train(dataset=data_tr, model=model, algorithm=algorithm)
    train.setup()

    x = T.matrix()             # symbolic 2-D matrix
    #fprop(state_below) does the forward prop transformation
    y = model.fprop(x)  
    f = theano.function([x], y)  # compile a Theano function that maps input x to output y

    MAE_va_old = 10.0      # mean absolute error on the validation set
    MAE_va_best = 10.0
    MAE_tr_old = 10.0      # mean absolute error on the training (monitoring) set
    MAE_te_old = 10.0
    
Example #25
0
                y.append([1, 0])
        X = np.array(X)
        y = np.array(y)
        super(XOR, self).__init__(X=X, y=y)


# create XOR dataset
ds = XOR()
# create hidden layer with 2 nodes, init weights in range -0.1 to 0.1 and add
# a bias with value 1
hidden_layer = mlp.Sigmoid(layer_name='hidden', dim=2, irange=.1, init_bias=1.)
# create Softmax output layer
output_layer = mlp.Softmax(2, 'output', irange=.1)
# create Stochastic Gradient Descent trainer that runs for 400 epochs
trainer = sgd.SGD(learning_rate=.05,
                  batch_size=10,
                  termination_criterion=EpochCounter(400))
layers = [hidden_layer, output_layer]
# create neural net that takes two inputs
ann = mlp.MLP(layers, nvis=2)
trainer.setup(ann, ds)
# train neural net until the termination criterion is true
while True:
    trainer.train(dataset=ds)
    ann.monitor.report_epoch()
    ann.monitor()
    if not trainer.continue_learning(ann):
        break

inputs = np.array([[0, 0]])
print(ann.fprop(theano.shared(inputs, name='inputs')).eval())
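# ---- added sketch (not part of the original example) ----
# the example above only runs [0, 0] through the trained net; assuming theano and numpy
# are imported as elsewhere in this collection, all four XOR input combinations can be
# checked in one batch:
X_check = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=theano.config.floatX)
print(ann.fprop(theano.shared(X_check, name='inputs')).eval())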
Example #26
0
def train(d=None):
    train_X = np.array(d.train_X)
    train_y = np.array(d.train_Y)
    valid_X = np.array(d.valid_X)
    valid_y = np.array(d.valid_Y)
    test_X = np.array(d.test_X)
    test_y = np.array(d.test_Y)
    nb_classes = len(np.unique(train_y))
    train_y = convert_one_hot(train_y)
    valid_y = convert_one_hot(valid_y)
    # train_set = RotationalDDM(X=train_X, y=train_y)
    train_set = DenseDesignMatrix(X=train_X, y=train_y)
    valid_set = DenseDesignMatrix(X=valid_X, y=valid_y)
    print 'Setting up'
    batch_size = 100
    c0 = mlp.ConvRectifiedLinear(
        layer_name='c0',
        output_channels=64,
        irange=.05,
        kernel_shape=[5, 5],
        pool_shape=[4, 4],
        pool_stride=[2, 2],
        # W_lr_scale=0.25,
        max_kernel_norm=1.9365)
    c1 = mlp.ConvRectifiedLinear(
        layer_name='c1',
        output_channels=64,
        irange=.05,
        kernel_shape=[5, 5],
        pool_shape=[4, 4],
        pool_stride=[2, 2],
        # W_lr_scale=0.25,
        max_kernel_norm=1.9365)
    c2 = mlp.ConvRectifiedLinear(
        layer_name='c2',
        output_channels=64,
        irange=.05,
        kernel_shape=[5, 5],
        pool_shape=[4, 4],
        pool_stride=[5, 4],
        W_lr_scale=0.25,
        # max_kernel_norm=1.9365
    )
    sp0 = mlp.SoftmaxPool(
        detector_layer_dim=16,
        layer_name='sp0',
        pool_size=4,
        sparse_init=512,
    )
    sp1 = mlp.SoftmaxPool(
        detector_layer_dim=16,
        layer_name='sp1',
        pool_size=4,
        sparse_init=512,
    )
    r0 = mlp.RectifiedLinear(
        layer_name='r0',
        dim=512,
        sparse_init=512,
    )
    r1 = mlp.RectifiedLinear(
        layer_name='r1',
        dim=512,
        sparse_init=512,
    )
    s0 = mlp.Sigmoid(
        layer_name='s0',
        dim=500,
        # max_col_norm=1.9365,
        sparse_init=15,
    )
    out = mlp.Softmax(
        n_classes=nb_classes,
        layer_name='output',
        irange=.0,
        # max_col_norm=1.9365,
        # sparse_init=nb_classes,
    )
    epochs = EpochCounter(100)
    layers = [s0, out]
    decay_coeffs = [.00005, .00005, .00005]
    in_space = Conv2DSpace(
        shape=[d.size, d.size],
        num_channels=1,
    )
    vec_space = VectorSpace(d.size**2)
    nn = mlp.MLP(
        layers=layers,
        # input_space=in_space,
        nvis=d.size**2,
        # batch_size=batch_size,
    )
    trainer = sgd.SGD(
        learning_rate=0.01,
        # cost=SumOfCosts(costs=[
        # dropout.Dropout(),
        #     MethodCost(method='cost_from_X'),
        # WeightDecay(decay_coeffs),
        # ]),
        # cost=MethodCost(method='cost_from_X'),
        batch_size=batch_size,
        # train_iteration_mode='even_shuffled_sequential',
        termination_criterion=epochs,
        # learning_rule=learning_rule.Momentum(init_momentum=0.5),
    )
    trainer = bgd.BGD(
        batch_size=10000,
        line_search_mode='exhaustive',
        conjugate=1,
        updates_per_batch=10,
        termination_criterion=epochs,
    )
    lr_adjustor = LinearDecayOverEpoch(
        start=1,
        saturate=10,
        decay_factor=.1,
    )
    momentum_adjustor = learning_rule.MomentumAdjustor(
        final_momentum=.99,
        start=1,
        saturate=10,
    )
    trainer.setup(nn, train_set)
    print 'Learning'
    test_X = vec_space.np_format_as(test_X, nn.get_input_space())
    train_X = vec_space.np_format_as(train_X, nn.get_input_space())
    i = 0
    X = nn.get_input_space().make_theano_batch()
    Y = nn.fprop(X)
    predict = theano.function([X], Y)
    best = -40
    best_iter = -1
    while trainer.continue_learning(nn):
        print '--------------'
        print 'Training Epoch ' + str(i)
        trainer.train(dataset=train_set)
        nn.monitor()
        print 'Evaluating...'
        predictions = convert_categorical(predict(train_X[:2000]))
        score = accuracy_score(convert_categorical(train_y[:2000]),
                               predictions)
        print 'Score on train: ' + str(score)
        predictions = convert_categorical(predict(test_X))
        score = accuracy_score(test_y, predictions)
        print 'Score on test: ' + str(score)
        best, best_iter = (best, best_iter) if best > score else (score, i)
        print 'Current best: ' + str(best) + ' at iter ' + str(best_iter)
        print classification_report(test_y, predictions)
        print 'Adjusting parameters...'
        # momentum_adjustor.on_monitor(nn, valid_set, trainer)
        # lr_adjustor.on_monitor(nn, valid_set, trainer)
        i += 1
        print ' '
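# ---- added sketch (not part of the original example) ----
# convert_one_hot and convert_categorical are called above but not defined in this
# snippet; hypothetical implementations consistent with how they are used could be:
def convert_one_hot(labels):
    # map integer class labels (0..K-1) to one-hot rows
    labels = np.asarray(labels, dtype='int64')
    one_hot = np.zeros((labels.shape[0], labels.max() + 1), dtype='float32')
    one_hot[np.arange(labels.shape[0]), labels] = 1.0
    return one_hot


def convert_categorical(probs):
    # map rows of class probabilities (or one-hot rows) back to integer labels
    return np.argmax(probs, axis=1)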
Example #27
0
                                        irange=0.01,
                                        init_bias=0)
    hidden_layer3 = mlp.RectifiedLinear(layer_name='hidden3',
                                        dim=128,
                                        irange=0.01,
                                        init_bias=0)
    # create Softmax output layer
    output_layer = mlp.Softmax(3, 'output', irange=.1)
    # create Stochastic Gradient Descent trainer that runs for up to 1000 epochs
    cost = NegativeLogLikelihoodCost()
    rule = Momentum(0.9)
    # rule = Momentum(0.9, True)
    # update_callbacks=ExponentialDecay(1 + 1e-5, 0.001)
    trainer = sgd.SGD(learning_rate=0.01,
                      cost=cost,
                      batch_size=128,
                      termination_criterion=EpochCounter(1000),
                      monitoring_dataset=vds,
                      learning_rule=rule)
    layers = [hidden_layer, hidden_layer2, output_layer]
    # create neural net whose input dimension matches the dataset's feature count
    ann = mlp.MLP(layers, nvis=ds.feat_cnt)

    trainer.setup(ann, ds)
    print trainer.cost
    # train neural net until the termination criterion is true

    iteration = 0

    while True:
        trainer.train(dataset=ds)
        ann.monitor.report_epoch()
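        # ---- added sketch (not part of the original example) ----
        # the snippet is cut off here; the other examples in this collection close the
        # loop the same way, so a plausible continuation is:
        ann.monitor()
        if not trainer.continue_learning(ann):
            break
        iteration += 1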
Example #28
0
output = mlp.Softmax(layer_name='y',
                     n_classes=10,
                     irange=.005,
                     max_col_norm=1.9365)

layers = [l1, l2, l3, l4, output]

mdl = mlp.MLP(layers,
              input_space=in_space)

trainer = sgd.SGD(learning_rate=.17,
                  batch_size=128,
                  learning_rule=learning_rule.Momentum(.5),
                  # Remember, default dropout is .5
                  cost=Dropout(input_include_probs={'l1': .8},
                               input_scales={'l1': 1.}),
                  termination_criterion=EpochCounter(max_epochs=475),
                  monitoring_dataset={'valid': tst,
                                      'train': trn})

preprocessor = Pipeline([GlobalContrastNormalization(scale=55.), ZCA()])
trn.apply_preprocessor(preprocessor=preprocessor, can_fit=True)
tst.apply_preprocessor(preprocessor=preprocessor, can_fit=False)
serial.save('kaggle_cifar10_preprocessor.pkl', preprocessor)

watcher = best_params.MonitorBasedSaveBest(
    channel_name='valid_y_misclass',
    save_path='kaggle_cifar10_maxout_zca.pkl')

velocity = learning_rule.MomentumAdjustor(final_momentum=.65,
Example #29
0
momentum_rule = learning_rule.Momentum(initial_momentum)
 
# learning rate
start = .1
saturate = 20
decay_factor = .00001
learning_rate_adjustor = sgd.LinearDecayOverEpoch(start, saturate, decay_factor)

# termination criterion that stops after 20 epochs without
# any decrease in the monitored objective on the validation set
termination_criterion = MonitorBased(channel_name='objective', N=20, prop_decrease=0.0)
 
# create Stochastic Gradient Descent trainer 
trainer = sgd.SGD(learning_rate=.001,
                    batch_size=10,
                    monitoring_dataset=ds_valid, 
                    termination_criterion=termination_criterion, 
                    cost=L1_cost)
#learning_rule=momentum_rule,
trainer.setup(ann, ds_train) 

# add monitor for saving the model with best score
monitor_save_best = best_params.MonitorBasedSaveBest('objective','./tmp/best.pkl')
 

#####################################
#Train model
####################################

# train neural net until the termination criterion is true
while True:
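    # ---- added sketch (not part of the original example) ----
    # the snippet is cut off here; following the pattern of the other examples and the
    # monitor_save_best extension defined above, the loop body would plausibly be:
    trainer.train(dataset=ds_train)
    ann.monitor.report_epoch()
    ann.monitor()
    monitor_save_best.on_monitor(ann, ds_valid, trainer)
    if not trainer.continue_learning(ann):
        break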
Example #30
0
from pylearn2.models import mlp
from pylearn2.training_algorithms import sgd
from pylearn2.termination_criteria import EpochCounter


raw_ds = CLICK4DAY(which_set='train', which_day=21)
transformer = Transformer(raw=raw_ds, nfeatures=1024,  rng=None)
ds = TransformerDataset(raw=raw_ds, transformer=transformer, cpu_only=False, \
                 space_preserving=False)


hidden_layer = mlp.Sigmoid(layer_name='hidden', dim=256, irange=.1, init_bias=1.)

output_layer = mlp.Softmax(2, 'output', irange=.1)

trainer = sgd.SGD(learning_rate=.05, batch_size=1024, \
train_iteration_mode='even_sequential',termination_criterion=EpochCounter(400))

layers = [hidden_layer, output_layer]

ann = mlp.MLP(layers, nvis=1024)

trainer.setup(ann, ds)

# train neural net until the termination criterion is true
while True:
    trainer.train(dataset=ds)
    ann.monitor.report_epoch()
    ann.monitor()
    if not trainer.continue_learning(ann):
        break
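# ---- added sketch (not part of the original example) ----
# a minimal way to get predictions out of the trained net; X_batch is a stand-in for a
# batch of already-transformed 1024-feature inputs and is an assumption, not part of the
# original snippet:
import numpy as np
import theano
import theano.tensor as T

X_sym = T.matrix('X')
predict_fn = theano.function([X_sym], ann.fprop(X_sym))
X_batch = np.random.rand(5, 1024).astype('float32')
print predict_fn(X_batch).argmax(axis=1)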