Example 1
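A minimal sketch of the imports this snippet appears to assume, based on the pylearn2 API it calls (`SarDataset`, `gen_center_sub_window`, `read_hhv`, and `sar_predict` are project-specific helpers not shown here):

import time
from pylearn2 import space
from pylearn2.models import mlp, maxout
from pylearn2.training_algorithms import sgd
from pylearn2.termination_criteria import EpochCounter
from pylearn2.costs.mlp import dropout
from pylearn2.train import Train
from pylearn2.train_extensions import best_params
from pylearn2.utils import serial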
def cnn_run_dropout_maxout(data_path, num_rows, num_cols, num_channels,
                           input_path, pred_path):
    t = time.time()
    sub_window = gen_center_sub_window(76, num_cols)
    # NOTE: `ds` (the train/valid/test splits) is assumed to be loaded from
    # data_path by code omitted from this snippet
    trn = SarDataset(ds[0][0], ds[0][1], sub_window)
    vld = SarDataset(ds[1][0], ds[1][1], sub_window)
    tst = SarDataset(ds[2][0], ds[2][1], sub_window)
    print 'Took {}s to read data'.format(time.time() - t)
    t = time.time()
    batch_size = 100
    # Maxout layer created but never added to `layers` below; note that its
    # layer_name ('h2') duplicates the first conv layer's name
    h1 = maxout.Maxout(layer_name='h2', num_units=1, num_pieces=100, irange=.1)
    hidden_layer = mlp.ConvRectifiedLinear(layer_name='h2',
                                           output_channels=8,
                                           irange=0.05,
                                           kernel_shape=[5, 5],
                                           pool_shape=[2, 2],
                                           pool_stride=[2, 2],
                                           max_kernel_norm=1.9365)
    hidden_layer2 = mlp.ConvRectifiedLinear(layer_name='h3',
                                            output_channels=8,
                                            irange=0.05,
                                            kernel_shape=[5, 5],
                                            pool_shape=[2, 2],
                                            pool_stride=[2, 2],
                                            max_kernel_norm=1.9365)
    #output_layer = mlp.Softplus(dim=1,layer_name='output',irange=0.1)
    output_layer = mlp.Linear(dim=1, layer_name='output', irange=0.05)
    trainer = sgd.SGD(learning_rate=0.001,
                      batch_size=100,
                      termination_criterion=EpochCounter(2000),
                      cost=dropout.Dropout(),
                      train_iteration_mode='even_shuffled_sequential',
                      monitor_iteration_mode='even_shuffled_sequential',
                      monitoring_dataset={
                          'test': tst,
                          'valid': vld,
                          'train': trn
                      })
    layers = [hidden_layer, hidden_layer2, output_layer]
    input_space = space.Conv2DSpace(shape=[num_rows, num_cols],
                                    num_channels=num_channels)

    ann = mlp.MLP(layers, input_space=input_space, batch_size=batch_size)
    watcher = best_params.MonitorBasedSaveBest(channel_name='valid_objective',
                                               save_path='sar_cnn_mlp.pkl')
    experiment = Train(dataset=trn,
                       model=ann,
                       algorithm=trainer,
                       extensions=[watcher])
    print 'Took {}s to compile code'.format(time.time() - t)
    t = time.time()
    experiment.main_loop()
    print 'Training time: {}s'.format(time.time() - t)
    serial.save('cnn_hhv_{0}_{1}.pkl'.format(num_rows, num_cols),
                ann,
                on_overwrite='backup')

    # read the HH and HV polarizations into a 3-D numpy array
    image = read_hhv(input_path)
    return ann, sar_predict(ann, image, pred_path)
Example 2
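This fragment starts mid-script; `stack`, `sup_data`, `structure`, `batch_size`, `submission`, `construct_dbn_from_stack`, and `get_finetuner` are defined earlier in the original file. The only import it clearly needs beyond those, assuming the standard pylearn2 layout:

from pylearn2.costs.mlp import dropout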
    #serial.save(DATA_DIR+'cae6_005_pretrained.pkl', stack)

    # construct DBN
    dbn = construct_dbn_from_stack(stack)

    # train DBN
    if submission:
        traindata = sup_data[2]
        validdata = sup_data[2]
    else:
        traindata = sup_data[0]
        validdata = sup_data[1]

    # dropout cost: inputs to 'h0' (and, by default, to every other layer) are
    # kept with probability 0.5 and scaled by 1/0.5 = 2, so the expected input
    # to each layer is unchanged
    cost = dropout.Dropout(input_include_probs={'h0': 0.5},
                           input_scales={'h0': 1. / 0.5},
                           default_input_include_prob=0.5,
                           default_input_scale=1. / 0.5)

    # finetune softmax layer a bit
    finetuner = get_finetuner(dbn,
                              cost,
                              traindata,
                              validdata,
                              batch_size,
                              iters=100)
    finetuner.main_loop()

    # now finetune layer-by-layer
    lrs = [5., 2., 1., 0.5, 0.25]
    for ii, lr in zip(range(len(structure) - 1), lrs):
        # set lr to boosted value for current layer
Example 3
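The `p2_*` module aliases used here (and in Examples 5, 6, and 7) are not shown in the snippet; a plausible reconstruction from the pylearn2 package layout:

import sys
import time
import random
import cPickle as pkl
import numpy as np
import theano
import theano.tensor as T
import pylearn2.train
import pylearn2.models.mlp as p2_md_mlp
import pylearn2.datasets.dense_design_matrix as p2_dt_dd
import pylearn2.training_algorithms.sgd as p2_alg_sgd
import pylearn2.training_algorithms.learning_rule as p2_alg_lr
import pylearn2.costs.mlp.dropout as p2_ct_mlp_dropout
import pylearn2.termination_criteria as p2_termcri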
def main():
    base_name = sys.argv[1]  # filename prefix
    n_epoch = int(sys.argv[2])  # number of epochs
    n_hidden = int(sys.argv[3])  # number of hidden units
    include_rate = float(sys.argv[4])  # include rate (1 - dropout)

    in_size = 943  # number of input units
    out_size = 4760  # number of output units
    b_size = 200  # batch size
    l_rate = 5e-4  # learning rate
    l_rate_min = 1e-5  # minimum learning rate
    decay_factor = 0.9  # learning-rate decay factor
    lr_scale = 3.0  # learning-rate scale for the output layer
    momentum = 0.5  # momentum coefficient
    init_vals = np.sqrt(6.0/(np.array([in_size, n_hidden])+np.array([n_hidden, out_size])))  # Glorot-style initialization ranges

    print 'loading data...'
    # load the training, validation, and test sets
    X_tr = np.load('bgedv2_X_tr_float64.npy')
    Y_tr = np.load('bgedv2_Y_tr_0-4760_float64.npy')
    Y_tr_target = np.array(Y_tr)
    X_va = np.load('bgedv2_X_va_float64.npy')
    Y_va = np.load('bgedv2_Y_va_0-4760_float64.npy')
    Y_va_target = np.array(Y_va)
    X_te = np.load('bgedv2_X_te_float64.npy')
    Y_te = np.load('bgedv2_Y_te_0-4760_float64.npy')
    Y_te_target = np.array(Y_te)

    X_1000G = np.load('1000G_X_float64.npy')
    Y_1000G = np.load('1000G_Y_0-4760_float64.npy')
    Y_1000G_target = np.array(Y_1000G)
    X_GTEx = np.load('GTEx_X_float64.npy')
    Y_GTEx = np.load('GTEx_Y_0-4760_float64.npy')
    Y_GTEx_target = np.array(Y_GTEx)

    # seed the RNG for reproducibility
    random.seed(0)
    # randomly pick 5000 training samples to monitor
    monitor_idx_tr = random.sample(range(88807), 5000)
    # wrap X and Y in a DenseDesignMatrix
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'), y=Y_tr.astype('float32'))
    # slice out the 5000 monitored samples
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[monitor_idx_tr, :]
    # hidden layer of the MLP (Tanh activation)
    h1_layer = p2_md_mlp.Tanh(layer_name='h1', dim=n_hidden, irange=init_vals[0], W_lr_scale=1.0, b_lr_scale=1.0)
    # output layer of the MLP (linear activation)
    o_layer = p2_md_mlp.Linear(layer_name='y', dim=out_size, irange=0.0001, W_lr_scale=lr_scale, b_lr_scale=1.0)
    # assemble the model
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, o_layer], seed=1)
    # dropout cost: each layer input is kept with probability p and scaled by
    # 1/p, so its expected value is unchanged
    dropout_cost = p2_ct_mlp_dropout.Dropout(input_include_probs={'h1':1.0, 'y':include_rate}, 
                                             input_scales={'h1':1.0, 
                                                           'y':np.float32(1.0/include_rate)})
    # training algorithm: batch size, learning rate, momentum learning rule,
    # termination criterion, and the dropout cost
    algorithm = p2_alg_sgd.SGD(batch_size=b_size, learning_rate=l_rate, 
                               learning_rule = p2_alg_lr.Momentum(momentum),
                               termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
                               cost=dropout_cost)
    # training object: dataset, model, and algorithm
    train = pylearn2.train.Train(dataset=data_tr, model=model, algorithm=algorithm)
    train.setup()

    x = T.matrix()
    y = model.fprop(x)  # model predictions for x
    f = theano.function([x], y)  # compiled prediction function

    MAE_va_old = 10.0
    MAE_va_best = 10.0
    MAE_tr_old = 10.0
    MAE_te_old = 10.0
    MAE_1000G_old = 10.0
    MAE_1000G_best = 10.0
    MAE_GTEx_old = 10.0

    outlog = open(base_name + '.log', 'w')
    log_str = '\t'.join(map(str, ['epoch', 'MAE_va', 'MAE_va_change', 'MAE_te', 'MAE_te_change', 
                              'MAE_1000G', 'MAE_1000G_change', 'MAE_GTEx', 'MAE_GTEx_change',
                              'MAE_tr', 'MAE_tr_change', 'learning_rate', 'time(sec)']))
    print log_str
    outlog.write(log_str + '\n')
    sys.stdout.flush()  # flush the output buffer

    for epoch in range(0, n_epoch):
        t_old = time.time()  # start time
        train.algorithm.train(train.dataset)  # train for one epoch
        # predictions on each dataset
        Y_va_hat = f(X_va.astype('float32')).astype('float64')
        Y_te_hat = f(X_te.astype('float32')).astype('float64')
        Y_tr_hat_monitor = f(X_tr_monitor.astype('float32')).astype('float64')
        Y_1000G_hat = f(X_1000G.astype('float32')).astype('float64')
        Y_GTEx_hat = f(X_GTEx.astype('float32')).astype('float64')
        # MAE between predictions and targets
        MAE_va = np.abs(Y_va_target - Y_va_hat).mean()
        MAE_te = np.abs(Y_te_target - Y_te_hat).mean()
        MAE_tr = np.abs(Y_tr_monitor_target - Y_tr_hat_monitor).mean()
        MAE_1000G = np.abs(Y_1000G_target - Y_1000G_hat).mean()
        MAE_GTEx = np.abs(Y_GTEx_target - Y_GTEx_hat).mean()
        # relative change since the previous epoch
        MAE_va_change = (MAE_va - MAE_va_old)/MAE_va_old
        MAE_te_change = (MAE_te - MAE_te_old)/MAE_te_old
        MAE_tr_change = (MAE_tr - MAE_tr_old)/MAE_tr_old
        MAE_1000G_change = (MAE_1000G - MAE_1000G_old)/MAE_1000G_old
        MAE_GTEx_change = (MAE_GTEx - MAE_GTEx_old)/MAE_GTEx_old
        
        # update the stored MAEs
        MAE_va_old = MAE_va
        MAE_te_old = MAE_te
        MAE_tr_old = MAE_tr
        MAE_1000G_old = MAE_1000G
        MAE_GTEx_old = MAE_GTEx

        
        t_new = time.time()  # end time
        l_rate = train.algorithm.learning_rate.get_value()
        log_str = '\t'.join(map(str, [epoch+1, '%.6f'%MAE_va, '%.6f'%MAE_va_change, '%.6f'%MAE_te, '%.6f'%MAE_te_change,
                                  '%.6f'%MAE_1000G, '%.6f'%MAE_1000G_change, '%.6f'%MAE_GTEx, '%.6f'%MAE_GTEx_change,
                                  '%.6f'%MAE_tr, '%.6f'%MAE_tr_change, '%.5f'%l_rate, int(t_new-t_old)]))
        print log_str
        outlog.write(log_str + '\n')
        sys.stdout.flush()
        
        if MAE_tr_change > 0:  # if training error increased, decay the learning rate
            l_rate = l_rate*decay_factor
        if l_rate < l_rate_min:  # never go below the minimum learning rate
            l_rate = l_rate_min

        train.algorithm.learning_rate.set_value(np.float32(l_rate))  # push the new rate into the training algorithm
        # save the model whenever validation MAE improves
        if MAE_va < MAE_va_best:
            MAE_va_best = MAE_va
            outmodel = open(base_name + '_bestva_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_bestva_Y_te_hat.npy', Y_te_hat)
            np.save(base_name + '_bestva_Y_va_hat.npy', Y_va_hat)
        # save the model whenever 1000G MAE improves
        if MAE_1000G < MAE_1000G_best:
            MAE_1000G_best = MAE_1000G
            outmodel = open(base_name + '_best1000G_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_best1000G_Y_1000G_hat.npy', Y_1000G_hat)
            np.save(base_name + '_best1000G_Y_GTEx_hat.npy', Y_GTEx_hat)

    print 'MAE_va_best : %.6f' % (MAE_va_best)
    print 'MAE_1000G_best : %.6f' % (MAE_1000G_best)
    outlog.write('MAE_va_best : %.6f' % (MAE_va_best) + '\n')
    outlog.write('MAE_1000G_best : %.6f' % (MAE_1000G_best) + '\n')
    outlog.close()
Example 4
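A hedged sketch of the imports this example relies on (`convert_one_hot` and the dataset wrapper `d` are project-specific and not shown):

import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.models import mlp
from pylearn2.models.maxout import MaxoutConvC01B
from pylearn2.space import Conv2DSpace
from pylearn2.training_algorithms import bgd, sgd
from pylearn2.costs.mlp import dropout
from pylearn2 import termination_criteria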
def train(d):
    print 'Creating dataset'
    # load mnist here
    # X = d.train_X
    # y = d.train_Y
    # test_X = d.test_X
    # test_Y = d.test_Y
    # nb_classes = len(np.unique(y))
    # train_y = convert_one_hot(y)
    # train_set = DenseDesignMatrix(X=X, y=y)
    train = DenseDesignMatrix(X=d.train_X, y=convert_one_hot(d.train_Y))
    valid = DenseDesignMatrix(X=d.valid_X, y=convert_one_hot(d.valid_Y))
    test = DenseDesignMatrix(X=d.test_X, y=convert_one_hot(d.test_Y))

    print 'Setting up'
    batch_size = 1000
    # ConvRectifiedLinear layer (defined but not used in the MLP below)
    conv = mlp.ConvRectifiedLinear(
        layer_name='c0',
        output_channels=20,
        irange=.05,
        kernel_shape=[5, 5],
        pool_shape=[4, 4],
        pool_stride=[2, 2],
        # W_lr_scale=0.25,
        max_kernel_norm=1.9365)
    mout = MaxoutConvC01B(layer_name='m0',
                          num_pieces=4,
                          num_channels=96,
                          irange=.05,
                          kernel_shape=[5, 5],
                          pool_shape=[4, 4],
                          pool_stride=[2, 2],
                          W_lr_scale=0.25,
                          max_kernel_norm=1.9365)
    mout2 = MaxoutConvC01B(layer_name='m1',
                           num_pieces=4,
                           num_channels=96,
                           irange=.05,
                           kernel_shape=[5, 5],
                           pool_shape=[4, 4],
                           pool_stride=[2, 2],
                           W_lr_scale=0.25,
                           max_kernel_norm=1.9365)
    # Sigmoid layer (also unused below)
    sigmoid = mlp.Sigmoid(
        layer_name='Sigmoid',
        dim=500,
        sparse_init=15,
    )
    smax = mlp.Softmax(layer_name='y', n_classes=10, irange=0.)
    in_space = Conv2DSpace(shape=[28, 28],
                           num_channels=1,
                           axes=['c', 0, 1, 'b'])
    net = mlp.MLP(
        layers=[mout, mout2, smax],
        input_space=in_space,
        # nvis=784,
    )
    trainer = bgd.BGD(batch_size=batch_size,
                      line_search_mode='exhaustive',
                      conjugate=1,
                      updates_per_batch=10,
                      monitoring_dataset={
                          'train': train,
                          'valid': valid,
                          'test': test
                      },
                      termination_criterion=termination_criteria.MonitorBased(
                          channel_name='valid_y_misclass'))
    # NOTE: this SGD trainer immediately replaces the BGD trainer defined above
    trainer = sgd.SGD(learning_rate=0.15,
                      cost=dropout.Dropout(),
                      batch_size=batch_size,
                      monitoring_dataset={
                          'train': train,
                          'valid': valid,
                          'test': test
                      },
                      termination_criterion=termination_criteria.MonitorBased(
                          channel_name='valid_y_misclass'))
    trainer.setup(net, train)
    epoch = 0
    while True:
        print 'Training...', epoch
        trainer.train(dataset=train)
        net.monitor()
        epoch += 1
        # stop once the monitor-based termination criterion fires; without
        # this check the loop would never terminate
        if not trainer.continue_learning(net):
            break
Example 5
def main():
    base_name = sys.argv[1]  # first argument: filename prefix (sys.argv holds the command-line argument list)
    n_epoch = int(sys.argv[2])  # second argument: number of epochs
    n_hidden = int(sys.argv[3])  # third argument: number of hidden units
    include_rate = float(sys.argv[4])  # fourth argument: include rate (1 - dropout)

    in_size = 1001  # number of input units (one per landmark gene)
    out_size = 1  # number of output units
    b_size = 200  # batch size
    l_rate = 5e-4  # learning rate
    l_rate_min = 1e-5  # minimum learning rate
    decay_factor = 0.9  # learning-rate decay factor
    lr_scale = 3.0  # learning-rate scale for the output layer
    momentum = 0.5  # momentum coefficient
    init_vals = np.sqrt(6.0 / (np.array([in_size, n_hidden]) +
                               np.array([n_hidden, out_size])))  # Glorot-style initialization ranges

    print 'loading data...'

    X_tr = np.load('geno_X_tr_float64.npy')  # training set, stored in NumPy binary format
    Y_tr = np.load('pheno_Y_tr_0-4760_float64.npy')
    Y_tr_target = np.array(Y_tr)
    X_va = np.load('geno_X_va_float64.npy')  # validation set (for model selection)
    Y_va = np.load('pheno_Y_va_0-4760_float64.npy')
    Y_va_target = np.array(Y_va)
    X_te = np.load('geno_te_float64.npy')  # test set (for final evaluation)
    Y_te = np.load('pheno_Y_te_0-4760_float64.npy')
    Y_te_target = np.array(Y_te)

    random.seed(0)  # seed the RNG; call this before any other random-module function
    monitor_idx_tr = random.sample(range(88807), 5000)  # indices of 5000 monitored training samples
    # DenseDesignMatrix represents data as a dense matrix, with one example per
    # row and one feature per column; cast to float32 for training
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'),
                                         y=Y_tr.astype('float32'))
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[
        monitor_idx_tr, :]
    # one hidden layer with Tanh activation; the output layer is linear
    h1_layer = p2_md_mlp.Tanh(layer_name='h1',
                              dim=n_hidden,
                              irange=init_vals[0],
                              W_lr_scale=1.0,
                              b_lr_scale=1.0)
    o_layer = p2_md_mlp.Linear(layer_name='y',
                               dim=out_size,
                               irange=0.0001,
                               W_lr_scale=lr_scale,
                               b_lr_scale=1.0)
    # MLP: nvis is the number of visible (input) units; layers is a list of
    # layer objects, the last of which defines the output space
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, o_layer], seed=1)
    dropout_cost = p2_ct_mlp_dropout.Dropout(input_include_probs={
        'h1': 1.0,
        'y': include_rate
    },
                                             input_scales={
                                                 'h1':
                                                 1.0,
                                                 'y':
                                                 np.float32(1.0 / include_rate)
                                             })
    # stochastic gradient descent
    algorithm = p2_alg_sgd.SGD(
        batch_size=b_size,
        learning_rate=l_rate,
        learning_rule=p2_alg_lr.Momentum(momentum),
        termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
        cost=dropout_cost)
    # training object: dataset (DenseDesignMatrix), model (MLP), algorithm (SGD)
    train = pylearn2.train.Train(dataset=data_tr,
                                 model=model,
                                 algorithm=algorithm)
    train.setup()

    x = T.matrix()  # symbolic 2-D input
    # fprop(state_below) does the forward-prop transformation
    y = model.fprop(x)
    f = theano.function([x], y)  # compiled prediction function mapping x to y

    MAE_va_old = 10.0  # mean absolute errors, initialized high
    MAE_va_best = 10.0
    MAE_tr_old = 10.0
    MAE_te_old = 10.0
    MAE_1000G_old = 10.0  # (the 1000G/GTEx entries are unused in this example)
    MAE_1000G_best = 10.0
    MAE_GTEx_old = 10.0
    outlog = open(base_name + '.log', 'w')
    log_str = '\t'.join(
        map(str, [
            'epoch', 'MAE_va', 'MAE_va_change', 'MAE_te', 'MAE_te_change',
            'MAE_tr', 'MAE_tr_change', 'learning_rate', 'time(sec)'
        ]))
    print log_str  # print the log header
    outlog.write(log_str + '\n')
    # Python buffers stdout; flushing forces the header out immediately
    sys.stdout.flush()

    for epoch in range(0, n_epoch):
        t_old = time.time()
        train.algorithm.train(train.dataset)

        Y_va_hat = f(X_va.astype('float32')).astype('float64')
        Y_te_hat = f(X_te.astype('float32')).astype('float64')
        Y_tr_hat_monitor = f(X_tr_monitor.astype('float32')).astype('float64')

        # mean absolute errors
        MAE_va = np.abs(Y_va_target - Y_va_hat).mean()
        MAE_te = np.abs(Y_te_target - Y_te_hat).mean()
        MAE_tr = np.abs(Y_tr_monitor_target - Y_tr_hat_monitor).mean()

        # relative error changes
        MAE_va_change = (MAE_va - MAE_va_old) / MAE_va_old
        MAE_te_change = (MAE_te - MAE_te_old) / MAE_te_old
        MAE_tr_change = (MAE_tr - MAE_tr_old) / MAE_tr_old

        # update the stored errors with the current values
        MAE_va_old = MAE_va
        MAE_te_old = MAE_te
        MAE_tr_old = MAE_tr

        # current timestamp (seconds since the Unix epoch)
        t_new = time.time()
        l_rate = train.algorithm.learning_rate.get_value()
        log_str = '\t'.join(
            map(str, [
                epoch + 1,
                '%.6f' % MAE_va,
                '%.6f' % MAE_va_change,
                '%.6f' % MAE_te,
                '%.6f' % MAE_te_change,
                '%.6f' % MAE_tr,
                '%.6f' % MAE_tr_change,
                '%.5f' % l_rate,
                int(t_new - t_old)
            ]))
        print log_str
        outlog.write(log_str + '\n')
        sys.stdout.flush()

        if MAE_tr_change > 0:  # if training error increased, decay the learning rate
            l_rate = l_rate * decay_factor
        if l_rate < l_rate_min:  # never go below the minimum learning rate
            l_rate = l_rate_min

        train.algorithm.learning_rate.set_value(np.float32(l_rate))

        if MAE_va < MAE_va_best:
            MAE_va_best = MAE_va
            outmodel = open(base_name + '_bestva_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()
            np.save(base_name + '_bestva_Y_te_hat.npy', Y_te_hat)
            np.save(base_name + '_bestva_Y_va_hat.npy', Y_va_hat)

    print 'MAE_va_best : %.6f' % (MAE_va_best)
    outlog.write('MAE_va_best : %.6f' % (MAE_va_best) + '\n')
    outlog.close()
Example 6
def main():
    base_name = sys.argv[1]
    n_epoch = int(sys.argv[2])
    n_hidden = int(sys.argv[3])
    include_rate = float(sys.argv[4])

    in_size = 943
    out_size = 4760
    b_size = 200
    l_rate = 3e-4
    l_rate_min = 1e-5
    decay_factor = 0.9
    lr_scale = 3.0
    momentum = 0.5
    init_vals = np.sqrt(6.0/(np.array([in_size, n_hidden, n_hidden, n_hidden])+np.array([n_hidden, n_hidden, n_hidden, out_size])))
    
    print 'loading data...'
    
    X_tr = np.load('bgedv2_X_tr_float64.npy')
    Y_tr = np.load('bgedv2_Y_tr_4760-9520_float64.npy')
    Y_tr_target = np.array(Y_tr)
    X_va = np.load('bgedv2_X_va_float64.npy')
    Y_va = np.load('bgedv2_Y_va_4760-9520_float64.npy')
    Y_va_target = np.array(Y_va)
    X_te = np.load('bgedv2_X_te_float64.npy')
    Y_te = np.load('bgedv2_Y_te_4760-9520_float64.npy')
    Y_te_target = np.array(Y_te)

    X_1000G = np.load('1000G_X_float64.npy')
    Y_1000G = np.load('1000G_Y_4760-9520_float64.npy')
    Y_1000G_target = np.array(Y_1000G)
    X_GTEx = np.load('GTEx_X_float64.npy')
    Y_GTEx = np.load('GTEx_Y_4760-9520_float64.npy')
    Y_GTEx_target = np.array(Y_GTEx)

    
    random.seed(0)
    monitor_idx_tr = random.sample(range(88807), 5000)
    
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'), y=Y_tr.astype('float32'))
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[monitor_idx_tr, :]
    h1_layer = p2_md_mlp.Tanh(layer_name='h1', dim=n_hidden, irange=init_vals[0], W_lr_scale=1.0, b_lr_scale=1.0)
    h2_layer = p2_md_mlp.Tanh(layer_name='h2', dim=n_hidden, irange=init_vals[1], W_lr_scale=lr_scale, b_lr_scale=1.0)
    h3_layer = p2_md_mlp.Tanh(layer_name='h3', dim=n_hidden, irange=init_vals[2], W_lr_scale=lr_scale, b_lr_scale=1.0)
    o_layer = p2_md_mlp.Linear(layer_name='y', dim=out_size, irange=0.0001, W_lr_scale=lr_scale, b_lr_scale=1.0)
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, h2_layer, h3_layer, o_layer], seed=1)
    dropout_cost = p2_ct_mlp_dropout.Dropout(input_include_probs={'h1':1.0, 'h2':include_rate, 'h3':include_rate,
                                                                   'y':include_rate}, 
                                             input_scales={'h1':1.0, 'h2':np.float32(1.0/include_rate),
                                                           'h3':np.float32(1.0/include_rate),
                                                           'y':np.float32(1.0/include_rate)})
    
    algorithm = p2_alg_sgd.SGD(batch_size=b_size, learning_rate=l_rate, 
                               learning_rule = p2_alg_lr.Momentum(momentum),
                               termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
                               cost=dropout_cost)

    train = pylearn2.train.Train(dataset=data_tr, model=model, algorithm=algorithm)
    train.setup()

    x = T.matrix()
    y = model.fprop(x)
    f = theano.function([x], y)

    MAE_va_old = 10.0
    MAE_va_best = 10.0
    MAE_tr_old = 10.0
    MAE_te_old = 10.0
    MAE_1000G_old = 10.0
    MAE_1000G_best = 10.0
    MAE_GTEx_old = 10.0

    outlog = open(base_name + '.log', 'w')
    log_str = '\t'.join(map(str, ['epoch', 'MAE_va', 'MAE_va_change', 'MAE_te', 'MAE_te_change', 
                              'MAE_1000G', 'MAE_1000G_change', 'MAE_GTEx', 'MAE_GTEx_change',
                              'MAE_tr', 'MAE_tr_change', 'learning_rate', 'time(sec)']))
    print log_str
    outlog.write(log_str + '\n')
    sys.stdout.flush()

    for epoch in range(0, n_epoch):
        t_old = time.time()
        train.algorithm.train(train.dataset)
        
        Y_va_hat = f(X_va.astype('float32')).astype('float64')
        Y_te_hat = f(X_te.astype('float32')).astype('float64')
        Y_tr_hat_monitor = f(X_tr_monitor.astype('float32')).astype('float64')
        Y_1000G_hat = f(X_1000G.astype('float32')).astype('float64')
        Y_GTEx_hat = f(X_GTEx.astype('float32')).astype('float64')

        MAE_va = np.abs(Y_va_target - Y_va_hat).mean()
        MAE_te = np.abs(Y_te_target - Y_te_hat).mean()
        MAE_tr = np.abs(Y_tr_monitor_target - Y_tr_hat_monitor).mean()
        MAE_1000G = np.abs(Y_1000G_target - Y_1000G_hat).mean()
        MAE_GTEx = np.abs(Y_GTEx_target - Y_GTEx_hat).mean()
        
        MAE_va_change = (MAE_va - MAE_va_old)/MAE_va_old
        MAE_te_change = (MAE_te - MAE_te_old)/MAE_te_old
        MAE_tr_change = (MAE_tr - MAE_tr_old)/MAE_tr_old
        MAE_1000G_change = (MAE_1000G - MAE_1000G_old)/MAE_1000G_old
        MAE_GTEx_change = (MAE_GTEx - MAE_GTEx_old)/MAE_GTEx_old

        
        MAE_va_old = MAE_va
        MAE_te_old = MAE_te
        MAE_tr_old = MAE_tr
        MAE_1000G_old = MAE_1000G
        MAE_GTEx_old = MAE_GTEx

        
        t_new = time.time()
        l_rate = train.algorithm.learning_rate.get_value()
        log_str = '\t'.join(map(str, [epoch+1, '%.6f'%MAE_va, '%.6f'%MAE_va_change, '%.6f'%MAE_te, '%.6f'%MAE_te_change,
                                  '%.6f'%MAE_1000G, '%.6f'%MAE_1000G_change, '%.6f'%MAE_GTEx, '%.6f'%MAE_GTEx_change,
                                  '%.6f'%MAE_tr, '%.6f'%MAE_tr_change, '%.5f'%l_rate, int(t_new-t_old)]))
        print log_str
        outlog.write(log_str + '\n')
        sys.stdout.flush()
        
        if MAE_tr_change > 0:
            l_rate = l_rate*decay_factor
        if l_rate < l_rate_min:
            l_rate = l_rate_min

        train.algorithm.learning_rate.set_value(np.float32(l_rate))

        if MAE_va < MAE_va_best:
            MAE_va_best = MAE_va
            outmodel = open(base_name + '_bestva_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_bestva_Y_te_hat.npy', Y_te_hat)
            np.save(base_name + '_bestva_Y_va_hat.npy', Y_va_hat)
        
        if MAE_1000G < MAE_1000G_best:
            MAE_1000G_best = MAE_1000G
            outmodel = open(base_name + '_best1000G_model.pkl', 'wb')
            pkl.dump(model, outmodel)
            outmodel.close()    
            np.save(base_name + '_best1000G_Y_1000G_hat.npy', Y_1000G_hat)
            np.save(base_name + '_best1000G_Y_GTEx_hat.npy', Y_GTEx_hat)

    print 'MAE_va_best : %.6f' % (MAE_va_best)
    print 'MAE_1000G_best : %.6f' % (MAE_1000G_best)
    outlog.write('MAE_va_best : %.6f' % (MAE_va_best) + '\n')
    outlog.write('MAE_1000G_best : %.6f' % (MAE_1000G_best) + '\n')
    outlog.close()
Example 7
    Y_te_target = np.array(Y_te)

    

    random.seed(0)  # seed the RNG; call this before any other random-module function
    monitor_idx_tr = random.sample(range(88807), 5000)  # indices of 5000 monitored training samples
    # DenseDesignMatrix represents data as a dense matrix, with one example per
    # row and one feature per column; cast to float32 for training
    data_tr = p2_dt_dd.DenseDesignMatrix(X=X_tr.astype('float32'), y=Y_tr.astype('float32'))
    X_tr_monitor, Y_tr_monitor_target = X_tr[monitor_idx_tr, :], Y_tr_target[monitor_idx_tr, :]
    # one hidden layer with Tanh activation; the output layer is linear
    h1_layer = p2_md_mlp.Tanh(layer_name='h1', dim=n_hidden, irange=init_vals[0], W_lr_scale=1.0, b_lr_scale=1.0)
    o_layer = p2_md_mlp.Linear(layer_name='y', dim=out_size, irange=0.0001, W_lr_scale=lr_scale, b_lr_scale=1.0)
    # MLP: nvis is the number of visible (input) units; layers is a list of
    # layer objects, the last of which defines the output space
    model = p2_md_mlp.MLP(nvis=in_size, layers=[h1_layer, o_layer], seed=1)
    dropout_cost = p2_ct_mlp_dropout.Dropout(input_include_probs={'h1':1.0, 'y':include_rate}, 
                                             input_scales={'h1':1.0, 
                                                           'y':np.float32(1.0/include_rate)})
    # stochastic gradient descent
    algorithm = p2_alg_sgd.SGD(batch_size=b_size, learning_rate=l_rate, 
                               learning_rule = p2_alg_lr.Momentum(momentum),
                               termination_criterion=p2_termcri.EpochCounter(max_epochs=1000),
                               cost=dropout_cost)
    # training object: dataset (DenseDesignMatrix), model (MLP), algorithm (SGD)
    train = pylearn2.train.Train(dataset=data_tr, model=model, algorithm=algorithm)
    train.setup()

    x = T.matrix()  # symbolic 2-D input
    # fprop(state_below) does the forward-prop transformation
    y = model.fprop(x)
    f = theano.function([x], y)  # compiled prediction function mapping x to y