Code example #1
0
File: Eval.py — Project: datax-lab/MiNet
def eval_omics_net(x_tr, age_tr, y_tr, delta_tr, \
            x_va, age_va, y_va, delta_va, \
            x_te, age_te, y_te, delta_te, \
            gene_indices, pathway_indices, \
            in_nodes, gene_nodes, pathway_nodes, hidden_nodes, \
            LR, L2, max_epochs, dropout_rate, step = 100, tolerance = 0.02, sparse_coding = False):
    """Train an omics_net with the Cox partial likelihood and early stopping.

    Every `step` epochs the concordance index (c-index) is computed on the
    training and validation sets.  When their sum drops more than `tolerance`
    below the best sum seen so far, training stops and the best snapshot is
    scored on the test set.

    Returns:
        (opt_cidx_tr, opt_cidx_va, eval_cindex) -- best train and validation
        c-index, plus the test c-index of the best snapshot when early
        stopping fired (otherwise the last validation c-index).  All three
        are None when max_epochs < step so no evaluation ever ran (the
        original code raised NameError at the return in that case).
    """
    net = omics_net(in_nodes, gene_nodes, pathway_nodes, hidden_nodes)
    ###move the model to GPU when one is available
    if torch.cuda.is_available():
        net = net.cuda()
    ###Adam optimizer; L2 is applied through weight decay
    opt = optim.Adam(net.parameters(), lr=LR, weight_decay = L2)

    ###bug fix: define the "best so far" state up front so the return below
    ###cannot raise NameError when the evaluation branch never runs
    opt_cidx_tr = opt_cidx_va = eval_cindex = None
    opt_net = net

    prev_sum = 0.0
    for epoch in range(max_epochs):
        net.train()
        ###reset gradients to zeros
        opt.zero_grad()
        ###resample the dropout masks consumed inside net.forward
        net.do_m1 = dropout_mask(pathway_nodes, dropout_rate[0])
        net.do_m2 = dropout_mask(hidden_nodes[0], dropout_rate[1])
        ###Forward
        pred = net(x_tr, age_tr, gene_indices, pathway_indices, dropout_rate)
        ###negative Cox partial log-likelihood loss
        loss = neg_par_log_likelihood(pred, y_tr, delta_tr)
        ###calculate gradients
        loss.backward()
        ###force the connections between omics layer and gene layer w.r.t. 'gene_mask'
        net.omics.weight.grad = fixed_s_mask(net.omics.weight.grad, gene_indices)
        ###force the connections between gene layer and pathway layer w.r.t. 'pathway_mask'
        net.gene.weight.grad = fixed_s_mask(net.gene.weight.grad, pathway_indices)
        ###update weights and biases
        opt.step()
        if sparse_coding:
            net = sparse_func(net, x_tr, age_tr, y_tr, delta_tr, gene_indices, pathway_indices, dropout_rate)
        if epoch % step == step - 1:
            ###NOTE(review): the train-set c-index is computed in train mode,
            ###i.e. with dropout active -- kept as in the original; confirm intent
            net.train()
            pred = net(x_tr, age_tr, gene_indices, pathway_indices, dropout_rate)
            train_cindex = c_index(pred.cpu(), y_tr.cpu(), delta_tr.cpu())
            net.eval()
            pred = net(x_va, age_va, gene_indices, pathway_indices, dropout_rate)
            eval_cindex = c_index(pred.cpu(), y_va.cpu(), delta_va.cpu())
            ###the very first evaluation always takes the else branch
            ###(prev_sum starts at 0.0), so the prints below never see None
            if ((eval_cindex.item() + train_cindex.item() + tolerance) < prev_sum):
                print('Early stopping in [%d]' % (epoch + 1))
                print('[%d] Best CIndex in Train: %.3f' % (epoch + 1, opt_cidx_tr))
                print('[%d] Best CIndex in Valid: %.3f' % (epoch + 1, opt_cidx_va))
                ###score the best snapshot on the held-out test set
                opt_net.eval()
                pred = opt_net(x_te, age_te, gene_indices, pathway_indices, dropout_rate)
                eval_cindex = c_index(pred.cpu(), y_te.cpu(), delta_te.cpu())
                break
            else:
                ###record the new best snapshot and its combined score
                prev_sum = eval_cindex.item() + train_cindex.item()
                opt_cidx_tr = train_cindex
                opt_cidx_va = eval_cindex
                opt_net = copy.deepcopy(net)
                print('[%d] CIndex in Train: %.3f' % (epoch + 1, train_cindex))
                print('[%d] CIndex in Valid: %.3f' % (epoch + 1, eval_cindex))

    return (opt_cidx_tr, opt_cidx_va, eval_cindex)
Code example #2
0
File: Net.py — Project: huangpu1/PAGE-Net
    def forward(self, x_1, x_2, x_3, pathway_idx, Drop_Rate):
        """Run the three-branch forward pass and return the linear predictor.

        x_1 flows gene -> pathway -> hidden, x_3 through the image layer,
        and x_2 is concatenated unchanged before the integrative layer.
        During training, pre-sampled masks (do_m1/do_m2/do_m4) implement
        inverted dropout with rates taken from Drop_Rate.
        """
        ###re-apply the sparse mask so gene->pathway connections stay fixed
        self.gene.weight.data = fixed_s_mask(self.gene.weight.data, pathway_idx)

        omics_branch = self.tanh(self.bn1(self.gene(x_1)))
        if self.training:
            ###inverted dropout: rescale here so eval needs no correction
            omics_branch = omics_branch.mul(self.do_m1) * (1 / (1 - Drop_Rate[0]))

        omics_branch = self.tanh(self.bn2(self.pathway(omics_branch)))
        if self.training:
            omics_branch = omics_branch.mul(self.do_m2) * (1 / (1 - Drop_Rate[1]))

        omics_branch = self.tanh(self.bn3(self.hidden(omics_branch)))

        image_branch = x_3
        if self.training:
            image_branch = image_branch.mul(self.do_m4) * (1 / (1 - Drop_Rate[2]))
        image_branch = self.tanh(self.bn4(self.image(image_branch)))

        ###integrate omics branch, raw covariates, and image branch
        fused = torch.cat((omics_branch, x_2, image_branch), 1)
        return self.integrative(fused)
Code example #3
0
File: Net.py — Project: datax-lab/MiNet
 def forward(self, x_1, x_2, gene_idx, pathway_idx, Drop_Rate):
     """Forward pass: omics -> gene -> pathway -> hidden, then concat age.

     x_1 is the omics input, x_2 the age covariate appended before the
     final linear layer.  gene_idx / pathway_idx are the sparse index sets
     used to mask the first two weight matrices; Drop_Rate holds the two
     dropout probabilities used with the pre-sampled masks do_m1 / do_m2.
     Returns the linear risk predictor.
     """
     ###force the connections between omics layer and gene layer w.r.t. 'gene_mask'
     self.omics.weight.data = fixed_s_mask(self.omics.weight.data, gene_idx)
     ###force the connections between gene layer and pathway layer w.r.t. 'pathway_mask'
     self.gene.weight.data = fixed_s_mask(self.gene.weight.data, pathway_idx)
     # batch norm before activation
     x_1 = self.relu(self.bn1(self.omics(x_1)))
     x_1 = self.relu(self.bn2(self.gene(x_1)))
     if self.training == True: 
         # inverted dropout: rescale at train time so eval needs no correction
         x_1 = (1/(1-Drop_Rate[0])) * x_1.mul(self.do_m1)
     x_1 = self.relu(self.bn3(self.pathway(x_1)))
     if self.training == True: 
         x_1 = (1 / (1 - Drop_Rate[1])) * x_1.mul(self.do_m2)
     x_1 = self.relu(self.bn4(self.hidden(x_1)))
     ###append age (x_2) as an extra covariate before the last linear layer
     x_cat = torch.cat((x_1, x_2), 1)
     lin_pred = self.hidden2(x_cat)
     return lin_pred
Code example #4
0
File: Train.py — Project: zhengjing123/PAGE-Net
def train_model(x_tr, age_tr, pt_tr, y_tr, delta_tr, \
            x_va, age_va, pt_va, y_va, delta_va, \
            x_te, age_te, pt_te, y_te, delta_te, \
            pathway_indices, \
            gene_nodes, pathway_nodes, image_nodes, hidden_nodes, \
            lr, l2, max_epochs, dropout_rate, step = 100, tolerance = 0.02, sparse_coding = False, test_phrase = False):
    """Train cox_pasnet_pathology with LR decay and GL-based early stopping.

    After a 500-epoch warm-up, every `step` epochs the generalization loss
    gl = valid_loss / best_valid_loss - 1 is checked; training stops when it
    exceeds `tolerance`.  On every surviving checkpoint the learning rate is
    decayed by 0.8.  When `test_phrase` is True the best snapshot is also
    scored on the test set.

    Returns:
        (opt_cidx_tr, opt_cidx_ev) -- best train c-index and best
        validation (or, with test_phrase, test) c-index.  Both are None when
        max_epochs is too small for any checkpoint to run (the original
        code raised NameError in that case).
    """
    net = cox_pasnet_pathology(gene_nodes, pathway_nodes, image_nodes,
                               hidden_nodes)
    net = net.cuda()
    ###optimizer
    opt = optim.Adam(net.parameters(), lr=lr, weight_decay=l2)
    temp_loss_list = []
    ###bug fix: best-so-far state starts as None so the function is safe even
    ###when no checkpoint runs, and so the first checkpoint can be detected
    ###robustly (the original hard-coded `epoch + 1 == 500`, which breaks
    ###when step does not divide 500)
    opt_cidx_tr = None
    opt_cidx_ev = None
    opt_net = None

    for epoch in range(max_epochs):
        torch.cuda.empty_cache()
        net.train()
        opt.zero_grad()  ###reset gradients to zeros
        ###Randomize dropout masks
        net.do_m1 = dropout_mask(pathway_nodes, dropout_rate[0])
        net.do_m2 = dropout_mask(hidden_nodes[0], dropout_rate[1])
        net.do_m4 = dropout_mask(image_nodes, dropout_rate[2])
        pred = net(x_tr, age_tr, pt_tr, pathway_indices,
                   dropout_rate)  ###Forward
        loss = neg_par_log_likelihood(pred, y_tr, delta_tr)  ###calculate loss
        loss.backward()  ###calculate gradients
        ###force the connections between gene layer and pathway layer w.r.t. 'pathway_mask'
        net.gene.weight.grad = fixed_s_mask(net.gene.weight.grad,
                                            pathway_indices)
        opt.step()  ###update weights and biases
        if sparse_coding:
            net = sparse_func(net, x_tr, age_tr, pt_tr, y_tr, delta_tr,
                              pathway_indices, dropout_rate)
        torch.cuda.empty_cache()
        net.eval()
        valid_pred = net(x_va, age_va, pt_va, pathway_indices, dropout_rate)
        ###already detached+moved here; the original detached a second time
        valid_loss = neg_par_log_likelihood(valid_pred, y_va,
                                            delta_va).detach().cpu()
        temp_loss_list.append(valid_loss)
        ###checkpoint every `step` epochs, after a 500-epoch warm-up
        if (epoch % step == step - 1) and (epoch + 1 >= 500):
            torch.cuda.empty_cache()
            print("Current LR: ", lr)
            opt_temp_loss = np.min(temp_loss_list)
            ###relative increase of the validation loss over its best value
            gl = valid_loss / opt_temp_loss - 1.0
            ###NOTE(review): train c-index computed in train mode (dropout
            ###active) -- kept as in the original; confirm intent
            net.train()
            pred = net(x_tr, age_tr, pt_tr, pathway_indices, dropout_rate)
            train_cindex = c_index(pred.cpu(), y_tr.cpu(), delta_tr.cpu())
            valid_cindex = c_index(valid_pred.cpu(), y_va.cpu(),
                                   delta_va.cpu())
            del pred
            if (gl > tolerance):
                if opt_net is None:
                    ###first checkpoint is already over tolerance: fall back
                    ###to the current model as the "best" snapshot
                    opt_cidx_tr = train_cindex
                    opt_cidx_ev = valid_cindex
                    opt_net = copy.deepcopy(net)
                print('Early stopping in [%d]' % (epoch + 1))
                print('[%d] GL: %.4f' % (epoch + 1, gl))
                print('[%d] Best CIndex in Train: %.3f' %
                      (epoch + 1, opt_cidx_tr))
                print('[%d] Best CIndex in Valid: %.3f' %
                      (epoch + 1, opt_cidx_ev))
                if (test_phrase == True):
                    opt_net.eval()
                    test_pred = opt_net(x_te, age_te, pt_te, pathway_indices,
                                        dropout_rate)
                    opt_cidx_ev = c_index(test_pred.cpu(), y_te.cpu(),
                                          delta_te.cpu())
                    print('[%d] Final CIndex in Test: %.3f' %
                          (epoch + 1, opt_cidx_ev))
                break
            else:
                opt_cidx_tr = train_cindex
                opt_cidx_ev = valid_cindex
                opt_net = copy.deepcopy(net)
                print('[%d] GL: %.4f' % (epoch + 1, gl))
                print('[%d] CIndex in Train: %.3f' % (epoch + 1, train_cindex))
                print('[%d] CIndex in Valid: %.3f' % (epoch + 1, valid_cindex))
                torch.cuda.empty_cache()
                lr = lr * 0.8
                ###bug fix: the original decayed the local `lr` only, so the
                ###optimizer kept its initial rate -- propagate the decay
                for group in opt.param_groups:
                    group['lr'] = lr
                if (test_phrase == True):
                    opt_net.eval()
                    test_pred = opt_net(x_te, age_te, pt_te, pathway_indices,
                                        dropout_rate)
                    opt_cidx_ev = c_index(test_pred.cpu(), y_te.cpu(),
                                          delta_te.cpu())
                    print('[%d] Final CIndex in Test: %.3f' %
                          (epoch + 1, opt_cidx_ev))

    return (opt_cidx_tr, opt_cidx_ev)
Code example #5
0
File: Train.py — Project: zhengjing123/PAGE-Net
def interpret_net(outpath, x, age, pt, y, delta, pathway_indices, \
                    gene_nodes, pathway_nodes, image_nodes, hidden_nodes, \
                    lr, l2, max_epochs, dropout_rate, step = 100, tolerance = 0.05, sparse_coding = False):
    """Fit cox_pasnet_pathology on the full data and save the best model.

    Every `step` epochs the training loss is checked; training stops once it
    rises more than `tolerance` above the previous checkpoint.  The loss
    curve, per-layer sparsity counts and c-index history are written to
    disk, and the best model's state_dict is saved to `outpath`.
    """
    net = cox_pasnet_pathology(gene_nodes, pathway_nodes, image_nodes,
                               hidden_nodes)
    net = net.cuda()
    ###optimizer
    opt = optim.Adam(net.parameters(), lr=lr, weight_decay=l2)
    cidx_tr = []
    loss_tr = []
    last_loss = np.inf
    sp_pathway = []
    sp_hidden = []
    ###bug fix: keep an initial snapshot so torch.save below cannot hit a
    ###NameError when max_epochs < step and no checkpoint ever runs
    opt_net = copy.deepcopy(net)
    for epoch in range(max_epochs):
        torch.cuda.empty_cache()
        net.train()
        opt.zero_grad()  ###reset gradients to zeros
        ###Randomize dropout masks
        net.do_m1 = dropout_mask(pathway_nodes, dropout_rate[0])
        net.do_m2 = dropout_mask(hidden_nodes[0], dropout_rate[1])
        net.do_m4 = dropout_mask(image_nodes, dropout_rate[2])
        pred = net(x, age, pt, pathway_indices, dropout_rate)  ###Forward
        loss = neg_par_log_likelihood(pred, y, delta)  ###calculate loss
        loss.backward()  ###calculate gradients
        ###force the connections between gene layer and pathway layer w.r.t. 'pathway_mask'
        net.gene.weight.grad = fixed_s_mask(net.gene.weight.grad,
                                            pathway_indices)
        opt.step()  ###update weights and biases
        if sparse_coding:
            net = sparse_func(net, x, age, pt, y, delta, pathway_indices,
                              dropout_rate)
        torch.cuda.empty_cache()
        if epoch % step == step - 1:
            net.eval()
            pred = net(x, age, pt, pathway_indices, dropout_rate)
            train_loss = neg_par_log_likelihood(pred, y, delta).detach().cpu()
            train_cindex = c_index(pred.cpu(), y.cpu(), delta.cpu())
            if train_loss.item() - tolerance > last_loss:
                ###training loss rose beyond tolerance: stop
                break
            else:
                last_loss = train_loss.item()
                opt_net = copy.deepcopy(net)
                loss_tr.append(train_loss.item())
                cidx_tr.append(train_cindex.item())
                ###number of surviving (non-zero) connections per layer
                sp_pathway.append(net.pathway.weight.nonzero().size(0))
                sp_hidden.append(net.hidden.weight.nonzero().size(0))
                print('[%d] Loss in Train: %.4f' % (epoch + 1, train_loss))
                print('[%d] CIndex in Train: %.3f' % (epoch + 1, train_cindex))
                print('Connections between pathway and hidden layer: ',
                      net.pathway.weight.nonzero().size(0))
                print('Connections between hidden and last hidden layer: ',
                      net.hidden.weight.nonzero().size(0))

    ###bug fix: derive the x axis from the number of recorded points -- the
    ###original np.arange(0, epoch + 1, step) could length-mismatch loss_tr
    ###after an early break and crash plt.plot
    x_axis = (np.arange(len(loss_tr)) + 1) * step
    fig = plt.figure()
    plt.plot(x_axis, loss_tr)
    ###NOTE(review): the file name says "cindex" but the curve is the
    ###training loss -- name kept for backward compatibility
    fig.savefig("cindex_" + str(lr) + "_" + str(l2) + ".png")
    np.savetxt("sparsity_pathway.txt", sp_pathway, delimiter=',')
    np.savetxt("sparsity_hidden.txt", sp_hidden, delimiter=',')
    np.savetxt("cindex_entire_data.txt", cidx_tr, delimiter=',')
    torch.save(opt_net.state_dict(), outpath)

    return