Пример #1
0
def main(_):
	"""Dispatch on args.mode: train the model, evaluate it, or dump predictions.

	Relies on module-level `args`, `model_configs`, and the data-loading
	helpers; `_` is the unused flags argument passed by the app runner.
	"""
	session = tf.compat.v1.Session()
	net = MyModel(session, model_configs)

	if args.mode == "train":
		x_train, y_train, _, _ = load_data(args.data_dir)
		net.train(x_train, y_train, 200)
	elif args.mode == "test":
		# Testing on public testing dataset
		_, _, x_test, y_test = load_data(args.data_dir)
		net.evaluate(x_test, y_test)
	elif args.mode == "predict":
		# Predicting and storing results on private testing dataset
		x_test = load_testing_images(args.data_dir)
		np.save("../predictions.npy", net.predict_prob(x_test))
Пример #2
0
parser = argparse.ArgumentParser()
parser.add_argument("--mode", help="train, test or predict")
parser.add_argument("--data_dir", help="path to the data")
parser.add_argument("--test_file", help="path to the test file")
parser.add_argument("--save_dir", help="path to save the results")
args = parser.parse_args()

if __name__ == '__main__':
    model = MyModel(model_configs, training_configs)

    if args.mode == 'train':
        # Hold out a validation split from the training data, train, then
        # checkpoint under <save_dir>/<version>/ (trailing "" keeps it a dir).
        x_train, y_train, x_test, y_test = load_data(args.data_dir)
        x_train, y_train, x_valid, y_valid = train_valid_split(x_train, y_train)
        model.train(x_train, y_train, x_valid, y_valid)
        weights_dir = os.path.join(args.save_dir, model_configs["version"], "")
        model.save_weights(weights_dir)
        model.evaluate(x_test, y_test)

    elif args.mode == 'test':
        # Evaluate a saved checkpoint on the public testing dataset.
        weights_dir = os.path.join(args.save_dir, model_configs["version"], "")
        model.load_weights(weights_dir)
        _, _, x_test, y_test = load_data(args.data_dir)
        model.evaluate(x_test, y_test)

    elif args.mode == 'predict':
        # Predicting and storing results on private testing dataset
        weights_dir = os.path.join(args.save_dir, model_configs["version"], "")
        model.load_weights(weights_dir)
Пример #3
0
# Select the compute device once, up front.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

if __name__ == '__main__':
	model = MyModel(model_configs)
	if args.mode == 'train':
		print('----- training mode ----')
		# Augment the train data according to the training config.
		train,test,orig_trainset = load_data(args.data_dir,train_aug=training_configs['train_augmentation'])

		# train_ratio=1 keeps the full training set; `valid` is unused below
		# (model.train is called with valid=None) — TODO confirm that is intended.
		train,valid = train_valid_split(train,orig_trainset,train_ratio=1)
		if args.resume_checkpoint is not None:
			checkpoint = torch.load('../saved_models/' + args.resume_checkpoint)
			epoch,accuracy_type,prev_accuracy =  (checkpoint[k] for k in ['epoch','accuracy_type','accuracy'])
			print('RESUME---> Loading model from Epoch %d with %s Accuracy %f' %(epoch,accuracy_type,prev_accuracy))
		else:
			checkpoint = None
		# Note: test data is used only to evaluate model performance during training.
		model.train(train, training_configs,valid=None,test=test,checkpoint=checkpoint)
		model.evaluate(test)

	elif args.mode == 'test':
		# Testing on public testing dataset
		_, test, _ = load_data(args.data_dir,None)
		if args.checkpoint is not None:
			checkpoint = torch.load('../saved_models/' + args.checkpoint)
			print('Loading Model--->')
		else:
			# Fix: `raise('...')` raises a TypeError ("exceptions must derive from
			# BaseException") instead of reporting the real problem — raise a
			# proper exception type with the same message.
			raise ValueError('No Checkpoint file specified! Specify one with --checkpoint')

		model.network.load_state_dict(checkpoint['net'])
		test_accuracy, correct, total = model.evaluate(test)
		# Fix: typo "Correctt" -> "Correct" in the results message.
		print("[%s%s test results] Model Accuracy %f, Total Correct %d, Total Test Samples %d" %(args.checkpoint,utils.get_time(),test_accuracy,correct,total))
Пример #4
0
# Evaluation loader over the held-out split (train=False); shuffling is off so
# evaluation order is deterministic.  NOTE(review): assumes `config.batch_size`
# and `LegalDataset` are defined earlier in the file — confirm.
test_dataloader = DataLoader(LegalDataset(train=False),
                             batch_size=config.batch_size,
                             shuffle=False,
                             num_workers=2)
# Step size for the Adam optimizer created below.
learning_rate = 0.001

def to_cuda(param_list):
    """Move each element of *param_list* to the GPU; returns a new list."""
    return [item.cuda() for item in param_list]


# Multi-task training loop: each batch yields one prediction per task and the
# optimizer minimises the unweighted sum of the three cross-task losses.
model = MyModel().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for i in range(config.num_epoch):
    total_loss = 0.
    model.train()  # enable training-mode behaviour (dropout, BN updates)
    for batch_data in tqdm.tqdm(train_dataloader, desc='Epoch %3d' % (i + 1)):
        # Batch unpacks to (fact text, term, law, accusation) — TODO confirm
        # this ordering against the Dataset's __getitem__.
        curr_fact, term, law, accu = to_cuda(batch_data)
        optimizer.zero_grad()
        # print('term', term)
        # print('law', law)
        # print('accu', accu)
        pred1, pred2, pred3 = model(curr_fact)
        accu_loss = loss_function(pred1, accu)
        law_loss = loss_function(pred2, law)
        term_loss = loss_function(pred3, term)
        # Joint objective: plain sum, no per-task weighting.
        loss = accu_loss + law_loss + term_loss
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    # Per-task ground-truth accumulators; they are filled by code past the end
    # of this excerpt.
    truth1, truth2, truth3 = [], [], []
Пример #5
0
def main(miRNA_Disease_Association, disease_feature, disease_graph1,
         disease_graph2, disease_graph3, miRNA_feature, miRNA_graph1,
         miRNA_graph2, miRNA_graph3):
    """Run K-fold training/evaluation of MyModel on the miRNA-disease graph.

    For each fold a fresh model is trained with Adam until EPOCH epochs or
    early stopping (relative loss change < STOP_THRESHOLD once past
    TOLERANCE_EPOCH), evaluated for AUC/AUPR on the held-out fold, and the
    fold ROC is accumulated.  Finally the averaged ROC curve is saved to
    ../Data/Result/mean_tpr.npy and the mean AUC/AUPR are printed.

    NOTE(review): depends on module-level constants (KFold, SEED, EPOCH, LR,
    GAMA, ALPHA, BETA, TOLERANCE_EPOCH, EVAL_INTER, STOP_THRESHOLD) and on a
    CUDA device being available — confirm at the call site.
    """

    adjProcess = adjTrainTestSplit(miRNA_Disease_Association)
    graph_train_kfold, graph_test_kfold = adjProcess.split_graph(KFold, SEED)

    auc_kfold = []
    aupr_kfold = []
    mean_tpr = 0.0  # accumulates interpolated TPRs to draw the mean ROC curve
    mean_fpr = np.linspace(0, 1, 100)

    for i in range(KFold):
        print("Using {} th fold dataset.".format(i + 1))
        graph_train = graph_train_kfold[i]
        graph_test = graph_test_kfold[i]

        # m = graph_train.shape[0]
        # n = graph_train.shape[1]
        # eval_coord = [(i, j) for i in range(m) for j in range(n)]
        # train_edge_x, train_edge_y = graph_train.nonzero()
        # one_index = list(zip(train_edge_x, train_edge_y))
        # zero_index = set(eval_coord) - set(set(zip(train_edge_x, train_edge_y)))
        # zero_index = list(zero_index)

        # Training adjacency used as the reconstruction target (sic: "traget").
        adj_traget = torch.FloatTensor(graph_train)
        model = MyModel(disease_feature, disease_graph1, disease_graph2,
                        disease_graph3, miRNA_feature, miRNA_graph1,
                        miRNA_graph2, miRNA_graph3)
        model.cuda()
        obj = Myloss(adj_traget.cuda())
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=LR,
                                     amsgrad=True,
                                     weight_decay=GAMA)
        evaluator = Evaluator(graph_train, graph_test)
        #obj_test = Myloss(torch.FloatTensor(graph_test).cuda())

        for j in range(EPOCH):
            model.train()
            optimizer.zero_grad()
            Y_hat, m_x0, m_x1, m_x2, m_x3, d_x0, d_x1, d_x2, d_x3 = model()
            loss = obj.cal_loss(Y_hat, m_x0.cuda(), m_x1.cuda(), m_x2.cuda(),
                                m_x3.cuda(), d_x0.cuda(), d_x1.cuda(),
                                d_x2.cuda(), d_x3.cuda(), ALPHA, BETA, GAMA)
            loss = loss.cuda()
            # loss = obj.cal_loss(Y_hat,one_index,zero_index)
            #loss = obj.cal_loss(Y_hat,one_index,zero_index,m_x0,m_x1,m_x2,m_x3,d_x0, d_x1, d_x2,d_x3)
            loss.backward()
            optimizer.step()

            # Early-stop when the relative loss change is tiny.  NOTE(review):
            # `last_loss` is first assigned at the bottom of this loop body, so
            # this assumes TOLERANCE_EPOCH >= 0 (the check cannot fire at j == 0).
            need_early_stop_check = j > TOLERANCE_EPOCH and abs(
                (loss.item() - last_loss) / last_loss) < STOP_THRESHOLD
            # Evaluate every EVAL_INTER epochs, on an early-stop trigger, and on
            # the final epoch.
            if (j % EVAL_INTER
                    == 0) or need_early_stop_check or j + 1 >= EPOCH:
                t = time.time()
                model.eval()
                with torch.no_grad():
                    Y_hat, m_x0, m_x1, m_x2, m_x3, d_x0, d_x1, d_x2, d_x3 = model(
                    )
                    #test_loss = obj_test.cal_loss(Y_hat, m_x0, m_x1, m_x2, m_x3, d_x0, d_x1, d_x2, d_x3,ALPHA,BETA)
                    # Y_hat = torch.sigmoid(Y_hat)
                    #  eval_coord = [(i, j) for i in range(m) for j in range(n)]
                    #  test_edge_x, test_edge_y = graph_test.nonzero()
                    #  test_one_index = list(zip(test_edge_x, test_edge_y))
                    #  #test_zero_index = set(eval_coord) - set(set(zip(test_edge_x, test_edge_y)))
                    #  test_zero_index = list(test_zero_index)
                    #  #test_loss = obj_test.cal_loss(Y_hat, test_one_index, test_zero_index)
                    auc_test, aupr_test, fpr, tpr = evaluator.eval(Y_hat.cpu())

                    print("Epoch:", '%04d' % (j + 1), "train_loss=",
                          "{:0>9.5f}".format(loss.item()), "test_auc=",
                          "{:.5f}".format(auc_test), "test_aupr=",
                          "{:.5f}".format(aupr_test), "time=",
                          "{:.2f}".format(time.time() - t))
                if need_early_stop_check or j + 1 >= EPOCH:
                    # Record this fold's final metrics and its ROC interpolated
                    # onto the shared FPR grid, then leave the epoch loop.
                    auc_kfold.append(auc_test)
                    aupr_kfold.append(aupr_test)
                    mean_tpr += np.interp(mean_fpr, fpr, tpr)
                    mean_tpr[0] = 0.0
                    if need_early_stop_check:
                        print("Early stopping...")
                    else:
                        print("Arrived at the last Epoch...")
                    break

            last_loss = loss.item()
            torch.cuda.empty_cache()

    print("\nOptimization Finished!")
    # Average the accumulated ROC across folds; pin the endpoint at (1, 1).
    mean_tpr /= KFold
    mean_tpr[-1] = 1.0
    np.save("../Data/Result/mean_tpr.npy", mean_tpr)
    mean_auc = sum(auc_kfold) / len(auc_kfold)
    mean_aupr = sum(aupr_kfold) / len(aupr_kfold)
    print("mean_auc:{0:.3f},mean_aupr:{1:.3f}".format(mean_auc, mean_aupr))
Пример #6
0
parser = argparse.ArgumentParser()
parser.add_argument("mode", help="train, test or predict")
parser.add_argument("data_dir", help="path to the data")
parser.add_argument("--result_dir", help="path to save the results")
args = parser.parse_args()

if __name__ == '__main__':
    print(args.mode, args.data_dir)
    model = MyModel(model_configs)
    # model.load()
    if args.mode == 'train':
        x_train, y_train, x_test, y_test = load_data(args.data_dir)
        x_train, y_train, x_valid, y_valid = train_valid_split(
            x_train, y_train)

        train_stats = model.train(x_train, y_train, training_configs, x_valid,
                                  y_valid)
        # Persist per-key training statistics as <save_dir>/<name>.csv.
        # Fix: the original `csv.writer(open(...))` leaked the file handle
        # (never closed/flushed); a context manager guarantees cleanup.
        # newline="" is the documented requirement for csv writer files.
        stats_path = os.path.join(model_configs["save_dir"],
                                  model_configs['name']) + ".csv"
        with open(stats_path, "w", newline="") as stats_file:
            w = csv.writer(stats_file)
            for key, val in train_stats.items():
                w.writerow([key, val])
        score, loss = model.evaluate(x_test, y_test)
        print("The test score is: {:.3f}% ({:.4f})".format(score * 100, loss))

    elif args.mode == 'test':
        model.load()
        # Testing on public testing dataset
        _, _, x_test, y_test = load_data(args.data_dir)
        score, loss = model.evaluate(x_test, y_test)
        print("The test score is: {:.3f}% ({:.4f})".format(score * 100, loss))