Example #1
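# Expert recommendation: fetch the HGTExpertRec module (a Heterogeneous Graph Transformer)
# from the OpenKS registry, point it at the bundled NSF/DBLP knowledge-graph data,
# then train and evaluate it.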
from openks.models import OpenKSModel

# parse_args(), logger and the ExpertRecModel type hint are defined elsewhere
# in the full example script.
def main():
    args = parse_args()
    OpenKSModel.list_modules()
    model: ExpertRecModel = OpenKSModel.get_module("PyTorch", "HGTExpertRec")(
        "openks/data/nsf_dblp_kg/nsfkg/", args)
    model.preprocess_data()
    model.load_data_and_model()
    logger.info('Training HGT with #param: %d' % model.get_n_params())
    model.train_expert()
    model.evaluate()
Example #2
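# Open information extraction: build the command-line options (CUDA, semi-supervised
# pre-training, unlabeled-data location, evaluation switches) and run the 'mimo'
# model through the PyTorch 'openie' executor.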
import argparse

from openks.models import OpenKSModel

# The original snippet starts mid-way through argument parsing; a minimal
# parser setup is added here so the example is self-contained.
parser = argparse.ArgumentParser()
parser.add_argument('--nu_datasets', type=int, default=6)
parser.add_argument('--num_pass',
                    type=int,
                    default=5,
                    help='num of pass for evaluation')
parser.add_argument('--cuda', action='store_true', help='use CUDA')
parser.add_argument('--pretrain', action='store_true')
parser.add_argument('--is_semi', action='store_true')
parser.add_argument('--udata',
                    type=str,
                    default='./udata/stmts-demo-unlabeled-pubmed',
                    help='location of the unlabeled data')
parser.add_argument('--AR', action='store_true')
parser.add_argument('--TC', action='store_true')
parser.add_argument('--TCDEL', action='store_true')
parser.add_argument('--SH', action='store_true')
parser.add_argument('--DEL', action='store_true')
parser.add_argument('--run_eval', action='store_true')
args = parser.parse_args()

# Algorithm / model selection and configuration
platform = 'PyTorch'
executor = 'openie'
model = 'mimo'
print("According to the configuration, using the {} framework and a {}-type {} model.".format(platform, executor, model))
print("-----------------------------------------------")
# Model training
executor = OpenKSModel.get_module(platform, executor)
hypernym_discovery = executor(args=args)
hypernym_discovery.run()
print("-----------------------------------------------")
Example #3
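# Knowledge-graph embedding: load a local graph with GraphLoader, then train a
# TransR model through the Paddle 'KGLearn' executor using the hyperparameters below.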
from openks.models import OpenKSModel

# `GraphLoader` and `loader_config` come from OpenKS's data-loading module;
# that setup is omitted at the top of this snippet.
loader_config.data_name = 'my-data-set'
# Load the graph data structure
graph_loader = GraphLoader(loader_config)
graph = graph_loader.graph
graph.info_display()
''' Graph representation learning: model training '''
# List the loaded models
OpenKSModel.list_modules()
# Algorithm / model selection and configuration
args = {
	'gpu': False, 
	'learning_rate': 0.001, 
	'epoch': 10, 
	'batch_size': 1000, 
	'optimizer': 'adam', 
	'hidden_size': 50, 
	'margin': 4.0, 
	'model_dir': './', 
	'eval_freq': 10
}
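# The dictionary above holds the KGLearn training hyperparameters: optimizer and
# learning rate, hidden_size (presumably the embedding dimension), the margin of
# TransR's margin-based ranking loss, batch size, evaluation frequency (eval_freq),
# and the output directory (model_dir).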
platform = 'Paddle'
executor = 'KGLearn'
model = 'TransR'
print("根据配置,使用 {} 框架,{} 执行器训练 {} 模型。".format(platform, executor, model))
print("-----------------------------------------------")
# 模型训练
executor = OpenKSModel.get_module(platform, executor)
kglearn = executor(graph=graph, model=OpenKSModel.get_module(platform, model), args=args)
kglearn.run()
print("-----------------------------------------------")
Example #4
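# Relation extraction: command-line options for an entity-then-relation pipeline
# (entity prediction files, context window, marker tokens, fp16 loss scaling) on
# the ace04 / ace05 / scierc tasks, run through the PyTorch 'RelationExtraction' executor.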
                    help="Whether not to use CUDA when available")
parser.add_argument('--seed', type=int, default=0,
                    help="random seed for initialization")
parser.add_argument("--bertadam", action="store_true", help="If bertadam, then set correct_bias = False")
parser.add_argument("--entity_output_dir", type=str, default=None, help="The directory of the prediction files of the entity model")
parser.add_argument("--entity_predictions_dev", type=str, default="ent_pred_dev.json", help="The entity prediction file of the dev set")
parser.add_argument("--entity_predictions_test", type=str, default="ent_pred_test.json", help="The entity prediction file of the test set")
parser.add_argument("--prediction_file", type=str, default="predictions.json", help="The prediction filename for the relation model")
parser.add_argument("--feature_file", type=str, default="feature_default", help="The prediction filename for the relation model")
parser.add_argument('--task', type=str, default=None, required=True, choices=['ace04', 'ace05', 'scierc'])
parser.add_argument('--context_window', type=int, default=0)
parser.add_argument('--add_new_tokens', action='store_true', 
                    help="Whether to add new tokens as marker tokens instead of using [unusedX] tokens.")
parser.add_argument('--loss_scale', type=float, default=0,
                    help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                            "0 (default value): dynamic loss scaling.\n"
                            "Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()

platform = 'PyTorch'
executor = 'RelationExtraction'
model = 'RelationExtraction'
print("根据配置,使用 {} 框架,{} 执行器训练 {} 模型。".format(platform, executor, model))
print("-----------------------------------------------")
# 模型训练
executor = OpenKSModel.get_module(platform, executor)
nero = executor(dataset=dataset, model=OpenKSModel.get_module(platform, model), args=args)
nero.run()

print("-----------------------------------------------")
Example #5
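# Keyphrase extraction: configure a topic-aware RAKE extractor (domain and common
# stopword lists, scoring thresholds) and run it through the MLLib 'KELearn' executor.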
from openks.models import OpenKSModel

# List the loaded models
OpenKSModel.list_modules()
# Algorithm / model selection and configuration
args = {
    'extractor': 'topic-rake', 
    'finetuned': '/path/to/finetuned/word_embedding',
    'stopword': '/path/to/domain/stopwords.txt',
    'stopword_open': '/path/to/common/stopwords.txt', 
    'params': {
        'MIN_SCORE_TOTAL': 0.2,
        'MIN_WORD_LEN': 3,
        'SUFFIX_REMOVE': True,
        'STOPWORD_SINGLE_CHECK': True,
        'OPEN_STOPWORD': True,
        'WORD_SEPARATOR': True
    },
    # `loader_config` comes from the data-loading step omitted in this snippet
    'result_dir': loader_config.source_uris,
    'rank': 'average'
}

platform = 'MLLib'
executor = 'KELearn'
model = 'keyphrase-rake-topic'
print("根据配置,使用 {} 框架,{} 执行器训练 {} 模型。".format(platform, executor, model))
print("-----------------------------------------------")
# 模型训练
executor = OpenKSModel.get_module(platform, executor)
text_keyphrase = executor(dataset=dataset, model=OpenKSModel.get_module(platform, model), args=args)
text_keyphrase.run()
print("-----------------------------------------------")
Example #6
# `demand_data`, `val_texts`, `val_abs`, and `val_label` are defined in the
# data-preparation part of the script, which is omitted in this snippet.
val_data = demand_data(val_texts, val_abs, val_label)

#%%
'''TRAINING MODEL'''

print('--'*10)
print('TRAINING MODEL...')
print('--'*10)

# from openks.models.pytorch.attn_inter import AttInter
from torch_geometric.data import Data
from openks.models import OpenKSModel
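# Fetch the AttInter model class from the OpenKS model registry, then define an
# argument parser for its training hyperparameters.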

platform = 'PyTorch'
model_name = 'AttInter'
AttInter = OpenKSModel.get_module(platform, model_name)


import argparse
def parse_args(args=None):
	parser = argparse.ArgumentParser(
		description='Training and Testing Command Predictions Models',
	)
	parser.add_argument('--batch_size', default=1, type=int)
	parser.add_argument('--feat_dim', default=768, type=int)
	parser.add_argument('--conv_emb_dim', default=300, type=int)
	parser.add_argument('--pred_hid_dim', default=84, type=int)
	parser.add_argument('--graph_pooling', default="mean", type=str)
	parser.add_argument('--gnn_type', default="gin", type=str)
	parser.add_argument('--conv_drop_ratio', default=0.0, type=float)
	parser.add_argument('--JK', type=str, default="last")
	# The remaining options are truncated in the original snippet; the return
	# statement below is the assumed completion.
	return parser.parse_args(args)
Example #7
from openks.models import OpenKSModel

OpenKSModel.list_modules()
# Algorithm / model selection and configuration
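# The configuration below trains a dynamic knowledge-graph embedding model ('DyE')
# on a previously loaded graph through the PyTorch 'KGLearn_Dy' executor.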
args = {
    'gpu': True,
    'learning_rate': 0.001,
    'epoch': 500,
    'batch_size': 1024,
    'optimizer': 'adam',
    'hidden_size': 500,
    'margin': 4.0,
    'model_dir': './',
    'eval_freq': 20,
    'gamma': 12.0,
    'epsilon': 2.0
}
platform = 'PyTorch'
executor = 'KGLearn_Dy'
model = 'DyE'
args['model_dir'] = model + '.pt'
print("根据配置,使用 {} 框架,{} 执行器训练 {} 模型。".format(platform, executor, model))
print("-----------------------------------------------")
# 模型训练
executor = OpenKSModel.get_module(platform, executor)
print('--')
print(OpenKSModel.get_module(platform, model))
kglearn = executor(graph=graph,
                   model=OpenKSModel.get_module(platform, model),
                   args=args)
kglearn.run()
print("-----------------------------------------------")