Example #1
                    help="load pretrain embedding")
# argparse's type=bool is a pitfall: bool('False') is True, so any value
# passed on the command line parses as True. Flags with a False default
# are expressed as store_true actions instead.
parser.add_argument('-notrain', action='store_true', help="train or not")
parser.add_argument('-limit', default=0, type=int, help="data limit")
parser.add_argument('-log', default='', type=str, help="log directory")
parser.add_argument('-unk', default=True,
                    type=lambda s: s.lower() in ('true', '1'),
                    help="replace unk")
parser.add_argument('-memory',
                    action='store_true',
                    help="memory efficiency")
parser.add_argument('-label_dict_file',
                    default='./data/data/rcv1.json',
                    type=str,
                    help="label_dict")

opt = parser.parse_args()
config = utils.read_config(opt.config)
torch.manual_seed(opt.seed)

# checkpoint
if opt.restore:
    print('loading checkpoint...\n')
    checkpoints = torch.load(opt.restore)

# cuda
# use CUDA only when a device is available and GPUs were requested;
# forcing use_cuda = True here would crash on CPU-only machines
use_cuda = torch.cuda.is_available() and len(opt.gpus) > 0
if use_cuda:
    torch.cuda.set_device(opt.gpus[0])
    torch.cuda.manual_seed(opt.seed)
print('use_cuda:', use_cuda)
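
If a checkpoint was restored above, the usual next step is to load its saved state back into the model and optimizer before training resumes. A minimal sketch, assuming the checkpoint stores 'model' and 'optim' state dicts (key names are not confirmed by this snippet):

# hypothetical continuation; the 'model' and 'optim' keys are assumed
if opt.restore:
    model.load_state_dict(checkpoints['model'])
    optim.load_state_dict(checkpoints['optim'])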
Example #2
from attention.model import StructuredSelfAttention
from attention.train import train
import torch
import data.utils as utils
import data_got
config = utils.read_config("config.yml")
if config.GPU:
    torch.cuda.set_device(3)  # NOTE: hard-coded GPU index; adjust for your machine
print('loading data...\n')
label_num = 54
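# load_data is expected to return the train/test loaders, pretrained
# label and word embedding matrices, the raw test split (X_tst, Y_tst),
# the train labels (Y_trn), and the vocabulary mapping (word_to_id)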
train_loader, test_loader, label_embed, embed, X_tst, word_to_id, Y_tst, Y_trn = data_got.load_data(
    batch_size=config.batch_size)
label_embed = torch.from_numpy(label_embed).float()  # [label_num, 256] label embeddings
embed = torch.from_numpy(embed).float()  # word embedding matrix
print("load done")


def multilabel_classification(attention_model,
                              train_loader,
                              test_loader,
                              epochs,
                              GPU=True):
    """Train and evaluate the self-attention model for multi-label classification."""
    # BCELoss expects sigmoid outputs in [0, 1], one probability per label
    loss = torch.nn.BCELoss()
    opt = torch.optim.Adam(attention_model.parameters(),
                           lr=0.001,
                           betas=(0.9, 0.99))
    train(attention_model, train_loader, test_loader, loss, opt, epochs, GPU)
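
# A BCELoss-trained multi-label model is typically evaluated by
# thresholding its per-label probabilities. Hypothetical sketch, not
# part of the original script: it assumes attention_model(x) returns
# values in [0, 1]; the 0.5 cutoff is a common default, not taken
# from this code.
def predict(attention_model, x, threshold=0.5):
    with torch.no_grad():
        probs = attention_model(x)
    return (probs > threshold).float()  # binary indicator per label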


attention_model = StructuredSelfAttention(
    batch_size=config.batch_size,