Example #1
import torch.optim as optim

def train(from_version=1, epochs=50):
    global embd, disc, embd_optim, disc_optim
    # Restore the embedding and discriminator from the checkpoint
    # saved under version `from_version`.
    embd = Embedding(len(dictionary), VEC_SIZE).load(
        '.', from_version, device)
    disc = Discriminator(VEC_SIZE).load('.', from_version, device)
    embd_optim = optim.SGD(embd.parameters(), lr=lr)
    disc_optim = optim.SGD(disc.parameters(), lr=lr)
    for i in range(epochs):
        train_epoch()
        # Checkpoint both models after every epoch.
        embd.save('.', i + from_version)
        disc.save('.', i + from_version)
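
A minimal usage sketch, assuming the module-level names referenced above (dictionary, VEC_SIZE, lr, device, train_epoch) are already defined; the version and epoch count here are illustrative:

# Resume from checkpoint version 5 and train for 20 more epochs.
train(from_version=5, epochs=20)
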
Example #2
    # Move the models to the selected GPU and seed CUDA for reproducibility.
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpuId)
        torch.cuda.manual_seed(seed)
        embedding.cuda()
        tagger.cuda()
        print('**** Running with GPU-' + str(args.gpuId) + ' ****\n')
    else:
        useGpu = False
        print('**** Warning: GPU is not available ****\n')

# size_average=False is deprecated in current PyTorch; reduction='sum'
# is the equivalent setting. Targets equal to -1 (e.g., padding) are ignored.
criterionTagger = nn.CrossEntropyLoss(reduction='sum', ignore_index=-1)

# Split the training and development sets into batches of batchSize.
batchListTrain = utils.buildBatchList(len(corpus.trainData), batchSize)
batchListDev = utils.buildBatchList(len(corpus.devData), batchSize)

totalParams = list(embedding.parameters()) + list(tagger.parameters())

# Group parameters so weight decay can be applied selectively:
# biases and embedding weights are exempt, while the LSTM encoder
# and the MLP layers form separate groups.
lstmParams = []
mlpParams = []
withoutWeightDecay = []
for name, param in list(embedding.named_parameters()) + list(
        tagger.named_parameters()):
    if not param.requires_grad:  # skip frozen parameters
        continue
    if 'bias' in name or 'Embedding' in name:
        withoutWeightDecay += [param]
    elif 'encoder' in name:
        lstmParams += [param]
    else:
        mlpParams += [param]
optParams = [{
    'params': lstmParams,