Example #1
    zh_hidden = 1800
    zh_dims = 712
    input_dropout_p = 0.5
    dropout_p = 0.5
    enc_layers = 2
    dec_layers = 2
    en_max_len = 50
    zh_max_len = 60
    beam_size = 5

    net = Seq2Seq(en_dims=en_dims,
                  zh_dims=zh_dims,
                  input_dropout_p=input_dropout_p,
                  dropout_p=dropout_p,
                  en_hidden=en_hidden,
                  zh_hidden=zh_hidden,
                  enc_layers=enc_layers,
                  dec_layers=dec_layers,
                  beam_size=beam_size,
                  en_max_len=en_max_len,
                  zh_max_len=zh_max_len)

    pre_trained = torch.load(
        '/data/xuwenshen/ai_challenge/code/fix_lens/models/' + args.model_path)
    net.load_state_dict(pre_trained)
    print(net)
    net.eval()

    test(test_loader=test_loader,
         transform=transform,
         net=net,
         batch_size=batch_size)  # closing argument assumed, following the test() call in Example #2
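
The project's test() helper itself is not shown in any of these excerpts. The loop below is a minimal sketch, assuming the DataLoader yields dicts of source-token tensors and that Transform exposes a hypothetical i2t() method for turning predicted ids back into text; it only illustrates the usual evaluate-under-no_grad pattern, not the project's actual implementation.

    import torch

    def test_sketch(test_loader, transform, net):
        net.eval()                        # disable dropout for inference
        outputs = []
        with torch.no_grad():             # gradients are not needed at test time
            for batch in test_loader:
                en_ids = batch['en']      # assumed batch layout
                preds = net(en_ids)       # forward pass (beam search happens inside the model)
                outputs.extend(transform.i2t(preds))  # i2t() is a hypothetical decode helper
        return outputs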
Example #2
File: test.py Project: hmxv2/NMT
    zh_voc_path = '/data/xuwenshen/ai_challenge/data/train/train/zh_voc.json'
    transform = Transform(en_voc_path=en_voc_path, zh_voc_path=zh_voc_path)

    en_dims = 800
    en_voc = 50004
    zh_dims = 800
    zh_voc = 4004
    en_hidden = 800
    zh_hidden = 1000
    atten_vec_size = 1200

    net = Seq2Seq(en_dims=en_dims,
                  en_voc=en_voc,
                  zh_dims=zh_dims,
                  zh_voc=zh_voc,
                  dropout=1,
                  en_hidden=en_hidden,
                  zh_hidden=zh_hidden,
                  atten_vec_size=atten_vec_size,
                  entext_len=60)

    pre_trained = torch.load(
        '/data/xuwenshen/ai_challenge/code/bengio/models/ssprob-0.666313-loss-5.194733-score-0.339406-steps-41200-model.pkl'
    )
    net.load_state_dict(pre_trained)
    print(net)

    test(test_loader=test_loader,
         transform=transform,
         net=net,
         batch_size=batch_size)
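
Unlike Example #1, this snippet never calls net.eval() before testing, so any standard dropout layers stay in training mode unless the helper switches them off. A minimal defensive loading sequence, assuming the checkpoint is a plain state_dict as the load_state_dict call above implies (checkpoint_path is a hypothetical stand-in for the hard-coded path):

    import torch

    state_dict = torch.load(checkpoint_path, map_location='cpu')  # map_location lets a CPU-only machine load a GPU-saved checkpoint
    net.load_state_dict(state_dict)
    net.eval()  # switch off dropout before evaluation, as Example #1 does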
Example #3
    en_dims = 712
    en_voc = 50004
    zh_dims = 712
    zh_voc = 4004
    dropout = 0.5
    en_hidden = 800
    zh_hidden = 712
    channels = 128
    kernel_size = 3

    net = Seq2Seq(en_dims=en_dims,
                  en_voc=en_voc,
                  zh_dims=zh_dims,
                  zh_voc=zh_voc,
                  dropout=dropout,
                  en_hidden=en_hidden,
                  zh_hidden=zh_hidden,
                  channels=channels,
                  kernel_size=kernel_size,
                  entext_len=60)

    #pre_trained = torch.load('./models/ssprob-1.000000-loss-6.934007-steps-134044 model.pkl')
    #net.load_state_dict(pre_trained)
    print(net)

    epoch = 10000
    lr = 0.001

    hyperparameters = {
        'learning rate': lr,
        'batch size': batch_size,
    }
Example #4
    
    
    en_dims = 256
    zh_dims = 256
    dropout = 0.5
    en_hidden = 256
    zh_hidden = 400
    atten_vec_size = 712
    channels = 1024
    kernel_size = 1
    
    net = Seq2Seq(en_dims=en_dims, 
                  zh_dims=zh_dims,
                  dropout=dropout,
                  en_hidden=en_hidden, 
                  zh_hidden=zh_hidden,
                  atten_vec_size=atten_vec_size,
                  channels=channels,
                  kernel_size=kernel_size,
                  entext_len=60)
    
    pre_trained = torch.load('./models/ssprob-0.777249-loss-5.034320-score-0.366200-steps-100400-model.pkl') 
    net.load_state_dict(pre_trained)
    print(net)
 
    epoch = 10000
    lr = 0.001

    hyperparameters = {
        'learning rate': lr,
        'batch size': batch_size,
    }
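
Examples #3 and #4 stop right after the hyperparameter block, before any optimizer is created. The continuation below is a minimal sketch, assuming an Adam optimizer and a model forward that returns the training loss; train_loader and the forward signature are assumptions, not part of the project's shown code.

    import torch.optim as optim

    optimizer = optim.Adam(net.parameters(), lr=lr)   # Adam chosen only for illustration

    for e in range(epoch):
        for batch in train_loader:                    # train_loader is assumed, not shown above
            optimizer.zero_grad()
            loss = net(batch['en'], batch['zh'])      # assumed: the model returns its training loss
            loss.backward()
            optimizer.step()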