Example No. 1
    'n_layers': 1,
    'hidden_size': nli_args.hidden_size,
    'bidirectional': nli_args.bidirectional,
    'embedding_size': 300,
    'fix_emb': True,
    'dp_ratio': 0.3,
    'd_out': 2,  # 2 classes
    'mlp_classif_hidden_size_list': [512, 512],
    'cuda': torch.cuda.is_available(),
})
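# keep a plain-dict copy of the SQuAD config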
squad_state = {k: v for k, v in squad_args.items()}

if __name__ == "__main__":
    print(nli_args)

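    # build the NLI data manager and size the embedding table from its vocabulary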
    nli_dm = wrangle.DataManager(nli_args)
    nli_args.n_embed = nli_dm.vocab.n_words
    if nli_args.type == 'siamese':
        nli_model = siamese_pytorch.SiameseClassifier(config=nli_args)

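        # build the SQuAD data manager and a classifier that shares the NLI model's encoder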
        squad_dm = SquadDataManager(squad_args)
        squad_args.n_embed = squad_dm.vocab.n_words
        squad_model = squad_pytorch.SquadClassifier(
            config=squad_args,
            encoder=nli_model.encoder,
        )
    else:
        raise Exception('model type not supported')

    print("number of trainable parameters found {}".format(
        sum(param.nelement()
Example No. 2
    'embedding_size': 300,
    'fix_emb': True,
    'dp_ratio': 0.3,
    'd_out': 2,  # 2 classes
    'mlp_classif_hidden_size_list': [512, 512],
    'cuda': torch.cuda.is_available(),
})
squad_state = {k: v for k, v in squad_args.items()}


if __name__ == "__main__":
    print(args)
    checkpoint = sys.argv[1]
    print('found checkpoint dir {}'.format(checkpoint))

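    # rebuild the vocabulary (optionally including SQuAD) so it matches the checkpoint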
    dm = wrangle.DataManager(args)
    if args.add_squad:  # add squad to vocab to match checkpoint
        squad_dm = SquadDataManager(squad_args, vocab=dm.vocab)
    args.n_embed = dm.vocab.n_words
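    # instantiate the model selected by args.type and load pretrained word embeddings from EMBED_DATA_PATH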
    if args.type == 'siamese':
        model = siamese_pytorch.SiameseClassifier(config=args)
        model.embed.weight.data = load_embeddings.load_embeddings(
            dm.vocab, constants.EMBED_DATA_PATH, args.embedding_size)
    elif args.type == 'decomposable':
        model = decomposable_pytorch.SNLIClassifier(config=args)
        model.encoder.embedding.weight.data = load_embeddings.load_embeddings(
            dm.vocab, constants.EMBED_DATA_PATH, args.embedding_size)
    else:
        model = Seq2SeqPytorch(args=args, vocab=dm.vocab)
        model.encoder.embedding.weight.data = load_embeddings.load_embeddings(
            dm.vocab, constants.EMBED_DATA_PATH, args.embedding_size)