    model = SoftPatternClassifier(pattern_specs, mlp_hidden_dim, num_mlp_layers, num_classes,
                                  embeddings, vocab, semiring, args.bias_scale_param, args.gpu,
                                  rnn=rnn, pre_computed_patterns=None)

    if args.gpu:
        print("Cuda!")
        model.to_cuda(model)
        state_dict = torch.load(args.input_model)
    else:
        # map_location keeps the saved tensors on CPU storage, so a checkpoint
        # written on a GPU machine can still be loaded without a GPU.
        state_dict = torch.load(args.input_model,
                                map_location=lambda storage, loc: storage)

    # Load the saved parameters into the model
    model.load_state_dict(state_dict)

    interpret_documents(model, args.batch_size, dev_data, dev_text, args.ofile,
                        args.max_doc_len)

    return 0


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[soft_pattern_arg_parser(),
                 general_arg_parser()])
    parser.add_argument("--ofile", help="Output file", required=True)

    sys.exit(main(parser.parse_args()))
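
Both branches above hinge on torch.load's map_location argument, which keeps CUDA storages on the CPU when no GPU is available. Below is a minimal standalone sketch of that pattern; the nn.Linear stand-in model and the path "model.pt" are hypothetical and not part of the script above.

import torch
import torch.nn as nn

# Hypothetical stand-in model and checkpoint path, for illustration only.
model = nn.Linear(10, 2)
torch.save(model.state_dict(), "model.pt")

if torch.cuda.is_available():
    # CUDA tensors in the checkpoint are restored onto the GPU.
    state_dict = torch.load("model.pt")
    model.cuda()
else:
    # map_location remaps CUDA storages to CPU, so a checkpoint saved
    # on a GPU machine can still be loaded on a CPU-only machine.
    state_dict = torch.load("model.pt",
                            map_location=lambda storage, loc: storage)

model.load_state_dict(state_dict)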
Example 2
    else:
        rnn = None

    model = SoftPatternClassifier(pattern_specs, mlp_hidden_dim, num_mlp_layers, num_classes, embeddings, vocab,
                                  semiring, args.bias_scale_param, args.gpu, rnn=rnn, pre_computed_patterns=None,
                                  no_sl=args.no_sl, shared_sl=args.shared_sl, no_eps=args.no_eps,
                                  eps_scale=args.eps_scale, self_loop_scale=args.self_loop_scale)

    if args.gpu:
        state_dict = torch.load(args.input_model)
    else:
        state_dict = torch.load(args.input_model, map_location=lambda storage, loc: storage)

    model.load_state_dict(state_dict)

    if args.gpu:
        model.to_cuda(model)

    visualize_patterns(model, dev_data, dev_text, args.k_best, args.max_doc_len, num_padding_tokens)

    return 0


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     parents=[soft_pattern_arg_parser(), general_arg_parser()])
    parser.add_argument("-k", "--k_best", help="Number of nearest neighbor phrases", type=int, default=5)

    sys.exit(main(parser.parse_args()))
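
These scripts build their command-line interface by composing parent parsers (soft_pattern_arg_parser(), general_arg_parser()) via argparse's parents mechanism. The sketch below shows the same pattern with hypothetical parser names and flags; the key detail is that parent parsers are created with add_help=False so that only the child parser defines -h/--help.

from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser

def embedding_arg_parser():
    # Parent parsers must disable -h/--help, otherwise the child parser
    # that includes them raises a conflicting-option error.
    p = ArgumentParser(add_help=False)
    p.add_argument("--embedding_dim", type=int, default=300,
                   help="Word embedding dimension")
    return p

def optimizer_arg_parser():
    p = ArgumentParser(add_help=False)
    p.add_argument("--learning_rate", type=float, default=0.001,
                   help="Adam learning rate")
    return p

if __name__ == '__main__':
    # Arguments from both parents appear in one combined --help.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter,
                            parents=[embedding_arg_parser(),
                                     optimizer_arg_parser()])
    args = parser.parse_args()
    print(args.embedding_dim, args.learning_rate)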
Example 3
    if model_save_dir is not None:
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)

    print("Training with", model_file_prefix)
    train(train_data,
          dev_data,
          model,
          num_classes,
          model_save_dir,
          args.num_iterations,
          model_file_prefix,
          args.learning_rate,
          args.batch_size,
          args.scheduler,
          gpu=args.gpu,
          clip=args.clip,
          debug=args.debug,
          dropout=args.dropout,
          word_dropout=args.word_dropout,
          patience=args.patience)


if __name__ == '__main__':
    parser = \
        argparse.ArgumentParser(description=__doc__,
                                formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                parents=[training_arg_parser(), general_arg_parser()])
    main(parser.parse_args())
Example 4
    if model_save_dir is not None:
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)

    print("Training with", model_file_prefix)
    train(train_data,
          dev_data,
          model,
          num_classes,
          model_save_dir,
          args.num_iterations,
          model_file_prefix,
          args.learning_rate,
          args.batch_size,
          args.scheduler,
          gpu=args.gpu,
          clip=args.clip,
          debug=args.debug,
          dropout=dropout,
          word_dropout=args.word_dropout,
          patience=args.patience)


if __name__ == '__main__':
    parser = \
        argparse.ArgumentParser(description=__doc__,
                                formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                parents=[lstm_arg_parser(), mlp_arg_parser(), training_arg_parser(), general_arg_parser()])
    main(parser.parse_args())
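
The train(...) calls in Examples 3 and 4 pass a patience argument alongside the optimizer settings; patience is commonly the number of epochs without dev-set improvement before training stops early. The repository's own loop is not shown here, so the following is only a generic sketch of that idea, with toy stand-ins for train_one_epoch and evaluate.

import random

def train_one_epoch(model, data):
    pass  # stand-in for one pass over the training data

def evaluate(model, data):
    return random.random()  # stand-in for dev-set accuracy

def fit(model, train_data, dev_data, num_iterations, patience):
    best_dev_acc = float("-inf")
    epochs_without_improvement = 0
    for epoch in range(num_iterations):
        train_one_epoch(model, train_data)
        dev_acc = evaluate(model, dev_data)
        if dev_acc > best_dev_acc:
            best_dev_acc, epochs_without_improvement = dev_acc, 0
        else:
            epochs_without_improvement += 1
            if epochs_without_improvement >= patience:
                break  # no dev improvement for `patience` epochs: stop early
    return best_dev_acc

print(fit(None, None, None, num_iterations=50, patience=5))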
Example 5
                   "--num_cnn_layers",
                   help="Number of MLP layers",
                   type=int,
                   default=2)
    p.add_argument("-z",
                   "--window_size",
                   help="Size of window of CNN",
                   type=int,
                   default=3)
    p.add_argument("-o",
                   "--pooling",
                   help="Type of pooling to use [max, sum, avg]",
                   type=str,
                   default="max")
    return p


def pooling_cnn_arg_parser():
    p = ArgumentParser(add_help=False,
                       parents=[cnn_arg_parser(),
                                mlp_arg_parser()])
    return p


if __name__ == '__main__':
    parser = \
        ArgumentParser(description=__doc__,
                       formatter_class=ArgumentDefaultsHelpFormatter,
                       parents=[pooling_cnn_arg_parser(), training_arg_parser(), general_arg_parser()])
    main(parser.parse_args())
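
The --pooling flag selects how per-position CNN features are collapsed into a fixed-size document representation (argparse's choices=["max", "sum", "avg"] could enforce the valid values, which the original only documents in the help string). The repository's implementation is not shown above; the sketch below merely illustrates what max, sum, and avg pooling over the sequence dimension look like in PyTorch, on a made-up feature tensor.

import torch

# Hypothetical CNN output: (batch, seq_len, hidden_dim).
features = torch.randn(4, 20, 64)

def pool(features, pooling="max"):
    # Collapse the sequence dimension with the selected operation.
    if pooling == "max":
        return features.max(dim=1).values
    if pooling == "sum":
        return features.sum(dim=1)
    if pooling == "avg":
        return features.mean(dim=1)
    raise ValueError("pooling must be one of: max, sum, avg")

print(pool(features, "avg").shape)  # torch.Size([4, 64])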