Example #1
0
    # Cap TensorFlow's GPU memory grab so other processes can share the card.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)

    sess = tf.InteractiveSession(config=tf.ConfigProto(
        gpu_options=gpu_options))
    logger.info('Creating model')
    # Embedding matrix rows = vocabulary entries, columns = vector dimension.
    vocab_size = embeddings.shape[0]
    embedding_size = embeddings.shape[1]

    # debug output left in by the author; consider switching to logger.debug
    print("embeddings size: ", vocab_size, " , ", embedding_size)

    # Select the classifier architecture from the CLI flag.
    # NOTE(review): both constructors get 2 as the second positional argument
    # (presumably the number of output classes — confirm against the
    # classifier's signature; other snippets in this file pass 3).
    if args.model == 'mlp':
        model = MultiFeedForwardClassifier(args.num_units,
                                           2,
                                           vocab_size,
                                           embedding_size,
                                           use_intra_attention=args.use_intra,
                                           training=True,
                                           project_input=args.no_project,
                                           optimizer=args.optim)
    else:
        model = LSTMClassifier(args.num_units,
                               2,
                               vocab_size,
                               embedding_size,
                               training=True,
                               project_input=args.no_project,
                               optimizer=args.optim)

    # Assign the pretrained embeddings and run variable initialization.
    model.initialize(sess, embeddings)

    # this assertion is just for type hinting for the IDE
Example #2
0
"""
Plot weights in a RTE FF model
"""

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('model',
                        help='Directory containing the '
                        'trained model')
    parser.add_argument('-o',
                        help='Output directory for images '
                        '(default is same as the model)',
                        dest='output')
    args = parser.parse_args()
    # Fall back to the model directory when no output dir was given.
    output = args.output or args.model

    sess = tf.InteractiveSession()
    model = MultiFeedForwardClassifier.load(args.model, sess)

    # Plot every trainable weight matrix as a heatmap and save it as an
    # image named after the variable.
    for var in tf.trainable_variables():
        # skip bias vectors; only weight matrices are visualized
        if 'bias' in var.name:
            continue

        # create valid filenames: strip the ':0' tensor suffix and
        # flatten scope separators into underscores
        name = var.name.replace(':0', '').replace('/', '_')
        values = var.eval()
        pl.matshow(values)
        pl.colorbar()
        path = os.path.join(output, name)
        pl.savefig(path)
        # Close the figure; matshow opens a new one each iteration and
        # matplotlib otherwise keeps them all alive (warns past ~20 open
        # figures and grows memory with the number of variables).
        pl.close()
                            train_data.sentences2.shape))
    # Log the tensor shapes of each dataset split for sanity checking.
    logger.debug(msg.format('Validation',
                            valid_data.sentences1.shape,
                            valid_data.sentences2.shape))
    logger.debug(msg.format('Test',
                            test_data.sentences1.shape,
                            test_data.sentences2.shape))

    sess = tf.InteractiveSession()
    logger.info('Creating model')
    # Embedding matrix rows = vocabulary entries, columns = vector dimension.
    vocab_size = embeddings.shape[0]
    embedding_size = embeddings.shape[1]

    # NOTE(review): this constructor call passes max_len, learning_rate,
    # clip_value, l2_constant and mode, unlike the other snippets in this
    # file — presumably a different/older version of the classifier API;
    # confirm against the class definition.
    model = MultiFeedForwardClassifier(args.num_units,
                           3, vocab_size, embedding_size, max_len,
                           use_intra_attention=args.use_intra,
                           training=True, learning_rate=args.rate,
                           clip_value=args.clip_norm, l2_constant=args.l2,
                           project_input=args.no_project, mode=args.mode)
    model.initialize(sess, embeddings)

    if args.load:
        # NOTE(review): 'vars' shadows the builtin of the same name;
        # rename (e.g. checkpoint_vars) when this branch is revisited.
        vars = tf.contrib.framework.list_variables(args.load)
        # NOTE(review): the block below is dead, commented-out code for a
        # one-off checkpoint migration (renaming 'biases' -> 'bias');
        # delete it once the migration is no longer needed.
        # with tf.Graph().as_default(), tf.Session().as_default() as sess:
        #     new_vars = []
        #for name, shape in vars:
        #    print(name)
        #         v = tf.contrib.framework.load_variable(args.load, name)
        #         new_vars.append(tf.Variable(v, name = name.replace('biases', 'bias')))
        #     saver = tf.train.Saver(new_vars)
        #     sess.run(tf.global_variables_initializer())
        #     saver.save(sess, args.load + '/new-model')
Example #4
0
    # Second dimension of the embedding matrix is the vector size.
    embedding_size = embeddings.shape[1]

    if is_really_cont:
        # Continuing a previous run: restore the saved model (and its
        # saver) from the warm-up checkpoint instead of building fresh.
        model_class = utils.get_model_class(params)
        model, saver = model_class.load(warmup_model,
                                        sess,
                                        training=True,
                                        embeddings=embeddings)

    else:
        # Fresh run: build the architecture selected on the command line.
        # NOTE(review): the second positional argument 3 is presumably the
        # number of output classes — confirm against the constructor.
        if args.model == 'mlp':
            model = MultiFeedForwardClassifier(
                args.num_units,
                3,
                vocab_size,
                embedding_size,
                use_intra_attention=args.use_intra,
                training=True,
                project_input=args.no_project,
                optimizer=args.optim)
        else:
            model = LSTMClassifier(args.num_units,
                                   3,
                                   vocab_size,
                                   embedding_size,
                                   training=True,
                                   project_input=args.no_project,
                                   optimizer=args.optim)

        # Only freshly-built models are initialized here; loaded models
        # already carry their restored variable values.
        model.initialize(sess, embeddings)