Example #1
    # Encode the question one word at a time with the stacked LSTM
    # (same loop as in Example #2 below)
    for i in range(max_words_q):
        if i == 0:
            ques_emb_linear = tf.zeros([batch_size, input_embedding_size])
        else:
            tf.get_variable_scope().reuse_variables()
            ques_emb_linear = tf.nn.embedding_lookup(var_dict['rnnqW'],
                                                     question[:, i - 1])

        ques_emb_drop = tf.nn.dropout(ques_emb_linear, 1 - drop_out_rate)
        ques_emb = tf.tanh(ques_emb_drop)

        output, state = stacked_lstm(ques_emb, state)

    # Normalize the CNN image features across the batch; the learned
    # offset/scale (var_dict['cnnoutbeta'], var_dict['cnnoutscale']) are disabled
    cnn_mean, cnn_var = tf.nn.moments(image, [0])
    cnn_out_true_n = tf.nn.batch_normalization(
        image, cnn_mean, cnn_var, None, None, epsilon)

    # Normalize the final LSTM state the same way; learned offset/scale
    # (var_dict['rnnoutbeta'], var_dict['rnnoutscale']) are disabled
    rnn_mean, rnn_var = tf.nn.moments(state, [0])
    rnn_out_true_n = tf.nn.batch_normalization(
        state, rnn_mean, rnn_var, None, None, epsilon)

    # Fuse the normalized image and question embeddings
    features = combine_embeddings(cnn_out_true_n, rnn_out_true_n, var_dict)
    #features = tf.concat([image,state], 1)
    #features = tf.concat([state,image], 1)

    # Append the noise vector and generate the answer scores
    features2 = tf.concat([features, noise], 1)
    scores_emb = generator_me(features2, var_dict)

    #v1 = tf.Variable(tf.truncated_normal([2048,1000]))
    #v2 = tf.Variable(tf.constant(0.1, shape=[1000]))
    #scores_emb = tf.nn.relu_layer(state, v1, v2)

    #saver = tf.train.Saver()
    #saver.restore(sess, "./train_g_v3.3")
    #var_dict['gbeta'] = tf.Variable(tf.zeros([1000]), name='gbeta')
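The snippets call combine_embeddings without showing its body. A minimal sketch of what such a fusion helper might look like, assuming an elementwise-product fusion over linearly projected inputs (a common VQA fusion); the var_dict keys 'cW', 'cb', 'qW', 'qb' are hypothetical names, not taken from the source:

import tensorflow as tf

def combine_embeddings(cnn_feat, rnn_feat, var_dict):
    # Project both modalities into a common space, then fuse them
    # with an elementwise product followed by tanh
    cnn_proj = tf.matmul(cnn_feat, var_dict['cW']) + var_dict['cb']
    rnn_proj = tf.matmul(rnn_feat, var_dict['qW']) + var_dict['qb']
    return tf.tanh(tf.multiply(cnn_proj, rnn_proj))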
Example #2
    # Encode the question one word at a time with the stacked LSTM
    for i in range(max_words_q):
        if i == 0:
            # No previous word at the first step; feed a zero embedding
            ques_emb_linear = tf.zeros([batch_size, input_embedding_size])
        else:
            # Reuse the embedding/LSTM variables across time steps
            tf.get_variable_scope().reuse_variables()
            ques_emb_linear = tf.nn.embedding_lookup(var_dict['rnnqW'],
                                                     question[:, i - 1])
            #ques_emb_linear = tf.gather(var_dict['rnnqW'], question[:,i-1])

        # TF1's tf.nn.dropout takes a keep probability, hence 1 - drop_out_rate
        ques_emb_drop = tf.nn.dropout(ques_emb_linear, 1 - drop_out_rate)
        ques_emb = tf.tanh(ques_emb_drop)

        output, state = stacked_lstm(ques_emb, state)

    print(state.get_shape())  # debug: inspect the final LSTM state shape
    features = combine_embeddings(image, state, var_dict)
    #features = tf.concat([image,state], 1)
    #features = tf.concat([state,image], 1)

    features2 = tf.concat([features, noise], 1)
    scores_emb = generator_me(features2, var_dict)

    #v1 = tf.Variable(tf.truncated_normal([2048,1000]))
    #v2 = tf.Variable(tf.constant(0.1, shape=[1000]))
    #scores_emb = tf.nn.relu_layer(state, v1, v2)

    # Cross-entropy between the generated answer scores and the true answers
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=scores_emb,
                                                labels=answers_true))

tvars = tf.trainable_variables()
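Collecting tvars is typically followed by a gradient-clipped training step. A minimal sketch of that continuation, reusing loss and tvars from the snippet above; the learning_rate and max_grad_norm hyperparameters and the choice of plain gradient descent are assumptions, not shown in the source:

# Hypothetical continuation: clip gradients over the trainable variables,
# then apply them with an optimizer
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.apply_gradients(zip(grads, tvars))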
Example #3
    # Encode the question one word at a time with the stacked LSTM
    # (same loop as in Example #2)
    for i in range(max_words_q):
        if i == 0:
            ques_emb_linear = tf.zeros([batch_size, input_embedding_size])
        else:
            tf.get_variable_scope().reuse_variables()
            ques_emb_linear = tf.nn.embedding_lookup(var_dict['rnnqW'],
                                                     question[:, i - 1])

        ques_emb_drop = tf.nn.dropout(ques_emb_linear, 1 - drop_out_rate)
        ques_emb = tf.tanh(ques_emb_drop)

        output, state = stacked_lstm(ques_emb, state)

    # Normalize the CNN image features across the batch; the learned
    # offset/scale (var_dict['cnnoutbeta'], var_dict['cnnoutscale']) are disabled
    cnn_mean, cnn_var = tf.nn.moments(image, [0])
    cnn_out_true_n = tf.nn.batch_normalization(
        image, cnn_mean, cnn_var, None, None, epsilon)

    # Normalize the final LSTM state the same way; learned offset/scale
    # (var_dict['rnnoutbeta'], var_dict['rnnoutscale']) are disabled
    rnn_mean, rnn_var = tf.nn.moments(state, [0])
    rnn_out_true_n = tf.nn.batch_normalization(
        state, rnn_mean, rnn_var, None, None, epsilon)

    features = combine_embeddings(cnn_out_true_n, rnn_out_true_n, var_dict)
    #features = tf.concat([image,state], 1)
    #features = tf.concat([state,image], 1)

    # Noise is added elementwise here (shapes must match) rather than
    # concatenated as in the earlier examples
    #features2 = tf.concat([features,noise], 1)
    features2 = tf.add(features, noise)
    scores_emb = generator_me(features2, var_dict)

    #v1 = tf.Variable(tf.truncated_normal([2048,1000]))
    #v2 = tf.Variable(tf.constant(0.1, shape=[1000]))
    #scores_emb = tf.nn.relu_layer(state, v1, v2)

    #saver = tf.train.Saver()

    #saver.restore(sess, "./train_g_v3.2.2")
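Example #3 swaps the noise concatenation of the earlier examples for an elementwise add. A minimal sketch of the shape consequence, with made-up sizes (batch 32, feature width 1024) purely for illustration:

import tensorflow as tf

features = tf.zeros([32, 1024])
noise_same = tf.zeros([32, 1024])
noise_extra = tf.zeros([32, 100])

# tf.add requires matching shapes and keeps the feature width unchanged
added = tf.add(features, noise_same)            # shape [32, 1024]

# tf.concat widens the feature dimension, so the first weight matrix
# inside generator_me must be sized for the wider input
joined = tf.concat([features, noise_extra], 1)  # shape [32, 1124]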