Example 1
        max_gamma_Avg_prec = 0
        max_beta_Avg_prec = 0
        APK = []
        ASK = []
        APK1 = []
        ASK1 = []
        min_one_err = 1000000
        min_gamma_one_err = 0
        min_beta_one_err = 0
        max_S_2_train = 0
        max_S_2_test = 0
        with tf.Session(config=config) as sess:
            han = HAN(vocab_size=FLAGS.vocab_size,
                      vocab_dict=vocab,
                      A_matrix=A_matrix,
                      num_classes=FLAGS.num_classes,
                      num_classes_8=FLAGS.num_classes_8,
                      sess=sess,
                      embedding_size=FLAGS.embedding_size,
                      hidden_size=FLAGS.hidden_size)

            regularizer = tf.contrib.layers.l2_regularizer(0.1)
            # Without an explicit weights list, apply_regularization regularizes the
            # tensors collected in tf.GraphKeys.WEIGHTS (and raises if that collection is empty).
            reg_term = tf.contrib.layers.apply_regularization(regularizer)
            loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=han.input_y, logits=han.out)) + reg_term

            predict = tf.argmax(han.out, axis=1, name='predict')
            label = tf.argmax(han.input_y, axis=1, name='label')
            acc = tf.reduce_mean(tf.cast(tf.equal(predict, label), tf.float32))

            global_step = tf.Variable(0, trainable=False)
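
A minimal sketch of how training might continue inside the tf.Session block above, in the same TF 1.x style; the Adam optimizer, the learning rate, and the han.input_x / x_batch / y_batch names are assumptions, not part of the original example:

            # Assumed continuation: build a train op that increments global_step.
            train_op = tf.train.AdamOptimizer(1e-3).minimize(
                loss, global_step=global_step)

            sess.run(tf.global_variables_initializer())
            # han.input_x and the x_batch / y_batch arrays are hypothetical names.
            _, step, cur_loss, cur_acc = sess.run(
                [train_op, global_step, loss, acc],
                feed_dict={han.input_x: x_batch, han.input_y: y_batch})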
Example 2
FLAGS = tf.flags.FLAGS

print(FLAGS.max_sent_in_doc)
print(FLAGS.max_word_in_sent)

train_x, train_y, dev_x, dev_y, vocab = load_dataset(FLAGS.yelp_json_path,
                                                     FLAGS.labels_json_path,
                                                     FLAGS.max_sent_in_doc,
                                                     FLAGS.max_word_in_sent)
print("data load finished")

#print(train_x)

with tf.Session() as sess:
    han = HAN(vocab_size=FLAGS.vocab_size,
              num_classes=FLAGS.num_classes,
              embedding_size=FLAGS.embedding_size,
              hidden_size=FLAGS.hidden_size)

    with tf.name_scope('loss'):
        loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=han.input_y,
                                                    logits=han.out,
                                                    name='loss'))
    with tf.name_scope('accuracy'):
        predict = tf.argmax(han.out, axis=1, name='predict')
        label = tf.argmax(han.input_y, axis=1, name='label')
        acc = tf.reduce_mean(tf.cast(tf.equal(predict, label), tf.float32))

    timestamp = str(int(time.time()))
    out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
    print("Writing to {}\n".format(out_dir))
Example 3
y_train = to_categorical(y_train)
y_test  = to_categorical(y_test)
# add one extra dimension as the sentence axis (1 sentence per doc!)
X_train = np.expand_dims(X_train, axis=1)
X_test = np.expand_dims(X_test, axis=1)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

# ---------- vocabulary size ----------
vocabulary_size = np.max([np.max(X_train[i]) for i in range(X_train.shape[0])])+1
print("vocab_size (vocabulary size):", vocabulary_size)


nn = HAN(
    vocab_size=vocabulary_size,
    num_classes=y_train.shape[1],
    embedding_size=embedding_dim,
    hidden_size=rnn_size
)


# -------- minimize the loss with the Adam optimizer --------
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(nn.loss)

tf.summary.scalar('loss', nn.loss)
tf.summary.scalar('accuracy', nn.accuracy)

merged = tf.summary.merge_all()

# Batch generators
# batch_size = 256
# num_epochs = 3
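
The example ends with the batch settings commented out; below is a minimal sketch of a batch generator and training loop that would use merged, optimizer, nn.loss, and nn.accuracy from above. The batch_size and num_epochs values echo the comments, while the nn.input_x / nn.input_y placeholder names and the 'logs' directory are assumptions:

def batch_iter(x, y, batch_size=256, num_epochs=3, shuffle=True):
    # Yield shuffled (x, y) mini-batches for num_epochs passes over the data.
    n = len(x)
    for _ in range(num_epochs):
        idx = np.random.permutation(n) if shuffle else np.arange(n)
        for start in range(0, n, batch_size):
            sel = idx[start:start + batch_size]
            yield x[sel], y[sel]

with tf.Session() as sess:
    writer = tf.summary.FileWriter('logs', sess.graph)
    sess.run(tf.global_variables_initializer())
    for step, (x_batch, y_batch) in enumerate(batch_iter(X_train, y_train)):
        feed = {nn.input_x: x_batch, nn.input_y: y_batch}
        # 'optimizer' is the minimize op defined above; running it performs one update.
        _, summary, cur_loss, cur_acc = sess.run(
            [optimizer, merged, nn.loss, nn.accuracy], feed_dict=feed)
        writer.add_summary(summary, step)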