  # minimize cross entropy against the true label (clip to avoid log(0))
  cross_entropy = -tf.reduce_sum(output_label_ * tf.log(tf.clip_by_value(output_label, 1e-10, 1.0)))

  # clip gradients element-wise to prevent explosions; Adagrad adapts the learning rate
  tvars = tf.trainable_variables()
  grads = [tf.clip_by_value(grad, -2., 2.) for grad in tf.gradients(cross_entropy, tvars)]
  optimizer = tf.train.AdagradOptimizer(0.01)
  train_step = optimizer.apply_gradients(zip(grads, tvars))
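
Element-wise clipping caps each gradient component separately, which can change the gradient's direction. An alternative sketch (the 2.0 threshold is illustrative, not from the original) replaces the two clipping lines above with global-norm clipping, which rescales all gradients together:

  # rescale all gradients jointly so their combined norm is at most 2.0
  grads, _ = tf.clip_by_global_norm(tf.gradients(cross_entropy, tvars), clip_norm=2.0)
  train_step = optimizer.apply_gradients(zip(grads, tvars))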


  # =============== TRAINING AND TESTING ================== #
  # build the accuracy op once; creating it inside the loop would keep growing the graph
  correct_prediction = tf.equal(tf.argmax(output_label, 1), tf.argmax(output_label_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  # initialize all variables
  sess.run(tf.global_variables_initializer())
  # run over many training steps
  for i in range(50001):
    # get a fresh batch dynamically from my data generator
    dat, lab = dg.gen_data_batch(batch_size, example_length)
    # evaluate every 100 steps
    if i % 100 == 0:
      print("====current accuracy==== at step", i)
      pos_data, pos_label = dg.gen_data_batch(100, example_length, pos_neg=True)
      neg_data, neg_label = dg.gen_data_batch(100, example_length, pos_neg=False)
      res = sess.run(accuracy, feed_dict={input_seq: pos_data, output_label_: pos_label})
      print("pos accuracy:", res)
      res = sess.run(accuracy, feed_dict={input_seq: neg_data, output_label_: neg_label})
      print("neg accuracy:", res)
    # train on the current batch at every step
    sess.run(train_step, feed_dict={input_seq: dat, output_label_: lab})
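
To keep the trained weights once the loop finishes, TF1's tf.train.Saver can write a checkpoint; a minimal sketch (the path is hypothetical):

  # Saver covers all variables by default; the checkpoint path is hypothetical
  saver = tf.train.Saver()
  saver.save(sess, "./checkpoints/model.ckpt")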

Example 2
import tensorflow as tf
from tensorflow import keras

num_epochs = 2000

# the Input layer already fixes the shape, so LSTM needs no input_shape argument
inputs = keras.Input(shape=(length, input_size), dtype=tf.float32)
lstm = keras.layers.LSTM(num_units)(inputs)
relu = keras.layers.Dense(hidden_units, activation=tf.nn.relu)(lstm)
out = keras.layers.Dense(label_size, activation=tf.nn.softmax)(relu)

model = keras.Model(inputs=inputs, outputs=out)

model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
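
Before training, model.summary() is a quick sanity check of the layer shapes:

model.summary()  # (None, length, input_size) -> (None, num_units) -> (None, hidden_units) -> (None, label_size)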

for i in range(num_epochs):
    # fresh batch from the data generator each iteration
    data, label = dg.gen_data_batch(batch_size, length)
    # evaluate on held-out positive and negative batches every 100 iterations
    if i % 100 == 0:
        print("\ncurrent accuracy at iteration", i)
        pos_data, pos_label = dg.gen_data_batch(100, length, True)
        neg_data, neg_label = dg.gen_data_batch(100, length, False)
        loss1, accuracy1 = model.evaluate(pos_data, pos_label, batch_size=batch_size)
        loss2, accuracy2 = model.evaluate(neg_data, neg_label, batch_size=batch_size)
        print("\nPositive Loss: %.3f\nPositive Acc: %.3f" % (loss1, accuracy1))
        print("\nNegative Loss: %.3f\nNegative Acc: %.3f" % (loss2, accuracy2))
    # one pass over the current batch
    model.fit(data, label, epochs=1, batch_size=batch_size)
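
Rather than driving model.fit one batch at a time, a generator can feed fit directly; a minimal sketch, assuming dg.gen_data_batch keeps the signature used above (the step counts are illustrative):

def batch_generator():
    # yields (data, label) batches forever; fit stops after steps_per_epoch * epochs batches
    while True:
        yield dg.gen_data_batch(batch_size, length)

model.fit(batch_generator(), steps_per_epoch=100, epochs=20)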
Example 3
  # output layer: project hidden activations to label logits, then softmax
  w2 = tf.Variable(tf.random_normal([hidden_units, label_size], mean=0.1, stddev=0.035))
  b2 = tf.Variable(tf.zeros([label_size]))
  output_label = tf.nn.softmax(tf.matmul(relu1, w2) + b2)

  # minimize cross entropy against the true label (clip to avoid log(0))
  cross_entropy = -tf.reduce_sum(output_label_ * tf.log(tf.clip_by_value(output_label, 1e-10, 1.0)))

  # clip gradients element-wise to prevent explosions; Adagrad adapts the learning rate
  tvars = tf.trainable_variables()
  grads = [tf.clip_by_value(grad, -2., 2.) for grad in tf.gradients(cross_entropy, tvars)]
  optimizer = tf.train.AdagradOptimizer(0.01)
  train_step = optimizer.apply_gradients(zip(grads, tvars))
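
A numerically more stable alternative (a sketch, reusing w2, b2, and relu1 from above) keeps the pre-softmax logits and lets TensorFlow fuse the softmax into the loss, instead of taking the log of the softmax output:

  # fused softmax + cross entropy, computed from the raw logits
  logits = tf.matmul(relu1, w2) + b2
  cross_entropy = tf.reduce_sum(
      tf.nn.softmax_cross_entropy_with_logits(labels=output_label_, logits=logits))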


  # =============== TRAINING AND TESTING ================== #
  # build the accuracy op once; creating it inside the loop would keep growing the graph
  correct_prediction = tf.equal(tf.argmax(output_label, 1), tf.argmax(output_label_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  # initialize all variables
  sess.run(tf.global_variables_initializer())
  # run over many training steps
  for i in range(50001):
    # get a fresh batch dynamically from my data generator
    dat, lab = dg.gen_data_batch(batch_size, example_length)
    # evaluate on the current batch every 100 steps
    if i % 100 == 0:
      print("====current accuracy==== at step", i)
      res = sess.run(accuracy, feed_dict={input_seq: dat, output_label_: lab})
      print(res)
    # train on the current batch at every step
    sess.run(train_step, feed_dict={input_seq: dat, output_label_: lab})