Example no. 1
    dataset = dataset.repeat(args.num_epochs)
    dataset = dataset.batch(args.batch_size)
    iterator = dataset.make_initializable_iterator()
    next_train_batch = iterator.get_next()

    x = tf.placeholder(tf.float32, shape=(batch_size, size_features))
    y_ = tf.placeholder(tf.float32, shape=(batch_size, num_labels))

    keep_prob = tf.placeholder(tf.float32)

    #tf_valid_dataset = tf.constant(valid_dataset)
    # tf_test_dataset = tf.constant(test_dataset)

    y_conv = nn.inference(x,
                          size_features,
                          num_labels=num_labels,
                          keep_prob=keep_prob,
                          batch_size=batch_size,
                          regularization_constant=reg_constant)

    # calculate the loss from the results of inference and the labels
    loss = nn.loss(y_conv, y_)

    tf.summary.scalar(loss.op.name, loss)

    #intersection_sum, label_sum, example_sum, precision = nn.evaluation(y_conv, y_)

    #tf.summary.scalar ("Precision op", precision)

    # setup the training operations
    #train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
    # setup the summary ops to use TensorBoard
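The snippet breaks off just before the training and summary setup. A minimal sketch of how it might continue, assuming nn.training has the signature in the commented line above, that the dataset yields (features, labels) pairs, and that learning_rate, decay_steps, decay_rate, and log_dir are defined elsewhere:

    # Hypothetical continuation: the hyperparameters and log_dir are assumptions.
    train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
    summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(iterator.initializer)
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        while True:
            try:
                # assumes the dataset yields (features, labels) pairs
                batch_x, batch_y = sess.run(next_train_batch)
            except tf.errors.OutOfRangeError:
                break  # the repeated dataset is exhausted
            _, summary = sess.run([train_step, summary_op],
                                  feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})
            summary_writer.add_summary(summary, step)
            step += 1
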
Example no. 2
#   return accuracy.eval()

graph = tf.Graph()

with graph.as_default():

  # run inference on the input data
  x = tf.placeholder(tf.float32, shape=(batch_size, size_features))
  y_ = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  
  keep_prob = tf.placeholder(tf.float32)

  #tf_valid_dataset = tf.constant(valid_dataset)
  # tf_test_dataset = tf.constant(test_dataset)

  y_conv = nn.inference(x, size_features, num_labels, keep_prob, batch_size)

  # calculate the loss from the results of inference and the labels
  #loss = nn.loss(y_conv, y_)

  accuracy_eval = nn.evaluation(y_conv, y_)

  #tf.summary.scalar(loss.op.name, loss)

  #intersection_sum, label_sum, example_sum, precision = nn.evaluation(y_conv, y_)

  #tf.summary.scalar ("Precision op", precision)

  # setup the training operations
  #train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
  # setup the summary ops to use TensorBoard
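As a sketch only: with the graph built, accuracy_eval (assumed here to be a scalar tensor) could be evaluated in a session over held-out data, with dropout disabled via keep_prob=1.0. Treating valid_dataset and valid_labels as NumPy arrays is an assumption:

# hypothetical evaluation loop; valid_dataset and valid_labels are assumed NumPy arrays
with tf.Session(graph=graph) as sess:
  sess.run(tf.global_variables_initializer())
  accuracies = []
  # the placeholders have a fixed batch dimension, so only full batches are fed
  for i in range(0, len(valid_dataset) - batch_size + 1, batch_size):
    batch_x = valid_dataset[i:i + batch_size]
    batch_y = valid_labels[i:i + batch_size]
    accuracies.append(sess.run(accuracy_eval,
                               feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0}))
  print('mean accuracy', sum(accuracies) / len(accuracies))
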
Example no. 3
    valid_dataset = valid_dataset.repeat(1)
    valid_dataset = valid_dataset.batch(batch_size)
    valid_iterator = valid_dataset.make_initializable_iterator()
    next_valid_data, next_valid_labels = valid_iterator.get_next()

    x = tf.placeholder(tf.float32, shape=(None, size_features))
    y_ = tf.placeholder(tf.float32, shape=(None, num_labels))

    keep_prob = tf.placeholder(tf.float32)

    #tf_valid_dataset = tf.constant(valid_dataset)
    # tf_test_dataset = tf.constant(test_dataset)

    y_conv = nn.inference(x,
                          size_features,
                          num_labels=num_labels,
                          keep_prob=keep_prob,
                          batch_size=batch_size,
                          is_training=True)

    # calculate the loss from the results of inference and the labels
    loss = nn.loss(y_conv, y_)

    tf.summary.scalar(loss.op.name, loss)

    #intersection_sum, label_sum, example_sum, precision = nn.evaluation(y_conv, y_)

    #tf.summary.scalar ("Precision op", precision)

    # setup the training operations
    #train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
    # setup the summary ops to use TensorBoard
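Since repeat(1) makes the validation iterator yield exactly one epoch, a sketch of consuming it and feeding the batches into the placeholders might look like this (the session setup itself is an assumption):

    # hypothetical validation pass over one epoch of the initializable iterator
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(valid_iterator.initializer)
        losses = []
        while True:
            try:
                batch_x, batch_y = sess.run([next_valid_data, next_valid_labels])
            except tf.errors.OutOfRangeError:
                break  # one epoch consumed
            losses.append(sess.run(loss, feed_dict={x: batch_x,
                                                    y_: batch_y,
                                                    keep_prob: 1.0}))
        print('mean validation loss', sum(losses) / len(losses))
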
Example no. 4
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
print('num_labels', num_labels)

graph = tf.Graph()

with graph.as_default():

    # run inference on the input data
    x = tf.placeholder(tf.float32, shape=(None, size_features))
    y_ = tf.placeholder(tf.float32, shape=(None, num_labels))

    keep_prob = tf.placeholder(tf.float32)

    y_conv = nn.inference(x, size_features, num_labels=num_labels)

    logits_eval = tf.nn.softmax(y_conv)
    label_eval = tf.argmax(logits_eval, axis=1)

    auc_eval, fn_eval, fp_eval, tn_eval, tp_eval = nn.metrics(logits_eval, y_)

    tf.summary.scalar("auc_0", auc_eval[0])
    tf.summary.scalar("auc_1", auc_eval[1])
    tf.summary.scalar("fn_eval", fn_eval[1])
    tf.summary.scalar("fp_eval", fp_eval[1])
    tf.summary.scalar("tn_eval", tn_eval[1])
    tf.summary.scalar("tp_eval", tp_eval[1])

    summary_op = tf.summary.merge_all()
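
The [0]/[1] indexing above suggests nn.metrics returns (value, update_op) pairs in the style of tf.metrics, whose state lives in local variables. Under that assumption, a sketch of streaming the metrics over the validation set might look like this ('./logs' and the batching details are assumptions):

# Hypothetical metric pass; assumes (value, update_op) pairs backed by
# local variables, as with tf.metrics.
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    summary_writer = tf.summary.FileWriter('./logs', sess.graph)
    # placeholders have a None batch dimension, so a partial final batch is fine
    for i in range(0, len(valid_dataset), batch_size):
        batch_x = valid_dataset[i:i + batch_size]
        batch_y = valid_labels[i:i + batch_size]
        # running the update ops accumulates the streaming statistics
        sess.run([auc_eval[1], fn_eval[1], fp_eval[1], tn_eval[1], tp_eval[1]],
                 feed_dict={x: batch_x, y_: batch_y})
    # the merged summaries read (and update) the metric tensors once more
    summary = sess.run(summary_op, feed_dict={x: batch_x, y_: batch_y})
    summary_writer.add_summary(summary, 0)
    print('final AUC', sess.run(auc_eval[0]))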