def build_graph(X, y):
    """Build the training graph: MLP forward pass, loss, and precision@1.

    Returns the (loss, accuracy) ops. Each summary tag embeds the op name
    so this function can be called more than once (e.g. train + eval
    graphs) without duplicate-tag summary errors.
    """
    model = Mlp(input_dim=NUM_FEATURES, num_classes=NUM_CLASSES)
    logits = model.forward(X)

    # Cross-entropy against sparse integer labels.
    loss = melt.sparse_softmax_cross_entropy(logits, y)
    tf.scalar_summary('loss_%s' % loss.name, loss)

    # Evaluation metric: precision at top-1.
    accuracy = melt.precision_at_k(logits, y, 1)
    tf.scalar_summary('precision@1_%s' % accuracy.name, accuracy)

    # NOTE(review): a fixed tag such as 'precision@1' raises
    # InvalidArgumentError ("Duplicate tag ... found in summary inputs")
    # when this graph is built twice. Embedding op.name in the tag avoids
    # that; alternatively add summaries outside build_graph with names you
    # choose yourself. TensorBoard splits tags on underscores, so
    # loss_train / loss_eval compare side by side better than
    # train_loss / eval_loss.
    return loss, accuracy
def build_graph(X, y):
    """Forward an MLP over X; return (loss, precision@1) for labels y."""
    model = Mlp(NUM_FEATURES, NUM_CLASSES)
    logits = model.forward(X)

    # Sparse softmax cross-entropy over the logits.
    loss = melt.sparse_softmax_cross_entropy(logits, y)

    # Evaluation metric: precision at top-1.
    accuracy = melt.precision_at_k(logits, y, 1)

    return loss, accuracy
# Example #3 (score: 0) — scrape artifact separating example snippets,
# commented out so the file remains parseable.
def build_graph(X, y):
  """Attach loss and evaluation metric to the forward graph from predict().

  For classification only the forward graph changes between models; the
  loss function and evaluation metric stay the same.
  """
  # Forward graph (model-specific).
  logits = predict(X)

  # Loss function: sparse softmax cross-entropy.
  loss = melt.sparse_softmax_cross_entropy(logits, y)

  # Evaluation metric: precision at top-1.
  accuracy = melt.precision_at_k(logits, y, 1)

  return loss, accuracy
# Example #4 (score: 0) — scrape artifact separating example snippets,
# commented out so the file remains parseable.
def build_graph(X, y):
    """MLP forward graph plus classification loss and precision@1 metric."""
    # Forward graph.
    model = Mlp(input_dim=NUM_FEATURES, num_classes=NUM_CLASSES)
    logits = model.forward(X)

    # Loss function: sparse softmax cross-entropy.
    loss = melt.sparse_softmax_cross_entropy(logits, y)
    #tf.scalar_summary('loss_%s'%loss.name, loss)

    # Evaluation metric: precision at top-1.
    accuracy = melt.precision_at_k(logits, y, 1)
    #tf.scalar_summary('precision@1_%s'%accuracy.name, accuracy)

    return loss, accuracy