Example #1
import tensorflow as tf


def layer_perceptron(X, input_shape, layer_width, layer_name=None):
    # input_shape and layer_width are lists, so input_shape + layer_width
    # concatenates into the weight shape [input_dim, output_dim]
    with tf.name_scope("weights"):
        W = tf.Variable(tf.random_normal(input_shape + layer_width))
        ts.variable_summaries(W)
    with tf.name_scope("bias"):
        bias = tf.Variable(tf.random_normal(layer_width))
        ts.variable_summaries(bias)
    return tf.sigmoid(tf.matmul(X, W) + bias)
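
The ts.variable_summaries helper is not shown in these examples. A minimal sketch of what it might contain, following the standard TensorFlow 1.x summaries pattern (the body here is an assumption, not the project's actual code):

import tensorflow as tf

def variable_summaries(var):
    # attach mean/stddev/min/max scalars and a histogram to a tensor,
    # so TensorBoard can plot its distribution over training
    with tf.name_scope("summaries"):
        mean = tf.reduce_mean(var)
        tf.summary.scalar("mean", mean)
        with tf.name_scope("stddev"):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar("stddev", stddev)
        tf.summary.scalar("max", tf.reduce_max(var))
        tf.summary.scalar("min", tf.reduce_min(var))
        tf.summary.histogram("histogram", var)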
Example #2
def layer_perceptron(X, input_shape, layer_width, layer_name=None):
    if layer_name is None:
        layer_name = "layer"

    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            W = tf.Variable(tf.random_normal(input_shape + layer_width))
            ts.variable_summaries(W)
        with tf.name_scope("bias"):
            bias = tf.Variable(tf.random_normal(layer_width))
            ts.variable_summaries(bias)
        with tf.name_scope("activate_function"):
            activate = tf.sigmoid(tf.matmul(X, W) + bias)

    return activate
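
Compared with Example #1, wrapping the whole body in tf.name_scope(layer_name) groups each layer's weights, bias, and activation under a single collapsible node in the TensorBoard graph. A minimal usage sketch for chaining layers (the 784/256/10 sizes are illustrative, not from the original):

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 784], name="X")
hidden = layer_perceptron(X, [784], [256], "hidden_1")   # W shape: [784, 256]
output = layer_perceptron(hidden, [256], [10], "output") # W shape: [256, 10]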
Example #3
def model_NN_softmax():

    # placeholder x, y, y_label
    # (nn, ts, and FLAGS come from the project's own helper modules)
    ph_set = nn.placeholders_init()

    # NN layers (each layer's input_shape must match the previous layer's width)
    layer1 = nn.layer_perceptron(ph_set["X"], [FLAGS.image_size],
                                 [FLAGS.perceptron_output_shape_size],
                                 "softmax_L1")
    layer2 = nn.layer_perceptron(layer1, [FLAGS.perceptron_input_shape_size],
                                 [FLAGS.perceptron_output_shape_size],
                                 "softmax_L2")
    layer3 = nn.layer_perceptron(layer2, [FLAGS.perceptron_input_shape_size],
                                 [FLAGS.label_number], "softmax_L3")

    # softmax layer
    with tf.name_scope("softmax_func"):
        # [10, 10] is hard-coded here; it assumes the layer3 width and
        # FLAGS.label_number are both 10
        W_softmax = tf.Variable(tf.zeros([10, 10]), name="W_softmax")
        h = tf.nn.softmax(tf.matmul(layer3, W_softmax), name="h")
        ts.variable_summaries(h)

    # cross entropy function
    with tf.name_scope("cross_entropy"):
        cost = tf.reduce_mean(
            -tf.reduce_sum(ph_set["Y"] * tf.log(h), axis=1))
        ts.variable_summaries(cost)

    # train op
    train_op = tf.train.GradientDescentOptimizer(
        FLAGS.learning_rate).minimize(cost)

    # predicted label and batch accuracy
    predicted_label = tf.cast(tf.argmax(h, 1, name="predicted_label"),
                              tf.float32)
    with tf.name_scope("softmax_batch_acc"):
        with tf.name_scope("accuracy"):
            batch_acc = tf.reduce_mean(tf.cast(
                tf.equal(predicted_label, ph_set["Y_label"]), tf.float32),
                                       name="batch_acc")
            tf.summary.scalar("accuracy", batch_acc)
        with tf.name_scope("batch_hit_count"):
            batch_hit_count = tf.reduce_sum(tf.cast(
                tf.equal(predicted_label, ph_set["Y_label"]), tf.float32),
                                            name="batch_hit_count")
            tf.summary.scalar("hit_count", batch_hit_count)

    # merge summary
    summary = tf.summary.merge_all()

    # init op
    init_op = tf.global_variables_initializer()

    # save tensor
    tensor_set = {
        "X": ph_set["X"],
        "Y": ph_set["Y"],
        "Y_label": ph_set["Y_label"],
        "layer1": layer1,
        "layer2": layer2,
        "layer3": layer3,
        "W_softmax ": W_softmax,
        "h": h,
        "cost": cost,
        "train_op": train_op,
        "predicted_label": predicted_label,
        "batch_acc": batch_acc,
        "batch_hit_count ": batch_hit_count,
        "init_op": init_op,
        "summary": summary,
    }
    return tensor_set
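
The returned tensor_set dictionary carries everything a training loop needs. A minimal sketch of how it might be driven (next_batch and FLAGS.log_dir are hypothetical stand-ins for the project's own input pipeline and log directory):

import tensorflow as tf

tensors = model_NN_softmax()
with tf.Session() as sess:
    writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)  # hypothetical flag
    sess.run(tensors["init_op"])
    for step in range(1000):
        xs, ys, y_labels = next_batch(100)  # hypothetical batch helper
        _, summary_str = sess.run(
            [tensors["train_op"], tensors["summary"]],
            feed_dict={tensors["X"]: xs,
                       tensors["Y"]: ys,
                       tensors["Y_label"]: y_labels})
        writer.add_summary(summary_str, step)
    writer.close()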
Example #4
def model_NN():

    # placeholder x, y, y_label
    ph_set = nn.placeholders_init()

    # NN layers (each layer's input_shape must match the previous layer's width)
    layer1 = nn.layer_perceptron(ph_set["X"], [FLAGS.image_size],
                                 [FLAGS.perceptron_input_shape_size],
                                 "layer_1")
    layer2 = nn.layer_perceptron(layer1, [FLAGS.perceptron_input_shape_size],
                                 [FLAGS.perceptron_output_shape_size],
                                 "layer_2")
    h = nn.layer_perceptron(layer2, [FLAGS.perceptron_input_shape_size],
                            [FLAGS.label_number], "layer_3")

    # cost function
    with tf.name_scope("cost_function"):
        # TODO logistic regression (cross entropy) does not converge yet;
        # fall back to the squared-error cost for now (see the note after
        # this example)
        cost = tf.reduce_mean((h - ph_set["Y"])**2, name="cost")
        # logistic regression:
        # cost = -tf.reduce_mean(ph_set["Y"] * tf.log(h)
        #                        + (1 - ph_set["Y"]) * tf.log(1 - h), name="cost")
        ts.variable_summaries(cost)

    # train op
    with tf.name_scope("train_op"):
        train_op = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cost)

    # predicted label and batch accuracy
    predicted_label = tf.cast(tf.argmax(h, 1, name="predicted_label"),
                              tf.float32)
    with tf.name_scope("NN_batch_acc"):
        batch_acc = tf.reduce_mean(tf.cast(
            tf.equal(predicted_label, ph_set["Y_label"]), tf.float32),
                                   name="batch_acc")
        tf.summary.scalar("accuracy", batch_acc)
        batch_hit_count = tf.reduce_sum(tf.cast(
            tf.equal(predicted_label, ph_set["Y_label"]), tf.float32),
                                        name="batch_hit_count")
        tf.summary.scalar("hit_count", batch_hit_count)

    # merge summary
    summary = tf.summary.merge_all()

    # init op
    init_op = tf.global_variables_initializer()

    # save tensor
    tensor_set = {
        "X": ph_set["X"],
        "Y": ph_set["Y"],
        "Y_label": ph_set["Y_label"],
        "layer1": layer1,
        "layer2": layer2,
        "h": h,
        "cost": cost,
        "train_op": train_op,
        "predicted_label": predicted_label,
        "batch_acc": batch_acc,
        "batch_hit_count ": batch_hit_count,
        "init_op": init_op,
        "summary": summary,
    }

    return tensor_set
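
On the TODO above: the commented-out logistic-regression cost produces NaN as soon as h reaches exactly 0 or 1, because tf.log then returns -inf. The numerically stable route in TensorFlow 1.x is to keep the last layer's pre-sigmoid output and use tf.nn.sigmoid_cross_entropy_with_logits. A sketch, assuming layer_perceptron were modified to also return its logits (tf.matmul(X, W) + bias):

# logits: the last layer's pre-activation output, before tf.sigmoid
cost = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=ph_set["Y"],
                                            logits=logits),
    name="cost")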