Example #1
0
def simple_net(x):
    """Single-layer softmax classifier for flattened 28x28 MNIST images.

    Args:
        x: float32 tensor/placeholder of shape [None, 784].

    Returns:
        Tensor of shape [None, 10] holding per-class softmax probabilities.
    """
    # 3. Define weight and bias variables using tf.Variable
    # (zero-initialized: fine for a single linear layer, no symmetry to break)
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    # 4. Compute the logits and map them to class probabilities
    logits = tf.matmul(x, W) + b
    y = tf.nn.softmax(logits)
    return y


if __name__ == '__main__':
    # 1. Load the MNIST dataset (labels one-hot encoded)
    mnist_data = get_mnist_data("./data/", one_hot=True, verbose=True)
    print("mnist_data: " + str(mnist_data))

    # 2. Define appropriate placeholders using tf.placeholder
    x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 inputs
    y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot target labels

    # Create the model
    y = simple_net(x)

    # Define the cross-entropy loss; eps keeps log() away from zero.
    # NOTE: summing over axis=1 (per-example, across classes) matters —
    # without it accuracy was observed to drop to ~0.66.
    # A numerically stabler alternative is
    # tf.nn.softmax_cross_entropy_with_logits applied to the RAW logits
    # (not to the softmax output y).
    eps = np.finfo("float32").eps
    loss = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y + eps), axis=1))
            initial_value=tf.random_normal(shape=[hidden_dim, n_classes]),
            name='l2_weights')
        b_2 = tf.Variable(initial_value=tf.zeros(shape=[n_classes]),
                          name='l2_biases')

        logits = tf.matmul(hidden_1, W_2) + b_2

        y = tf.nn.softmax(logits)

        return y


if __name__ == '__main__':

    # Load MNIST data
    mnist = get_mnist_data('/tmp/mnist', verbose=True)

    # Input placeholder: flattened 28x28 grayscale images
    x = tf.placeholder(dtype=tf.float32, shape=[None, 784])

    # Placeholder for one-hot target labels
    targets = tf.placeholder(dtype=tf.float32, shape=[None, 10])

    # Define model output
    y = multi_layer_net(x)

    # Cross-entropy loss; EPS keeps log() away from zero.
    # 'axis' replaces the deprecated 'reduction_indices' argument
    # (same semantics: sum per example, across the 10 classes).
    loss = tf.reduce_mean(
        -tf.reduce_sum(targets * tf.log(y + EPS), axis=1))