Example #1
def test_perceptron():
    Graph().as_default()
    x = Placeholder()
    w = Variable([1, 1])
    b = Variable(0)
    p = sigmoid(add(matmul(w, x), b))

    session = Session()
    output = session.run(p, {x: [3, 2]})
    print(output)
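For a quick sanity check, the same probability can be computed directly with NumPy; this sketch assumes only that sigmoid is the standard logistic function and matmul is a dot product:

import numpy as np

w = np.array([1, 1])
x = np.array([3, 2])
b = 0

# sigmoid(w . x + b) = sigmoid(5) ~ 0.9933
print(1 / (1 + np.exp(-(w @ x + b))))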
Example #2
def test_compute_graph():
    Graph().as_default()

    A = Variable([[1, 0], [0, -1]])
    b = Variable([1, 1])

    x = Placeholder()
    y = matmul(A, x)
    z = add(y, b)

    session = Session()
    output = session.run(z, {x: [1, 2]})
    print(output)
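The graph computes the affine map z = A x + b. A plain-NumPy equivalent (assuming standard matrix semantics for matmul and add) confirms the expected output:

import numpy as np

A = np.array([[1, 0], [0, -1]])
b = np.array([1, 1])
x = np.array([1, 2])

# z = A @ x + b = [1, -2] + [1, 1] = [2, -1]
print(A @ x + b)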
Example #3
def test_train():
    red_points = np.random.randn(50, 2) - 2 * np.ones((50, 2))
    blue_points = np.random.randn(50, 2) + 2 * np.ones((50, 2))
    Graph().as_default()

    X = Placeholder()
    c = Placeholder()

    # Initialize weights randomly
    W = Variable(np.random.randn(2, 2))
    b = Variable(np.random.randn(2))

    # Build perceptron
    p = softmax(add(matmul(X, W), b))

    # Build cross-entropy loss
    J = negative(reduce_sum(reduce_sum(multiply(c, log(p)), axis=1)))

    # Build minimization op
    minimization_op = GradientDescentOptimizer(learning_rate=0.01).minimize(J)

    # Build placeholder inputs
    feed_dict = {
        X: np.concatenate((blue_points, red_points)),
        c: [[1, 0]] * len(blue_points) + [[0, 1]] * len(red_points)
    }

    # Create session
    session = Session()

    # Perform 100 gradient descent steps
    for step in range(100):
        J_value = session.run(J, feed_dict)
        if step % 10 == 0:
            print("Step:", step, " Loss:", J_value)
        session.run(minimization_op, feed_dict)

    # Print final result
    W_value = session.run(W)
    print("Weight matrix:\n", W_value)
    b_value = session.run(b)
    print("Bias:\n", b_value)
Example #4
def test_perceptron_loss():
    red_points = np.random.randn(50, 2) - 2 * np.ones((50, 2))
    blue_points = np.random.randn(50, 2) + 2 * np.ones((50, 2))

    Graph().as_default()
    X = Placeholder()
    c = Placeholder()

    W = Variable([[1, -1], [1, -1]])

    b = Variable([0, 0])
    p = softmax(add(matmul(X, W), b))
    J = negative(reduce_sum(reduce_sum(multiply(c, log(p)), axis=1)))

    session = Session()
    print(
        session.run(
            J, {
                X: np.concatenate((blue_points, red_points)),
                c: [[1, 0]] * len(blue_points) + [[0, 1]] * len(red_points)
            }))
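With the hand-picked weights W = [[1, -1], [1, -1]] and zero bias, the column vectors (1, 1) and (-1, -1) point toward the blue and red clusters respectively, so the loss should already be small. A quick plain-NumPy check (standard softmax and cross-entropy, independent of the framework):

import numpy as np

red_points = np.random.randn(50, 2) - 2 * np.ones((50, 2))
blue_points = np.random.randn(50, 2) + 2 * np.ones((50, 2))

X = np.concatenate((blue_points, red_points))
c = np.array([[1, 0]] * 50 + [[0, 1]] * 50)

logits = X @ np.array([[1, -1], [1, -1]])  # bias is zero, so it is omitted
e = np.exp(logits - logits.max(axis=1, keepdims=True))
p = e / e.sum(axis=1, keepdims=True)

print(-np.sum(c * np.log(p)))  # small positive number for well-separated clusters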
Example #5
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" An example that builds the graph which performs the following transformation:

		(1  0)      (1)
	z = (0 -1)* x + (1)
"""

from graph import Graph
from graph import Variable
from graph import Placeholder
from operations import matmul
from operations import add

# Create a new graph
Graph().as_default()

# Create variables
A = Variable([[1, 0], [0, -1]])
b = Variable([1, 1])

# Create placeholder
x = Placeholder()

# Create hidden node y
y = matmul(A, x)

# Create output node z
z = add(y, b)
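All of these examples rely on Session.run evaluating a node by post-order traversal of the graph. A minimal, self-contained sketch of that idea (simplified stand-ins, not the tutorial's actual graph.py or session.py):

import numpy as np

class Node:
    def __init__(self, inputs=()):
        self.inputs = list(inputs)

class Variable(Node):
    def __init__(self, value):
        super().__init__()
        self.value = np.asarray(value)

class Placeholder(Node):
    pass

class matmul(Node):
    def __init__(self, a, b):
        super().__init__([a, b])
    def compute(self, a, b):
        return a @ b

class add(Node):
    def __init__(self, a, b):
        super().__init__([a, b])
    def compute(self, a, b):
        return a + b

def run(node, feed_dict):
    """Evaluate a node by recursively evaluating its inputs first."""
    if isinstance(node, Variable):
        return node.value
    if isinstance(node, Placeholder):
        return np.asarray(feed_dict[node])
    args = [run(n, feed_dict) for n in node.inputs]
    return node.compute(*args)

# Same graph as above: z = A x + b
A = Variable([[1, 0], [0, -1]])
b = Variable([1, 1])
x = Placeholder()
z = add(matmul(A, x), b)
print(run(z, {x: [1, 2]}))  # -> [ 2 -1]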
Example #6
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Compute the cross-entropy loss of a fixed-weight softmax perceptron
	on two point clusters centered at (-2, -2) and (2, 2).
"""

from graph import Graph
from graph import Variable
from graph import Placeholder
from operations import matmul
from operations import add
from operations import softmax
from operations import negative
from operations import log
from operations import reduce_sum
from operations import multiply
from session import Session
import numpy as np

# Create a new graph
Graph().as_default()

X = Placeholder()
c = Placeholder()

# Create a weight matrix for 2 output classes:
# One with a weight vector (1, 1) for blue and one with a
# weight vector (-1, -1) for red
W = Variable([[1, -1], [1, -1]])

b = Variable([0, 0])
p = softmax(add(matmul(X, W), b))

# Cross-entropy loss
J = negative(reduce_sum(reduce_sum(multiply(c, log(p)), axis=1)))

# Create red points centered at (-2, -2)
red_points = np.random.randn(50, 2) - 2 * np.ones((50, 2))
# Create blue points centered at (2, 2)
blue_points = np.random.randn(50, 2) + 2 * np.ones((50, 2))

session = Session()
print(
    session.run(
        J, {
            X: np.concatenate((blue_points, red_points)),
            c: [[1, 0]] * len(blue_points) + [[0, 1]] * len(red_points)
        }))
Example #7
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Build a perceptron that classifies a point between two sets
	which are divided by a line between (4,0) and (0,4).
"""

from graph import Graph
from graph import Variable
from graph import Placeholder
from operations import matmul
from operations import add
from operations import sigmoid
from session import Session

# Create a new graph
Graph().as_default()

x = Placeholder()
w = Variable([1, 1])
b = Variable(0)
p = sigmoid(add(matmul(w, x), b))

# Let's use it to calculate the probability of the point (3, 2)^T
# being a blue point (i.e. lying above the line: sigmoid(w^T x + b) > 0.5)
session = Session()
print(session.run(p, {x: [3, 2]}))
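Since sigmoid(t) > 0.5 exactly when t > 0, thresholding the probability at 0.5 is the same as checking the sign of w^T x + b. A minimal sketch (the helper predict_blue is hypothetical, not part of the tutorial):

import numpy as np

def predict_blue(point, w=np.array([1, 1]), b=0):
    # sigmoid(w . x + b) > 0.5  <=>  w . x + b > 0
    return w @ np.asarray(point) + b > 0

print(predict_blue([3, 2]))    # True: classified as blue
print(predict_blue([-3, -2]))  # False: classified as red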