Example #1
    def show_bias(self):
        # Plot the recorded biases over the training steps (single-weight model only).
        if len(self.weights) == 1:
            mp = MyPlot()
            mp.set_labels('Step', 'Bias')
            mp.show_list(self.biases)
        else:
            print('Cannot show the bias! Call print_bias method.')
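These helper methods (and the scripts below) rely on a MyPlot class with set_labels(xlabel, ylabel) and show_list(values) that is not shown in this section. A minimal sketch of such a helper, assuming it is a thin wrapper around matplotlib (everything beyond those two method names is an assumption, not the original class):

import matplotlib.pyplot as plt

class MyPlot:
    # Hypothetical sketch of the MyPlot helper used in these examples;
    # the real class may differ.
    def __init__(self):
        self.xlabel = ''
        self.ylabel = ''

    def set_labels(self, xlabel, ylabel):
        # Remember the axis labels to apply when the plot is drawn.
        self.xlabel = xlabel
        self.ylabel = ylabel

    def show_list(self, values):
        # Plot the values against their index (the training step).
        plt.plot(values)
        plt.xlabel(self.xlabel)
        plt.ylabel(self.ylabel)
        plt.show()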
Example #2
    def show_weight(self):
        print('weights =', self.weights)

        if len(self.weights[0]) == 1:
            mp = MyPlot()
            mp.set_labels('Step', 'Weight')
            mp.show_list(self.weights)
        else:
            print('Cannot show the weight! Call print_weight method.')
Example #3
import tensorflow as tf
from myplot import MyPlot

# x (inputs, shape [N, 2]) and y (targets, shape [N, 1]) are assumed to be
# defined earlier; see the sketch after this example.
#----- a neuron
w = tf.Variable(tf.random_normal([2, 1]))
b = tf.Variable(tf.random_normal([1]))
hypo = tf.matmul(x, w) + b
#-----

cost = tf.reduce_mean((hypo - y) * (hypo - y))   # mean squared error

train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

costs = []

for i in range(2001):
    sess.run(train)

    if i % 50 == 0:
        print('hypo:', sess.run(hypo), '|', sess.run(w), sess.run(b),
              sess.run(cost))

        costs.append(sess.run(cost))

# predict the output for a new input [4., 4.]
hypo2 = tf.matmul([[4., 4.]], w) + b
print(sess.run(hypo2))

p = MyPlot()
p.show_list(costs)
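Example #3 uses x and y without defining them. A minimal sketch of the setup it assumes, with hypothetical values, is a [N, 2] input tensor and a [N, 1] target tensor:

import tensorflow as tf

# Hypothetical training data for Example #3; shapes match the [2, 1] weight matrix.
x = tf.constant([[1., 1.], [2., 2.], [3., 3.]])   # inputs, shape [3, 2]
y = tf.constant([[2.], [4.], [6.]])               # targets, shape [3, 1]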
Example #4
    def show_error(self):
        mp = MyPlot()
        mp.set_labels('Step', 'Error')
        mp.show_list(self.costs)
Example #5
import tensorflow as tf
from myplot import MyPlot

# hypo, cost, and y_data are assumed to be defined earlier (a sigmoid model over
# 4 samples); see the sketch after this example.
train = tf.train.GradientDescentOptimizer(learning_rate=0.2).minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

errors = []
for step in range(10001):
    sess.run(train)  # run the computation graph: forward prop over the 4 samples, compute the error, and keep training while it is not small enough

    if step % 500 == 0:
        #print(step, sess.run(cost))
        errors.append(sess.run(cost))

#----- testing(classification)
predicted = tf.cast(hypo > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(predicted, y_data), dtype=tf.float32))

h = sess.run(hypo)
print("\nHypo: ", h)

p = sess.run(predicted)
print("Predicted: ", p)

a = sess.run(accuracy)
print("Accuracy(%): ", a * 100)

plot = MyPlot()
plot.show_list(errors)
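Example #5 references hypo, cost, and y_data without defining them; the 0.5 threshold implies a sigmoid output, and the loop comment mentions 4 samples. A minimal sketch of a matching model, with hypothetical data and a cross-entropy cost (not necessarily the author's original definitions):

import tensorflow as tf

# Hypothetical binary-classification setup assumed by Example #5.
x_data = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]   # 4 samples, 2 features each
y_data = [[0.], [0.], [0.], [1.]]                    # hypothetical 0/1 labels

w = tf.Variable(tf.random_normal([2, 1]))
b = tf.Variable(tf.random_normal([1]))
hypo = tf.sigmoid(tf.matmul(x_data, w) + b)          # output in (0, 1)

y = tf.constant(y_data)                              # labels as a tensor for the cost
cost = -tf.reduce_mean(y * tf.log(hypo) + (1. - y) * tf.log(1. - hypo))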
Example #6
import tensorflow as tf
from myplot import MyPlot

x_data = [1]
y_data = [1]

#----- a neuron
w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.random_normal([1]))
hypo = w * x_data + b
#-----

cost = (hypo - y_data) ** 2   # squared error for the single sample

train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

costs = []

for i in range(1001):
    sess.run(train)

    if i % 50 == 0:
        print(sess.run(w), sess.run(b), sess.run(cost))
        costs.append(sess.run(cost))

gildong = MyPlot()
gildong.show_list(costs)