Code Example #1
File: lin_reg.py  Project: chusri/tech-interview
import time

import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'


def main():
	# Read in data from the .txt file
	data, n_samples = utils.read_birth_life_data(DATA_FILE)

	X = tf.placeholder(tf.float32, shape=(), name='X')
	Y = tf.placeholder(tf.float32, shape=(), name='Y')

	w = tf.get_variable('weights', initializer=tf.constant(0.))
	b = tf.get_variable('bias', initializer=tf.constant(0.))

	# Build linear regression model
	Y_predicted = tf.add(tf.multiply(w, X), b)

	loss = tf.square(tf.subtract(Y_predicted, Y))

	# Use gradient descent with learning rate of 0.001 to minimize loss
	optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

	start = time.time()

	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())

		# Train the model for 100 epochs
		for i in range(100):
			for x, y in data:
				sess.run(optimizer, feed_dict={X: x, Y: y})

		w_out, b_out = sess.run([w, b])
		print('w: %f b: %f' % (w_out, b_out))

	print('Time: %fs' % (time.time() - start))


if __name__ == '__main__':
	main()
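Every listing on this page leans on utils.read_birth_life_data, a helper from the Stanford CS20 course repo that none of the excerpts define. A minimal sketch of what such a reader might look like, assuming birth_life_2010.txt is tab-separated with a one-line header and columns (country, birth rate, life expectancy):

import numpy as np

def read_birth_life_data(filename):
    # Skip the header row, split on tabs, keep (birth_rate, life_expectancy).
    with open(filename, 'r') as f:
        lines = f.readlines()[1:]
    rows = [line.strip().split('\t') for line in lines]
    data = np.asarray([(float(r[1]), float(r[2])) for r in rows],
                      dtype=np.float32)
    return data, len(data)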
Code Example #2
import time

import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'


def model(loss_type="MSE"):

    # Step 1: read in data
    data, n_samples = utils.read_birth_life_data(DATA_FILE)

    # Step 2: create placeholders for X (birth_rate) and Y (life expectancy)
    X = tf.placeholder(tf.float32, name='X')
    Y = tf.placeholder(tf.float32, name='Y')

    # Step 3: create weight and bias, initialize to 0
    w = get_scope_variable('linreg', 'weights')
    b = get_scope_variable('linreg', 'bias')

    # Step 4: build the model to predict Y
    Y_predicted = w * X + b

    # Step 5: use the squared error as the loss function
    # Mean squared error
    if loss_type == "MSE":
        print("[INFO] Training with Square Loss")
        loss = tf.square(Y - Y_predicted, name='loss')
        writer = tf.summary.FileWriter('./graphs/linear_reg_MSE',
                                       tf.get_default_graph())
    else:
        print("[INFO] Training with Huber Loss")
        loss = huber_loss(Y, Y_predicted)
        writer = tf.summary.FileWriter('./graphs/linear_reg_HUBER',
                                       tf.get_default_graph())

    # Step 6: use gradient descent to minimize the loss
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=0.001).minimize(loss)

    start = time.time()
    with tf.Session() as sess:
        # Step 7: initialize the necessary variables, in this case w and b
        sess.run(tf.global_variables_initializer())

        # Step 8: Train the model for 100 epochs
        for i in range(100):
            total_loss = 0
            for x, y in data:
                # Session execute optimizer and fetch value of loss
                _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
                total_loss += l

            if i % 10 == 0:
                print('Epoch {0}: {1}'.format(i, total_loss / n_samples))

        # close the writer when done
        writer.close()

        # Step 9: output the value of W and b
        w_out, b_out = sess.run([w, b])

    print('[INFO] Training Time: %f seconds' % (time.time() - start))
    print()

    return w_out, b_out
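Example #2 calls two helpers that the listing never defines: get_scope_variable and huber_loss. The sketches below are assumptions, not the project's actual code: get_scope_variable is presumably a thin wrapper that creates a variable inside a named scope and reuses it on later calls, and huber_loss follows the standard Huber definition, quadratic for small residuals and linear beyond a threshold delta (the default of 14.0 is a guess borrowed from the CS20 course code).

import tensorflow as tf

def get_scope_variable(scope_name, var_name):
    # Create the variable on the first call, reuse it afterwards.
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
        return tf.get_variable(var_name, initializer=tf.constant(0.0))

def huber_loss(labels, predictions, delta=14.0):
    residual = tf.abs(labels - predictions)
    # Quadratic below the threshold, linear above it.
    small_res = 0.5 * tf.square(residual)
    large_res = delta * residual - 0.5 * tf.square(delta)
    return tf.where(residual < delta, small_res, large_res)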
Code Example #3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# Step 1: read in the data
data, n_samples = utils.read_birth_life_data(DATA_FILE)

# Step 2: create Dataset and iterator
dataset = tf.data.Dataset.from_tensor_slices((data[:, 0], data[:, 1]))

iterator = dataset.make_initializable_iterator()
X, Y = iterator.get_next()

# Step 3: create weight and bias, initialized to 0
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: build model to predict Y
Y_predicted = X * w + b

# Step 5: use the squared error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')
# loss = utils.huber_loss(Y, Y_predicted)
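Example #3 is cut off after the loss definition. Under the same TF1 conventions as the neighboring listings, training would plausibly continue as sketched below; the optimizer and the 100-epoch count are assumptions carried over from examples #1 and #2. Note there is no feed_dict: X and Y are wired to the iterator, which must be re-initialized at the start of every epoch.

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        sess.run(iterator.initializer)   # rewind the dataset for this epoch
        try:
            while True:
                sess.run(optimizer)      # pulls the next (X, Y) pair itself
        except tf.errors.OutOfRangeError:
            pass                         # one full pass over the data done
    w_out, b_out = sess.run([w, b])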
Code Example #4
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
import utils
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# read data
DATA_FILE = 'data/birth_life_2010.txt'
data, n_samples = utils.read_birth_life_data(DATA_FILE)  # data: a numpy array
dataset = tf.data.Dataset.from_tensor_slices((data[:, 0], data[:, 1]))
iterator = dataset.make_initializable_iterator()
X, Y = iterator.get_next()

# initialize parameters
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# build model:
y_pred = X * w + b
loss = tf.square(y_pred - Y, name='loss')

# optimize
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=0.001).minimize(loss)

# train
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # train for 100 epochs, mirroring the other examples; the initializable
    # iterator must be re-initialized at the start of each epoch
    for i in range(100):
        sess.run(iterator.initializer)
        total_loss = 0
        try:
            while True:
                _, l = sess.run([optimizer, loss])
                total_loss += l
        except tf.errors.OutOfRangeError:
            pass
        if i % 10 == 0:
            print('Epoch {0}: {1}'.format(i, total_loss / n_samples))

    w_out, b_out = sess.run([w, b])
    print('w: %f, b: %f' % (w_out, b_out))
Code Example #5
"""
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import utils

DATA_FILE = 'data/birth_life_2010.txt'

# Step 1: read in the data
data, n_samples = utils.read_birth_life_data(DATA_FILE)

# Step 2: create Dataset and iterator
dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))

iterator = dataset.make_initializable_iterator()
X, Y = iterator.get_next()

# Step 3: create weight and bias, initialized to 0
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: build model to predict Y
Y_predicted = X * w + b

# Step 5: use the squared error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')
Code Example #6
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import utils
import tensorflow as tf
import matplotlib.pyplot as plt

data, nsamples = utils.read_birth_life_data('birth_life_2010.txt')

X = tf.placeholder(tf.float32, name='x')
Y = tf.placeholder(tf.float32, name='y')

weight = tf.get_variable('weight', [],
                         initializer=tf.constant_initializer(0.0))
bias = tf.get_variable('bias', [], initializer=tf.constant_initializer(0.0))

pred = weight * X + bias

loss = tf.square(Y - pred)

optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=0.01).minimize(loss)

writer = tf.summary.FileWriter('./graphs/lin_reg', tf.get_default_graph())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(data.shape)
    for i in range(100):
        total_loss = 0.0
        for x, y in data:
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
            total_loss += l
        if i % 10 == 0:
            print('Epoch {0}: {1}'.format(i, total_loss / nsamples))
    writer.close()
Code Example #7
# Simple linear regression using placeholders
import os
import time
import matplotlib.pyplot as plt
import tensorflow as tf
import utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

DATA_FILE = 'birth_life_2010.txt'

# Step 1: read in data from the .txt file
data, n_samples = utils.read_birth_life_data(
    DATA_FILE)  # utils is a module written in advance

# Step 2: create placeholders for X (birth rate) and Y (life expectancy)
X = tf.placeholder(
    tf.float32, name='X'
)  # If the shape is not specified, you can feed a tensor of any shape.
Y = tf.placeholder(tf.float32, name='Y')

# Step 3: create weight and bias as variable, initialized to 0
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: build model to predict Y
Y_predicted = w * X + b  # Y_predicted is a tensor
'''
It's true that a Variable can be used any place a Tensor can, but the key differences between the two are that a
Variable maintains its state across multiple calls to run() and a Variable's value can be updated by backpropagation
(it can also be saved and restored, as per the documentation). These differences mean that you should think of a
Variable as representing your model's trainable parameters (for example, the weights and biases of a neural network),
whereas a Tensor represents the data being fed into the model and its intermediate representations as it flows
through the graph.
'''
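To make the Variable/Tensor distinction above concrete, here is a minimal standalone sketch (not part of the original example): the Variable keeps its state across run() calls, while the derived Tensor is recomputed from that state on demand.

import tensorflow as tf

counter = tf.get_variable('counter', initializer=tf.constant(0))
increment = tf.assign_add(counter, 1)  # an op that updates the Variable's state
doubled = counter * 2                  # a plain Tensor derived from it

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        sess.run(increment)              # state persists between these calls
    print(sess.run([counter, doubled]))  # [3, 6]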
Code Example #8
import os
# os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

import utils

# ROOT_PATH = '/Users/administrator/stanford-tensorflow-tutorials/examples/'
ROOT_PATH = '/opt/deeplearning/workspace/stanford-tensorflow-tutorials/examples/'
DATA_FILE = 'data/birth_life_2010.txt'

# Step 1: read in data from the .txt file
data, n_samples = utils.read_birth_life_data(os.path.join(ROOT_PATH, DATA_FILE))

# Step 2: create placeholders for X (birth rate) and Y (life expectancy)
# Remember both X and Y are scalars with type float
X = tf.placeholder(tf.float32, name="X")
Y = tf.placeholder(tf.float32, name="Y")

# Step 3: create weight and bias, initialized to 0.0
# Make sure to use tf.get_variable
w = tf.get_variable('weights', initializer=tf.constant(0.0))
b = tf.get_variable('bias', initializer=tf.constant(0.0))

# Step 4: build model to predict Y
# e.g. how would you arrive at Y_predicted given X, w, and b?
Y_predicted = tf.scalar_mul(w, X) + b
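Example #8 writes the model with tf.scalar_mul instead of the overloaded * used in the other listings; the two agree whenever w is a 0-D tensor, which tf.scalar_mul requires. A quick hypothetical check:

import tensorflow as tf

w = tf.constant(2.0)             # 0-D scalar, as tf.scalar_mul requires
X = tf.constant([1.0, 3.0])
with tf.Session() as sess:
    print(sess.run([tf.scalar_mul(w, X), w * X]))  # both give [2. 6.]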
Code Example #9
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf

import utils

# print(c.eval())  # we can use 'c.eval()' without explicitly stating a session
# sess.close()

# placeholder and feed_dict
# tf.placeholder(dtype, shape=None, name=None)  # when shape=None, a tensor of any shape can be fed
# a = tf.placeholder(tf.int32, shape=[3], name='name')
# b = tf.constant([5, 5, 5], tf.int32)
# c = a + b
# with tf.Session() as sess:
#     print(sess.run(c, feed_dict={a: [1, 2, 3]}))

# Use linear regression to predict average life expectancy
data_file = './datasets/birth_life_2010.txt'

# step 1: read in the dataset
data, n_samples = utils.read_birth_life_data(data_file)

# step 2: create placeholders for X (birth_rate) and Y (life_expectancy)
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

# step 3: create weight and bias, initialized to 0
w = tf.get_variable('Weight', initializer=tf.constant(0.0))
b = tf.get_variable('Bias', initializer=tf.constant(0.0))

# step 4: construct the model for prediction
predict_y = w * X + b

# step 5: use the squared error as the loss function
loss = tf.square(Y - predict_y, name='loss')
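Example #9 also stops at the loss. Following the same placeholder/feed_dict pattern described in its notes, the remaining steps would plausibly be as below (optimizer, epoch count, and log format are assumptions borrowed from examples #1 and #2):

# step 6: minimize the loss with gradient descent (assumed hyperparameters)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    # step 7: initialize w and b
    sess.run(tf.global_variables_initializer())
    # step 8: train for 100 epochs, feeding one (x, y) pair at a time
    for i in range(100):
        total_loss = 0
        for x, y in data:
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
            total_loss += l
        print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
    # step 9: read out the trained parameters
    w_out, b_out = sess.run([w, b])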