Example #1
def nn_add_one_layer(inputs, insize, outsize, activation_function=None):
    # One fully connected layer: weights, bias, then activation
    W = tf.Variable(tf.truncated_normal([insize, outsize]), dtype=tf.float32)
    b = tf.Variable(tf.zeros([1, outsize]) + 0.1)
    if activation_function is None:
        # ReLU is applied by default when no activation function is supplied
        return tf.nn.relu(tf.matmul(inputs, W) + b)
    else:
        return activation_function(tf.matmul(inputs, W) + b)
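A short usage sketch (the 784/256/10 sizes are illustrative assumptions, not from the source, and tensorflow is assumed imported as tf):
xs = tf.placeholder(tf.float32, [None, 784])
hidden = nn_add_one_layer(xs, 784, 256)                   # ReLU by default
probs = nn_add_one_layer(hidden, 256, 10, tf.nn.softmax)  # softmax output layer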
Example #2
import tensorflow as tf
import numpy as np

def bias_variable(shape):
	initial = tf.constant(0.1, shape=shape)
	return tf.Variable(initial)

# Sample data (assumed here so the snippet runs on its own)
test_list = [1., 2., 3.]
test_np = np.array([1., 2., 3.])

t1 = tf.Variable(test_list)
t2 = tf.Variable(test_np)

print(t1)
print(t2)

print(type(t1))
print(type(t2))

# %%

t1 = tf.constant(test_list)
t2 = tf.Variable(test_list)

# t3 = tf.constant(t2)  # would fail: tf.constant does not accept a Variable
t4 = tf.Variable(t1)

# %%
t1 = tf.convert_to_tensor(test_list)
t2 = tf.convert_to_tensor(test_np)

t3 = tf.Variable(test_list)
t4 = tf.convert_to_tensor(t3)

print(type(t3))
print(type(t4))

# %%
t1 = tf.constant(test_list)
t2 = tf.constant(test_list)
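A quick session check of the objects above (a sketch; the Variables must be initialized before reading them):
sess = tf.Session()
sess.run(tf.global_variables_initializer())  # needed for t3 and the other Variables
print(sess.run(t1), sess.run(t4))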
Example #4
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

num_points = 1000
vectors_set = []

# Generate points scattered around the line y = 0.1x + 0.3
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vectors_set.append([x1, y1])

x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]

w = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = w * x_data + b
loss = tf.reduce_mean(tf.square(y - y_data))

optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(8):
    sess.run(train)
    print(sess.run(w), sess.run(b))

# Plot the data once, after training, together with the fitted line
plt.plot(x_data, y_data, 'ro')
plt.plot(x_data, sess.run(w) * np.array(x_data) + sess.run(b))
plt.show()
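As a sanity check (not part of the original snippet), a closed-form least-squares fit should recover roughly the generating parameters w ≈ 0.1 and b ≈ 0.3:
w_fit, b_fit = np.polyfit(x_data, y_data, 1)  # degree-1 (linear) fit
print(w_fit, b_fit)  # expect values near 0.1 and 0.3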
Example #5
W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))

L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
L3 = tf.nn.relu(L3)
L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
L3 = tf.reshape(L3, [-1, 128 * 4 * 4])


W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 625], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.random_normal([625]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)

W5 = tf.get_variable("W5", shape=[625, 10], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L4, W5) + b2

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

training_epoch = 15
batch_size = 100

for epoch in range(training_epoch):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        # Assumed mini-batch step (X, Y, keep_prob are placeholders defined earlier in the full script)
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys, keep_prob: 0.7})
        avg_cost += c / total_batch
    print('Epoch:', epoch + 1, 'cost =', avg_cost)
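After training, test accuracy can be checked with a short sketch like this, assuming X accepts test images in the same shape used during training:
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1.0}))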
Example #6
# Placeholder filename (an assumption; the original snippet does not define filename_queue)
filename_queue = tf.train.string_input_producer(['data.csv'], shuffle=False)

reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

# Default values in case of empty columns; also specifies the type of the decoded result
record_defaults = [[0.], [0.], [0.], [0.]]
xy = tf.decode_csv(value, record_defaults=record_defaults)

# Collect batches of CSV rows
train_x_batch, train_y_batch = \
    tf.train.batch([xy[0:-1], xy[-1:]], batch_size=10)

# Placeholders for tensors that will always be fed
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

#Hypothesis
hypothesis = tf.matmul(X, W) + b

#Simplified cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))

#Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 1e-5)
train = optimizer.minimize(cost)

#Launch the graph in a session
sess = tf.Session()
#Initialize global variables in the graph
sess.run(tf.global_variables_initializer())
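Note that tf.train.batch only yields data once queue runners are started; a minimal training loop under that assumption:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

for step in range(2001):
    x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
    cost_val, _ = sess.run([cost, train], feed_dict={X: x_batch, Y: y_batch})
    if step % 200 == 0:
        print(step, 'cost:', cost_val)

coord.request_stop()
coord.join(threads)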
Example #7
File: Basic_GAN.py, Project: Justin-A/GANs
# Placeholders (assumed; the snippet uses X and Z without defining them)
X = tf.placeholder(tf.float32, [None, 28 * 28])  # real MNIST images
Z = tf.placeholder(tf.float32, [None, 128])      # noise input to the generator

# Generator
G_Weight1 = tf.Variable(tf.random_normal([128, 256], stddev = 0.01))
G_Weight2 = tf.Variable(tf.random_normal([256, 28 * 28], stddev = 0.01))
G_Bias1 = tf.Variable(tf.zeros([256]))
G_Bias2 = tf.Variable(tf.zeros([28 * 28]))

def generator(noise):
	G_Hidden_Layer = tf.nn.relu(tf.matmul(noise, G_Weight1) + G_Bias1)
	output = tf.nn.sigmoid(tf.matmul(G_Hidden_Layer, G_Weight2) + G_Bias2)
	return output

# Discriminator
D_Weight1 = tf.Variable(tf.random_normal([28 * 28, 256], stddev = 0.01))
D_Weight2 = tf.Variable(tf.random_normal([256, 1], stddev = 0.01))
D_Bias1 = tf.Variable(tf.zeros([256]))
D_Bias2 = tf.Variable(tf.zeros([1]))

def discriminator(inputs):
	D_Hidden_Layer = tf.nn.relu(tf.matmul(inputs, D_Weight1) + D_Bias1)
	output = tf.nn.sigmoid(tf.matmul(D_Hidden_Layer, D_Weight2) + D_Bias2)
	return output

# Main
G = generator(Z)
# Standard GAN losses: the discriminator maximizes log D(X) + log(1 - D(G))
Loss_D = -tf.reduce_mean(tf.log(discriminator(X)) + tf.log(1 - discriminator(G)))
Loss_G = -tf.reduce_mean(tf.log(discriminator(G)))

Train_D = tf.train.AdamOptimizer(learning_rate = 0.0001).minimize(Loss_D, var_list = [D_Weight1, D_Bias1, D_Weight2, D_Bias2])
Train_G = tf.train.AdamOptimizer(learning_rate = 0.0001).minimize(Loss_G, var_list = [G_Weight1, G_Bias1, G_Weight2, G_Bias2])
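A minimal alternating training loop, sketched assuming numpy as np and an MNIST feed (mnist.train.next_batch) are available:
sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(1000):
    batch_x, _ = mnist.train.next_batch(100)
    noise = np.random.normal(size = (100, 128))
    # Alternate updates: discriminator first, then generator
    _, d_loss = sess.run([Train_D, Loss_D], feed_dict = {X: batch_x, Z: noise})
    _, g_loss = sess.run([Train_G, Loss_G], feed_dict = {Z: noise})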
Example #8
def bias_variable(shape, type, metadata):
    # `type` is a callable that builds the initial tensor (note: it shadows the built-in)
    return tf.Variable(type(shape, metadata))
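One plausible call, shown only as an illustration (not from the source): pass a factory such as tf.zeros with a dtype as the metadata argument.
b = bias_variable([10], tf.zeros, tf.float32)  # same as tf.Variable(tf.zeros([10], tf.float32))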