Example #1
def simple_classification():
    num_classes = 10

    x = tf.placeholder(tf.float32, (None, 784))

    # one fully connected layer: 784 pixel inputs -> 10 class scores
    b = tf.Variable(tf.zeros((num_classes,)))
    W = tf.Variable(tf.random_uniform((784, num_classes), -1, 1))

    h = tf.nn.relu(tf.matmul(x, W) + b)

    prediction = tf.nn.softmax(h)

    label = tf.placeholder(tf.float32, (None, num_classes))

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(label * tf.log(prediction), axis=1))

    # 0.5 is the learning rate
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    for i in range(1000):
        batch_x, batch_label = data.next_batch()
        sess.run(train_step, feed_dict={x: batch_x, label: batch_label})
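
The hand-rolled label * tf.log(prediction) loss can produce NaNs once a predicted probability underflows to zero. A minimal sketch of the same loss using the fused TensorFlow 1.x op instead, assuming the raw scores tf.matmul(x, W) + b are kept around as logits (a name not in the original snippet):

    logits = tf.matmul(x, W) + b
    # fused softmax + cross-entropy: numerically stable, one value per example
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)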
Example #2
def train():
    sess = tf.Session()

    model = ConvModel1()
    data_reader = DataReader()
    loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))))
    train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    for epoch in range(NUM_EPOCH):
        for train_batch in data_reader.train_batch(BATCH_SIZE):
            sess.run(train_step, feed_dict={
                model.x: train_batch[0],
                model.y_: train_batch[1]
            })
            # loss on the batch that was just used for the update
            train_error = sess.run(loss, feed_dict={
                model.x: train_batch[0],
                model.y_: train_batch[1]
            })

        if epoch % 10 == 0:
            cv_set = data_reader.cv_set()
            cv_error = sess.run(loss, feed_dict={
                model.x: cv_set[0],
                model.y_: cv_set[1]
            })
            print("Step: %d, train loss: %g, cv loss: " % epoch, train_error,
                  cv_error)

    checkpoint_path = os.path.join(CKPT_DIR, CKPT_FILE)
    filename = saver.save(sess, checkpoint_path)
    print('Model saved in file: %s' % filename)
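
Once training finishes, the checkpoint written by saver.save can be loaded back into a fresh session with tf.train.Saver.restore. A minimal sketch, assuming the same CKPT_DIR/CKPT_FILE constants and ConvModel1 graph as above (the helper name restore_and_predict is illustrative, not from the original):

def restore_and_predict(inputs):
    # rebuild the graph, then restore the variables saved during train()
    model = ConvModel1()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, os.path.join(CKPT_DIR, CKPT_FILE))
        return sess.run(model.y, feed_dict={model.x: inputs})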
Example #3
def train_neural_network(x):
    prediction = recurrent_neural_network(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                epoch_x = epoch_x.reshape((batch_size, n_chunks, chunk_size))

                _, c = sess.run([optimizer, cost],
                                feed_dict={
                                    x: epoch_x,
                                    y: epoch_y
                                })
                epoch_loss += c

            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:',
                  epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print(
            'Accuracy:',
            accuracy.eval({
                x:
                mnist.test.images.reshape((-1, n_chunks, chunk_size)),
                y:
                mnist.test.labels
            }))
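
The snippet calls a recurrent_neural_network(x) builder that is not shown. A minimal sketch of what such a function could look like in TensorFlow 1.x, assuming the usual rnn_size, n_chunks, chunk_size and n_classes hyperparameters (these names are assumptions, not taken from the snippet):

def recurrent_neural_network(x):
    # x arrives as (batch_size, n_chunks, chunk_size); static_rnn expects a
    # list of n_chunks tensors, each of shape (batch_size, chunk_size)
    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, chunk_size])
    x = tf.split(x, n_chunks, 0)

    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
    outputs, states = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # classify from the output of the last time step
    return tf.matmul(outputs[-1], layer['weights']) + layer['biases']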
Example #4
import tensorflow as tf

state = tf.Variable(0, name='counter')
# print(state.name)
one = tf.constant(1)

new_value = tf.add(state, one)
update = tf.assign(state, new_value)

# variables must be explicitly initialized before use
init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))
Example #5
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Create Model
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model

activation = tf.add(tf.multiply(X, W), b)

# minimize the squared error
cost = tf.reduce_sum(tf.pow(Y - activation, 2) / (2 * n_samples))
optimiser = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# initialize all the variables
init = tf.global_variables_initializer()


# launch the graph
with tf.Session() as sess:
    sess.run(init)
    
    # fit all training data
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimiser, feed_dict={X: x, Y: y})
            
        # display logs each epoch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y: train_Y})),
                  "W=", sess.run(W), "b=", sess.run(b))
Example #6
import os
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
import driving_data
import model

log = './save'
sess = tf.InteractiveSession()

L2NormConst = 0.001
train_variables = tf.trainable_variables()

loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) \
       + tf.add_n([tf.nn.l2_loss(v) for v in train_variables]) * L2NormConst
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
sess.run(tf.global_variables_initializer())

# summaries and checkpointing
tf.summary.scalar("loss", loss)
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)
logs_path = './logs'
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())


epochs = 30
batch_size = 100

for epoch in range(epochs):
    for i in range(int(driving_data.num_of_images / batch_size)):
        x, y = driving_data.LoadBatchFromTraining(batch_size)
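
The loop as excerpted loads batches but stops before the optimization, summary, and checkpoint steps. A sketch of how the body could continue, using only the ops defined above and assuming model.x is the image input placeholder (model.x is not named in this snippet):

        # one optimization step on the batch
        train_step.run(feed_dict={model.x: x, model.y_: y})

        # write summaries every few batches
        if i % 10 == 0:
            summary = merged_summary_op.eval(feed_dict={model.x: x, model.y_: y})
            summary_writer.add_summary(summary, epoch * int(driving_data.num_of_images / batch_size) + i)

    # checkpoint once per epoch
    saver.save(sess, os.path.join(log, "model.ckpt"))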
Example #7
import tensorflow as tf

state = tf.Variable(0, name="counter")
one = tf.constant(1)

new_value = tf.add(state, one)
update = tf.assign(state, new_value)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))
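
As an aside, the add-then-assign pair can be collapsed with tf.assign_add, which increments the variable in place. A minimal sketch of the same counter (not part of the original example):

import tensorflow as tf

state = tf.Variable(0, name="counter")
update = tf.assign_add(state, 1)  # state += 1 each time the op runs

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(update))  # prints 1, 2, 3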
Example #8
    def train(self, config):
        d_optim = tf.train.AdamOptimizer(config.learning_rate,
                                         beta1=config.beta1).minimize(
                                             self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate,
                                         beta1=config.beta1).minimize(
                                             self.g_loss, var_list=self.g_vars)

        try:
            tf.global_variables_initializer().run()
        except AttributeError:
            # fall back to the pre-1.0 initializer name on older TensorFlow
            tf.initialize_all_variables().run()

        if config.G_img_sum:
            self.g_sum = merge_summary([
                self.z_sum, self.d_sum_, self.G_sum, self.d_loss_fake_sum,
                self.g_loss_sum
            ])
        else:
            self.g_sum = merge_summary([
                self.z_sum, self.d_sum_, self.d_loss_fake_sum, self.g_loss_sum
            ])

        self.d_sum = merge_summary(
            [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
        self.writer = SummaryWriter(os.path.join(self.out_dir, "logs"),
                                    self.sess.graph)

        sample_z = gen_random(config.z_dist,
                              size=(self.sample_num, self.z_dim))

        sample_files = self.data[0:self.sample_num]
        sample = [
            get_image(sample_file,
                      input_height=self.input_height,
                      input_width=self.input_width,
                      resize_height=self.output_height,
                      resize_width=self.output_width,
                      crop=self.crop,
                      grayscale=self.grayscale) for sample_file in sample_files
        ]
        if (self.grayscale):
            sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]
        else:
            sample_inputs = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            counter = checkpoint_counter
            print("load success")
        else:
            print("load failed")

        for epoch in xrange(config.epoch):
            self.data = glob(
                os.path.join(config.data_dir, config.dataset,
                             self.input_fname_pattern))
            np.random.shuffle(self.data)
            batch_idxs = min(len(self.data),
                             config.train_size) // config.batch_size

            for idx in xrange(0, int(batch_idxs)):
                batch_files = self.data[idx * config.batch_size:(idx + 1) *
                                        config.batch_size]
                batch = [
                    get_image(batch_file,
                              input_height=self.input_height,
                              input_width=self.input_width,
                              resize_height=self.output_height,
                              resize_width=self.output_width,
                              crop=self.crop,
                              grayscale=self.grayscale)
                    for batch_file in batch_files
                ]
                if self.grayscale:
                    batch_images = np.array(batch).astype(np.float32)[:, :, :,
                                                                      None]
                else:
                    batch_images = np.array(batch).astype(np.float32)

                batch_z = gen_random(config.z_dist, size=[config.batch_size, self.z_dim]) \
                 .astype(np.float32)

                _, summary_str = self.sess.run([d_optim, self.d_sum],
                                               feed_dict={
                                                   self.inputs: batch_images,
                                                   self.z: batch_z
                                               })
                self.writer.add_summary(summary_str, counter)

                _, summary_str = self.sess.run([g_optim, self.g_sum],
                                               feed_dict={self.z: batch_z})
                self.writer.add_summary(summary_str, counter)

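                # run g_optim several extra times per discriminator update,
                # presumably to keep d_loss from collapsing to zero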
                _, summary_str = self.sess.run([g_optim, self.g_sum],
                                               feed_dict={self.z: batch_z})
                self.writer.add_summary(summary_str, counter)

                _, summary_str = self.sess.run([g_optim, self.g_sum],
                                               feed_dict={self.z: batch_z})
                self.writer.add_summary(summary_str, counter)

                errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                errD_real = self.d_loss_real.eval({self.inputs: batch_images})
                errG = self.g_loss.eval({self.z: batch_z})

                print("%8d Epoch:[%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
                 % (counter, epoch, config.epoch, idx, batch_idxs,
                 time.time() - start_time, errD_fake+errD_real, errG))

                if np.mod(counter, config.sample_freq) == 0:
                    try:
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_z,
                                self.inputs: sample_inputs,
                            },
                        )
                        save_images(
                            samples, image_manifold_size(samples.shape[0]),
                            './{}/train_{:08d}.png'.format(
                                config.sample_dir, counter))
                        print("[Sample] d_loss: %.8f, g_loss: %.8f" %
                              (d_loss, g_loss))
                    except:
                        print("one pic error!")

                if np.mod(counter, config.ckpt_freq) == 0:
                    self.save(config.checkpoint_dir, counter)

                counter += 1
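
gen_random, merge_summary, SummaryWriter, get_image, save_images and image_manifold_size come from the surrounding project and are not shown here. Purely as an illustration, a hypothetical gen_random could look like the sketch below, assuming config.z_dist selects between normal and uniform noise (the mode strings are assumptions):

def gen_random(mode, size):
    # hypothetical helper: sample the latent vector z from the requested distribution
    if mode == 'normal01':
        return np.random.normal(0, 1, size=size)
    return np.random.uniform(-1, 1, size=size)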