Example #1
def main():
    Xtrain, Ytrain, XCV, YCV, Xtest, Ytest, categories = data.load_data(
        shuffle=False)
    input_x, input_y, keep_prob, cost, optimizer, accuracy = build_net()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)  # pass the config so allow_growth takes effect
    init = tf.global_variables_initializer()
    sess.run(init)

    for epoch in range(epoch_size):
        for batch_x, batch_y in data.next_batch(Xtrain, Ytrain, batch_size):
            loss, _ = sess.run([cost, optimizer],
                               feed_dict={
                                   input_x: batch_x,
                                   input_y: batch_y,
                                   keep_prob: 0.5
                               })
        if epoch % 1 == 0:  # evaluate every epoch
            print("epoch %d: %f" % (epoch, loss))
            acc_count = 0
            acc_sum = 0.0
            for batch_x, batch_y in data.next_batch(XCV, YCV, batch_size):
                CV_acc = sess.run(accuracy,
                                  feed_dict={
                                      input_x: batch_x,
                                      input_y: batch_y,
                                      keep_prob: 1.0
                                  })
                acc_count += 1
                acc_sum += CV_acc
            print("CV accuracy: %.2f%%" % (acc_sum / acc_count * 100))
            print()
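Examples #1, #2, #4, and #6 all iterate over a data.next_batch() helper that is not shown. A minimal sketch of the generator form those loops assume, with X and Y as equal-length arrays (this helper is an illustration, not the original):

def next_batch(X, Y, batch_size):
    # Yield successive (batch_x, batch_y) slices of the data;
    # the final, possibly shorter, slice is included as well.
    for start in range(0, len(X), batch_size):
        yield X[start:start + batch_size], Y[start:start + batch_size]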
Example #2
def main():
    Xtrain, Ytrain, XCV, YCV, Xtest, Ytest, categories = data.load_data(
        shuffle=False, size=51)
    input_x, input_y, keep_prob, cost, optimizer, accuracy, output_y = build_net()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)  # pass the config so allow_growth takes effect
    saver = tf.train.Saver()  # needed for the save below
    init = tf.global_variables_initializer()
    sess.run(init)

    for epoch in range(epoch_size):
        for batch_x, batch_y in data.next_batch(Xtrain, Ytrain, batch_size):
            loss, _ = sess.run([cost, optimizer],
                               feed_dict={
                                   input_x: batch_x,
                                   input_y: batch_y,
                                   keep_prob: 0.5
                               })
        print("epoch %d: %f" % (epoch, loss))
        CV_acc = sess.run(accuracy,
                          feed_dict={
                              input_x: XCV,
                              input_y: YCV,
                              keep_prob: 1.0
                          })
        print("CV accuracy: %.2f%%" % (CV_acc * 100))
        print()
        if CV_acc > 0.93:
            saver.save(sess, "model/shaonet.model")
            break
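The build_net() helper these examples call is also not included. A minimal sketch of a TF1 graph builder that returns the six-tuple Example #1 unpacks (Example #2's variant additionally returns the output tensor); the layer sizes, activation, and optimizer here are illustrative assumptions, not the original network:

import tensorflow as tf

def build_net(input_dim=784, num_classes=10, learning_rate=1e-3):
    # Placeholders for flat feature vectors, one-hot labels, and the dropout keep rate.
    input_x = tf.placeholder(tf.float32, [None, input_dim])
    input_y = tf.placeholder(tf.float32, [None, num_classes])
    keep_prob = tf.placeholder(tf.float32)

    # One hidden layer with dropout, purely for illustration.
    hidden = tf.layers.dense(input_x, 256, activation=tf.nn.relu)
    hidden = tf.nn.dropout(hidden, keep_prob)
    logits = tf.layers.dense(hidden, num_classes)

    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=input_y, logits=logits))
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(input_y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    return input_x, input_y, keep_prob, cost, optimizer, accuracy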
Example #3
def train():
    tf.global_variables_initializer().run()

    could_load, checkpoint_counter = load()
    if could_load:
        start_epoch = checkpoint_counter // num_batches
        start_batch_id = checkpoint_counter - start_epoch * num_batches
        counter = checkpoint_counter
        print(" [*] Load SUCCESS")
        print(" [*] Load SUCCESS")
    else:
        start_epoch = 0
        start_batch_id = 0
        counter = 1
        print(" [!] Load failed...")

    for i in range(start_epoch, 60):
        for j in range(start_batch_id, 12000 // batch_size):

            x_batch, y_batch = next_batch()
            feed_dict = {img: x_batch, label: y_batch}

            _, loss, pred1 = sess.run([train_step, cross_entropy_loss, pred],
                                      feed_dict=feed_dict)

            print('epoch', i, '| loss', loss)
            counter += 1
        start_batch_id = 0
        saver.save(sess, './checkpoint/unet.ckpt', global_step=counter)
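The load() helper Example #3 calls is not shown either; it evidently restores the latest checkpoint and recovers the step counter from the checkpoint filename. A minimal sketch of that convention (checkpoints saved as unet.ckpt-<counter>, with saver and sess taken from the enclosing scope):

import tensorflow as tf

def load(checkpoint_dir='./checkpoint'):
    ckpt = tf.train.latest_checkpoint(checkpoint_dir)
    if ckpt is None:
        return False, 0
    saver.restore(sess, ckpt)
    # The counter is the '-<n>' suffix that Saver appends when saving with global_step.
    counter = int(ckpt.split('-')[-1])
    return True, counter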
Example #4
            # check summary shape and value
            val_acc, val_loss, pred = sess.run([accuracy, cost, pred_op],
                                               feed_dict=test_feedDict)
            val_acc_mean.append(val_acc)
            val_loss_mean.append(val_loss)
            pred_all.append(pred)
        val_acc_mean = np.mean(np.asarray(val_acc_mean))
        val_loss_mean = np.mean(np.asarray(val_loss_mean))
        summary = tf.Summary(value=[
            tf.Summary.Value(tag='Test batch_size 1 loss',
                             simple_value=float(val_loss_mean)),
            tf.Summary.Value(tag='Test batch_size 1 acc',
                             simple_value=float(val_acc_mean)),
            tf.Summary.Value(tag='Train batch_size 1 loss',
                             simple_value=float(train_loss)),
            tf.Summary.Value(tag='Train batch_size 1 acc',
                             simple_value=float(train_acc))
        ])
        writer.add_summary(summary, step)
        print('Validation Batch Size : 1 Val accuracy : {} loss : {}'.format(
            val_acc_mean, val_loss_mean))

    utils.show_progress(step, max_iter)
    batch_xs, batch_ys = data.next_batch(train_imgs, train_labs, batch_size)
    train_acc, train_loss, _ = sess.run([accuracy, cost, train_op],
                                        feed_dict={
                                            x_: batch_xs,
                                            y_: batch_ys,
                                            phase_train: True
                                        })
Example #5
    def train(self):
        '''
        Train the two stacked autoencoders layer by layer, then the softmax output layer.
        '''
        # initialize the hyperparameters
        data_set_num = len(self.input)
        batch_size = 512
        data_length = len(self.input[0])  # this can be automated in a later version
        iteration = data_set_num // batch_size
        
        h1_num = 100 
        h2_num = 50
        learning_rate1 = 0.01
        learning_rate2 = 0.01
        learning_rate3 = 0.01
        
        # initialize the parameters
        W1 = weight_variable([data_length, h1_num])
        b1 = bias_variable([h1_num])
        W1_ = weight_variable([h1_num, data_length])
        b1_ = bias_variable([data_length])
        
        W2 = weight_variable([h1_num, h2_num])
        b2 = bias_variable([h2_num])
        W2_ = weight_variable([h2_num, h1_num])
        b2_ = bias_variable([h1_num])
        
        W_out = weight_variable([h2_num, self.class_num])
        b_out = bias_variable([self.class_num])
        
        # construct the graph of the first encoder
        X1 = tf.placeholder("float", [None, data_length])  # None means the batch size is variable
        h1_out = tf.nn.sigmoid(tf.matmul(X1, W1) + b1)
        X1_ = tf.nn.sigmoid(tf.matmul(h1_out, W1_) + b1_)

        # squared reconstruction error, averaged over the batch
        cost1 = tf.reduce_sum(tf.square(X1_ - X1)) / batch_size
        train_1 = tf.train.GradientDescentOptimizer(learning_rate1).minimize(cost1)
        sess = tf.Session()
        sess.run(tf.variables_initializer([W1, b1, W1_, b1_]))

        # train the first encoder
        for i in range(iteration):
            batch_x, batch_y = data_set.next_batch(batch_size)
            sess.run(train_1, feed_dict={X1: batch_x})
        
        # prepare the input of the next layer
        self.input = tf.to_float(np.reshape(self.input, [data_set_num, data_length]))  # reshape turns the list into an array
        encoder1_out = tf.nn.sigmoid(tf.matmul(self.input, W1) + b1)
        input2 = encoder1_out

        # construct the graph of the second encoder
        X2 = tf.placeholder("float", [None, h1_num])
        h2_out = tf.nn.sigmoid(tf.matmul(X2, W2) + b2)
        X2_ = tf.nn.sigmoid(tf.matmul(h2_out, W2_) + b2_)

        cost2 = tf.reduce_sum(tf.square(X2_ - X2)) / batch_size
        train_2 = tf.train.GradientDescentOptimizer(learning_rate2).minimize(cost2)
        sess.run(tf.variables_initializer([W2, b2, W2_, b2_]))
        
        # train the second encoder
        for i in range(iteration):
            batch_x, batch_y = dt.next_batch(input2, batch_size)
            sess.run(train_2, feed_dict={X2: batch_x})
        
        # prepare the input of the output layer
        encoder2_out = tf.nn.sigmoid(tf.matmul(input2, W2) + b2)
        input3 = encoder2_out

        # construct the output layer
        X_out = tf.placeholder("float", shape=[None, h2_num])
        Y_out = tf.nn.softmax(tf.matmul(X_out, W_out) + b_out)
        Y_label = tf.placeholder("float", shape=[None, self.class_num])
        # cross-entropy, averaged over the batch (note the minus sign)
        cost_out = -tf.reduce_sum(Y_label * tf.log(Y_out)) / batch_size
        train_out = tf.train.GradientDescentOptimizer(learning_rate3).minimize(cost_out)
        sess.run(tf.variables_initializer([W_out, b_out]))
        
        # evaluate
        correct_prediction = tf.equal(tf.argmax(Y_out, 1), tf.argmax(Y_label, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        
        # train, and report the accuracy as training proceeds
        for i in range(iteration):
            batch_X, batch_Y = next_batch(input3, self.label, batch_size)
            sess.run(train_out, feed_dict={X_out: batch_X, Y_label: batch_Y})
            print("the accuracy of iteration No.%d is %f"
                  % (i, accuracy.eval(session=sess,
                                      feed_dict={X_out: batch_X, Y_label: batch_Y})))
        
        # store the parameters
        W_all = [W1, W1_, W2, W2_, W_out]
        b_all = [b1, b1_, b2, b2_, b_out]
        self.W = copy.deepcopy(W_all)
        self.b = copy.deepcopy(b_all)
    
    @property
    def predict(self):
        
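Example #5 also assumes weight_variable and bias_variable helpers; a minimal sketch of the usual TF1 idiom for them:

import tensorflow as tf

def weight_variable(shape):
    # Small random initial weights to break symmetry.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    # Small constant positive bias.
    return tf.Variable(tf.constant(0.1, shape=shape))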
Example #6
        size=int(input_x.shape[1]))

    loss_summary = tf.summary.scalar('loss', cost)
    accuracy_summary = tf.summary.scalar('accuracy', accuracy)

    # start sess
    sess = tf.Session()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter('%s_logs/' % args.network, sess.graph)
    init = tf.global_variables_initializer()
    sess.run(init)

    for epoch in range(epoch_size):
        print()
        for batch, (batch_x, batch_y) in enumerate(
                data.next_batch(Xtrain, Ytrain, batch_size)):
            loss, _ = sess.run([cost, optimizer],
                               feed_dict={
                                   input_x: batch_x,
                                   input_y: batch_y,
                                   keep_prob: 0.5
                               })
            if batch % 10 == 0:
                log.log("epoch %d batch %d: %f" % (epoch, batch, loss))
                CV_acc = sess.run(accuracy,
                                  feed_dict={
                                      input_x: XCV,
                                      input_y: YCV,
                                      keep_prob: 1.0
                                  })
                log.log("CV    accuracy: %.2f%%" % (CV_acc * 100))
Example #7
def load_data():
    while True:
        print('loading next batch...')
        x, y = data.next_batch(batch_size)
        data_queue.put((x, y))
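load_data() here is an infinite producer meant to run on a background thread, keeping data_queue filled while the training loop consumes from it (the commented-out data_queue.get() in Example #8 shows the consuming side). A minimal sketch of the wiring; the queue bound is an arbitrary choice:

import threading
from queue import Queue

data_queue = Queue(maxsize=10)  # bounded, so the loader blocks instead of running ahead

loader = threading.Thread(target=load_data)
loader.daemon = True  # let the process exit even if the loader is still blocked
loader.start()

# In the training loop:
#     batch_xs, batch_ys = data_queue.get()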
Example #8
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    #sess.run(init)
    #step = 1
    cpoint = tf.train.latest_checkpoint('.')
    saver.restore(sess, cpoint)
    step = int(cpoint.split('-')[1])
    print('restored from %d' % step)
    
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        print('get batch...', step, step * batch_size)
        #batch_xs, batch_ys = data_queue.get()
        batch_xs, batch_ys = data.next_batch(batch_size)

        # Fit training using batch data
        feed_dict={y: batch_ys, keep_prob: dropout}
        for inp_idx, place_x in enumerate(inputs):
            feed_dict[place_x] = [X[inp_idx] for X in batch_xs]
        sess.run(optimizer, feed_dict=feed_dict)
        
        if step % display_step == 0:
            # Calculate batch accuracy
            feed_dict={y: batch_ys, keep_prob: 1.}
            for inp_idx, place_x in enumerate(inputs):
                feed_dict[place_x] = [X[inp_idx] for X in batch_xs]
            acc = sess.run(accuracy, feed_dict=feed_dict)
            # Calculate batch loss
            feed_dict={y: batch_ys, keep_prob: 1.}
Example #9
saver = tf.train.Saver()

n_epochs = 50
batch_size = 500

data.import_data([1, 2, 3, 4, 5], ['test_batch'])

test_x_batch = data.testing_data[0]
test_y_batch = get_label(data.testing_data[1])

max_val = 0

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        x_batch = None
        y_batch = None
        for iteration in range(data.data_size() // batch_size):
            x_batch, y_batch = data.next_batch(batch_size)
            y_batch = get_label(y_batch)
            sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: x_batch, y: y_batch})  # accuracy on the last training batch only
        acc_val = accuracy.eval(feed_dict={X: test_x_batch, y: test_y_batch})
        if acc_val > max_val:
            max_val = acc_val
            saver.save(sess, './models/best.ckpt')

        print(epoch, "Train:", acc_train, "Val:", acc_val)

    save_path = saver.save(sess, './models/model.ckpt')
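get_label() in Example #9 is not shown; given that the labels come from CIFAR-style batch files and are fed to a classification target, it plausibly one-hot encodes integer class ids. A sketch under that assumption (num_classes=10 is a guess):

import numpy as np

def get_label(labels, num_classes=10):
    # Turn a vector of integer class ids into one-hot rows.
    labels = np.asarray(labels, dtype=np.int64)
    one_hot = np.zeros((labels.size, num_classes), dtype=np.float32)
    one_hot[np.arange(labels.size), labels] = 1.0
    return one_hot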