Example #1
import numpy as np
import tensorflow as tf


def train(param, model):

    # Launch the graph
    with tf.Session() as sess:
        sess.run(model.init)
        step = 0
        # Keep training until max iterations are reached
        while step < param.training_iters:

            # Generate training data (the full dataset is used as one batch)
            batch_x = param.x_train
            batch_y = param.y_train
            batch_x = batch_x.reshape(
                (param.batch_size, param.n_steps, param.n_input))
            batch_y = np.reshape(batch_y, (-1, param.n_classes))

            fd = {model.xx: batch_x, model.y: batch_y}

            # Run optimization op (backprop)
            sess.run(model.optimizer, feed_dict=fd)
            # Get the gradient of the cost w.r.t. the input (the residual)
            grads_wrt_input = sess.run(tf.gradients(model.cost, model.x),
                                       feed_dict=fd)
            residual = np.concatenate(grads_wrt_input, axis=1)
            print(residual[0, :])

            if step % param.display_step == 0:
                # Calculate batch accuracy
                acc = sess.run(model.accuracy, feed_dict=fd)
                # Calculate batch loss
                loss = sess.run(model.cost, feed_dict=fd)
                print("Iter " + str(step) + ", Minibatch Loss= " + \
                      "{:.6f}".format(loss) + ", Training Accuracy= " + \
                      "{:.5f}".format(acc))
            step += 1

        print("Optimization Finished!")

        # Evaluate on the test data
        test_data = param.x_test.reshape(
            (len(param.x_test), param.n_steps, param.n_input))
        test_label = np.reshape(param.y_test, (-1, param.n_classes))

        fd = {model.xx: test_data, model.y: test_label}
        loss = sess.run(model.cost, feed_dict=fd)

        print("{:.6f}".format(loss))
        print("Testing Accuracy:", \
            sess.run(accuracy, feed_dict=fd))

        # Get the predicted class labels and probability values
        classifier_value = sess.run([model.classifier_collection],
                                    feed_dict={model.xx: test_data})

        proba_value = sess.run([model.proba_collection],
                               feed_dict={model.xx: test_data})

        v = Validation()
        v.calculateF1(param.label_value, classifier_value[0])
        v.allValidation(param.label_value, proba_value[0][:, 1])
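
# Note on Example #1: model.optimizer, model.cost, and model.accuracy are
# evaluated with separate sess.run calls above, so the forward pass is
# recomputed each time. A minimal sketch of fetching them together
# (hypothetical helper; the same `model` attributes are assumed):
def run_train_step(sess, model, fd):
    # One sess.run shares a single forward pass across all three fetches
    _, loss, acc = sess.run([model.optimizer, model.cost, model.accuracy],
                            feed_dict=fd)
    return loss, acc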
Example #2
    # Method of a dataset/experiment class (class definition not shown here)
    def get_result(self, predict_classifier, predict_probability):
        v = Validation()
        v.calculateF1(self.y_test, predict_classifier)
        v.allValidation(self.y_test, predict_probability)
def train(param, model, forest_net, dataset):

    # Launch the graph
    with tf.Session() as sess:
        sess.run(model.init)
        step = 0
        residual = list()
        # Keep training until max iterations are reached
        sum_time = []
        while step < param.training_iters:
            s_time = float(time.time())
            # Generate a training mini-batch: the forest net produces hidden
            # data of shape n_steps * batch_size * n_input
            batch_x, batch_y = generate_lstm_input(param, forest_net, dataset,
                                                   residual)
            #batch_x = forest_net.get_train_data(mnist.train.images, residual, mnist.test.images)

            m_time = float(time.time())
            sum_time.append(m_time - s_time)
            batch_x = np.transpose(batch_x, [1, 0, 2])
            #batch_y = np.reshape(batch_y,(-1,param.n_classes))

            fd = {model.xx: batch_x, model.y: batch_y}

            # Run optimization op (backprop)
            sess.run(model.optimizer, feed_dict=fd)
            # Compute the gradient of the cost w.r.t. the input (the residual)
            m1_time = float(time.time())
            grad_val = np.array(
                sess.run(tf.gradients(model.cost, model.xx), feed_dict=fd)[0])
            m2_time = float(time.time())

            residual = param.step * np.transpose(grad_val, [1, 0, 2])
            e_time = float(time.time())
            #print (m_time-s_time, m2_time-m1_time, e_time-m2_time)
            if step % param.display_step == 0:
                # Calculate batch accuracy
                acc = sess.run(model.accuracy, feed_dict=fd)
                # Calculate batch loss
                loss = sess.run(model.cost, feed_dict=fd)
                print("Iter " + str(step) + ", Minibatch Loss= " + \
                      "{:.6f}".format(loss) + ", Training Accuracy= " + \
                      "{:.5f}".format(acc))
            step += 1
            batch_x = list()
            batch_y = list()
        print("Average batch-generation time: {:.4f}s".format(
            sum(sum_time) / len(sum_time)))
        print("Optimization Finished!")

        # Evaluate on the test data

        test_label = dataset.y_test  #[:test_len]
        #test_data_temp = forest_net.forest_test_predict(test_d)
        test_data_temp = np.array(forest_net.get_test_data())
        #print (test_data_temp.shape)
        test_data = np.transpose(test_data_temp, [1, 0, 2])

        fd = {model.xx: test_data, model.y: test_label}
        loss = sess.run(model.cost, feed_dict=fd)

        print("{:.6f}".format(loss))
        print("Testing Accuracy:", \
            sess.run(model.accuracy, feed_dict=fd))

        # Get the predicted class labels and probability values
        classifier_value = sess.run([model.classifier_collection],
                                    feed_dict={model.xx: test_data})
        proba_value = sess.run([model.proba_collection],
                               feed_dict={model.xx: test_data})

        #print (np.array(classifier_value).shape, np.array(proba_value).shape)

        v = Validation()
        # Convert 0-based predictions to the dataset's 1-based string labels
        result = [str(item + 1) for item in classifier_value[0]]
        v.calculateF1(dataset.y_input, result)
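
# Note on Examples #2 and #6: tf.gradients(model.cost, model.xx) is called
# inside the training loop, which adds new ops to the graph on every
# iteration and makes each step slower and more memory-hungry than the last.
# A minimal sketch of building the gradient op once, before the loop
# (assumes the same `model` attributes and the generate_lstm_input helper):
import numpy as np
import tensorflow as tf

def train_prebuilt_grad(param, model, forest_net, dataset):
    grad_op = tf.gradients(model.cost, model.xx)[0]  # built a single time
    with tf.Session() as sess:
        sess.run(model.init)
        residual = list()
        for step in range(param.training_iters):
            batch_x, batch_y = generate_lstm_input(param, forest_net, dataset,
                                                   residual)
            batch_x = np.transpose(batch_x, [1, 0, 2])
            fd = {model.xx: batch_x, model.y: batch_y}
            sess.run(model.optimizer, feed_dict=fd)
            # Reuse the prebuilt op; the graph stays a fixed size
            grad_val = sess.run(grad_op, feed_dict=fd)
            residual = param.step * np.transpose(grad_val, [1, 0, 2])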
Example #4
        step += 1
        start = start + batch_size
        end = end + batch_size
    print("Optimization Finished!")

    # Evaluate on the full test set (the commented lines below kept the
    # original 128-image MNIST variant)
    #test_len = 128
    #test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    #test_label = mnist.test.labels[:test_len]
    test_data = x_test.reshape((-1, n_steps, n_input))
    test_label = np.reshape(y_test, (-1, 2))

    loss = sess.run(cost, feed_dict={x: test_data, y: test_label})

    print("{:.6f}".format(loss))
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))

    # Predicted class labels for the test set
    classifier_value = sess.run([classifier_collection],
                                feed_dict={x: test_data})
    print(len(classifier_value))
    for value in classifier_value:
        print(value)
    # Predicted class probabilities for the test set
    proba_value = sess.run([proba_collection], feed_dict={x: test_data})
    for value in proba_value:
        print(value)

    v = Validation()
    v.calculateF1(input_value, classifier_value[0])
    v.allValidation(input_value, proba_value[0][:, 1])
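
# Note on Example #4: the fragment starts mid-loop, and the start/end pointer
# updates suggest a sliding-window batching loop. A hypothetical
# reconstruction of that bookkeeping (x_train/y_train names are assumed):
import numpy as np

def iterate_minibatches(x_train, y_train, batch_size, n_steps, n_input):
    # Yield consecutive [start:end) windows, matching the
    # start/end updates in the fragment above
    start, end = 0, batch_size
    while end <= len(x_train):
        batch_x = x_train[start:end].reshape((-1, n_steps, n_input))
        batch_y = np.reshape(y_train[start:end], (-1, 2))
        yield batch_x, batch_y
        start += batch_size
        end += batch_size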
Example #5
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        #print(sess.run(tf.gradients(cost, x), feed_dict={x:batch_x, y:batch_y})[0])
        step += 1
    print("Optimization Finished!")

    # Evaluate on the full test set

    #test_data = x_test.reshape((-1, n_steps, n_input))
    #test_data = np.transpose(x_test, [1,0,2])
    test_data = x_test
    test_label = y_test
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))

    classifier_value = sess.run([classifier_collection],
                                feed_dict={x: test_data})[0]
    proba_value = sess.run([proba_collection], feed_dict={x: test_data})
    v = Validation()

    # Predictions are 0-based while y_input is 1-based, hence the offsets
    v.calculateF1(y_input, classifier_value + 1)
    v.allValidation(y_input - 1, proba_value[0][:, 1])
    #v.top_accuacy(y_input, classifier_value[0], proba_value[0][:,0],[1000,10000,50000])
Example #6
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        #print(sess.run(tf.gradients(cost, x), feed_dict={x:batch_x, y:batch_y})[0])
        step += 1
    print("Optimization Finished!")

    # Evaluate on the full test set

    #test_data = x_test.reshape((-1, n_steps, n_input))
    #test_data = np.transpose(x_test, [1,0,2])
    test_data = x_test
    test_label = y_test
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


    classifier_value = sess.run([classifier_collection],
                                feed_dict={x: test_data})[0]
    proba_value = sess.run([proba_collection], feed_dict={x: test_data})
    # Convert 0-based predictions to 1-based string labels
    result = [str(item + 1) for item in classifier_value]
    v = Validation()
    v.calculateF1(y_input, result)
    #v.allValidation(y_input, proba_value[0][:,1])
    #v.top_accuacy(y_input, classifier_value[0], proba_value[0][:,0],[1000,10000,50000])
def train(param, model, forest_net, dataset):

    # Launch the graph
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        sess.run(model.init)
        step = 0
        residual = list()
        # Keep training until max iterations are reached
        while step < param.training_iters:

            # Generate a training mini-batch: the forest net produces hidden
            # data of shape n_steps * batch_size * n_input
            batch_x, batch_y = generate_lstm_input(param, forest_net, dataset,
                                                   residual)

            batch_x = np.transpose(batch_x, [1, 0, 2])

            fd = {model.xx: batch_x, model.y: batch_y}

            # Run optimization op (backprop)
            sess.run(model.optimizer, feed_dict=fd)
            # Compute the gradient of the cost w.r.t. the input (the residual)

            grad_val = np.array(
                sess.run(tf.gradients(model.cost, model.xx), feed_dict=fd)[0])

            # Scale the gradient by a fixed (negative) step size
            residual = -200000 * np.transpose(grad_val, [1, 0, 2])

            print(residual[0][0])
            if step % param.display_step == 0:
                # Calculate batch accuracy
                acc = sess.run(model.accuracy, feed_dict=fd)
                # Calculate batch loss
                loss = sess.run(model.cost, feed_dict=fd)
                print("Iter " + str(step) + ", Minibatch Loss= " + \
                      "{:.6f}".format(loss) + ", Training Accuracy= " + \
                      "{:.5f}".format(acc))
            step += 1
            batch_x = list()
            batch_y = list()

        print("Optimization Finished!")

        # Evaluate on the test data

        test_label = dataset.y_test  #[:test_len]
        #test_data_temp = forest_net.forest_test_predict(test_d)
        test_data_temp = np.array(forest_net.get_test_data())
        test_data = np.transpose(test_data_temp, [1, 0, 2])
        print(test_label.shape, test_data.shape)
        #test_label = np.reshape(test_label, (-1,2))

        fd = {model.xx: test_data, model.y: test_label}
        loss = sess.run(model.cost, feed_dict=fd)

        print("{:.6f}".format(loss))
        print("Testing Accuracy:", \
            sess.run(model.accuracy, feed_dict=fd))

        # Get the predicted class labels and probability values
        classifier_value = sess.run([model.classifier_collection],
                                    feed_dict={model.xx: test_data})
        proba_value = sess.run([model.proba_collection],
                               feed_dict={model.xx: test_data})

        v = Validation()
        v.calculateF1(dataset.y_input, classifier_value[0])
        v.allValidation(dataset.y_input, proba_value[0][:, 1])
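
# Note: the Validation helper used throughout these examples is not shown.
# Inferred from its call sites, calculateF1 takes true and predicted labels
# and allValidation takes true labels and positive-class scores. A minimal
# sketch with scikit-learn (hypothetical; the original may differ):
from sklearn import metrics

class Validation(object):
    def calculateF1(self, y_true, y_pred):
        # Label-based metrics
        print("F1:", metrics.f1_score(y_true, y_pred, average="macro"))
        print("Accuracy:", metrics.accuracy_score(y_true, y_pred))

    def allValidation(self, y_true, y_score):
        # Probability-based metric on the positive class
        print("AUC:", metrics.roc_auc_score(y_true, y_score))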