Example #1
def get_iou_callable():
    with tf.Graph().as_default(), tf.device("/cpu:0"):
        A = tf.placeholder(tf.float32, shape=[None, 4])
        B = tf.placeholder(tf.float32, shape=[None, 4])
        iou = pairwise_iou(A, B)
        sess = tf.Session()
        return sess.make_callable(iou, [A, B])
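A minimal usage sketch, assuming pairwise_iou (not shown above) returns an [N, M] IoU matrix for two sets of (x1, y1, x2, y2) boxes; the callable returned by Session.make_callable takes NumPy arrays in the order of the feed list:

import numpy as np

compute_iou = get_iou_callable()  # hypothetical usage of the function above
boxes_a = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]], dtype=np.float32)
boxes_b = np.array([[0., 0., 10., 10.]], dtype=np.float32)
print(compute_iou(boxes_a, boxes_b))  # IoU matrix of shape (2, 1)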
Example #2
def train():
    x = tf.placeholder(dtype=tf.float32,
                       shape=[None, 224, 224, 3],
                       name='input')
    y = tf.placeholder(dtype=tf.float32,
                       shape=[None, num_classes],
                       name='label')
    # keep_prob = tf.placeholder(tf.float32)

    # build the model
    with slim.arg_scope(vgg_arg_scope()):
        outputs, end_points = vgg_16(x, num_classes)

    # define loss, optimizer and accuracy
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=outputs, labels=y))
    train_step = tf.train.GradientDescentOptimizer(
        learning_rate=lr).minimize(loss)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(outputs, 1), tf.argmax(y, 1)), tf.float32))

    # read a batch from the TFRecord file
    images, labels = read_and_decode('./train.tfrecords')
    img_batch, label_batch = tf.train.shuffle_batch([images, labels],
                                                    batch_size=batch_size,
                                                    capacity=392,
                                                    min_after_dequeue=200)
    label_batch = tf.one_hot(label_batch, num_classes, 1, 0)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(max_steps):
            batch_x, batch_y = sess.run([img_batch, label_batch])
            _, loss_val = sess.run([train_step, loss],
                                   feed_dict={
                                       x: batch_x,
                                       y: batch_y
                                   })

            # print every 10 steps
            if i % 10 == 0:
                train_arr = accuracy.eval(feed_dict={x: batch_x, y: batch_y})
                print("%s:step [%d] Loss: %f,training accuracy : %g" %
                      (datetime.now(), i, loss_val, train_arr))

            if (i + 1) == max_steps:
                saver.save(sess, './model/model.ckpt', global_step=i)

        coord.request_stop()
        coord.join(threads)
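The example above relies on a read_and_decode helper that is not shown. A minimal sketch of such a TFRecord reader, assuming each record stores a raw 224x224x3 uint8 image under 'image_raw' and an int64 'label' (the feature names and decoding are assumptions):

def read_and_decode(filename):
    # Queue-based TF1 input pipeline feeding tf.train.shuffle_batch above.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [224, 224, 3])
    image = tf.cast(image, tf.float32) * (1.0 / 255)
    label = tf.cast(features['label'], tf.int32)
    return image, label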
Example #3
    def _build_RNN(self):
        with tf.variable_scope('inputs'):
            self._xs = tf.placeholder(
                tf.float32,
                [self._batch_size, self._time_steps, self._input_size],
                name='xs')
            self._ys = tf.placeholder(
                tf.float32,
                [self._batch_size, self._time_steps, self._output_size],
                name='ys')
        with tf.name_scope('RNN'):
            l_in_x = tf.reshape(self._xs, [-1, self._input_size], name='2_2D')
            # Wi: (input_size, cell_size)
            Wi = self._weight_variable([self._input_size, self._cell_size])
            print(Wi.name)
            # bi: (cell_size,)
            bi = self._bias_variable([self._cell_size])
            #l_in_y: (batch_size*time_steps, cell_size)
            with tf.name_scope('Wx_plus_b'):
                l_in_y = tf.matmul(l_in_x, Wi) + bi
            l_in_y = tf.reshape(l_in_y,
                                [-1, self._time_steps, self._cell_size])

        with tf.variable_scope('cell'):
            cell = tf.contrib.rnn.BasicLSTMCell(self._cell_size)
            with tf.name_scope('initial_state'):
                self._cell_initial_state = cell.zero_state(self._batch_size,
                                                           dtype=tf.float32)

            self.cell_outputs = []
            cell_state = self._cell_initial_state
            for t in range(self._time_steps):
                if t > 0:
                    tf.get_variable_scope().reuse_variables()  # reuse the LSTM cell's variables across time steps
                cell_output, cell_state = cell(l_in_y[:, t, :], cell_state)
                # cell_outputs: (time_steps, batch_size, cell_size)
                self.cell_outputs.append(cell_output)
            self._cell_final_state = cell_state

        with tf.variable_scope('output_layer'):
            #cell_outputs_reshaped (batch_size*time_steps, cell_size)
            cell_outputs_reshaped = tf.reshape(
                tf.transpose(self.cell_outputs, [1, 0, 2]),
                [-1, self._cell_size])
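            # Sketch of a possible continuation of the projection; the names
            # Wo, bo and self._pred below are assumptions.
            Wo = self._weight_variable([self._cell_size, self._output_size])
            bo = self._bias_variable([self._output_size])
            # pred: (batch_size * time_steps, output_size)
            self._pred = tf.matmul(cell_outputs_reshaped, Wo) + bo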
Example #4
"""
import cifar10, cifar10_input
import tensorflow as tf
import numpy as np
import time
"""1.定义参数和算法公式"""
training_nums = 3000
batch_size = 128
data_dir = 'xxx?oooxoxoxox'
cifar10.maybe_download_and_extract()
train_images, train_labels = cifar10_input.distorted_inputs(
    data_dir=data_dir, batch_size=batch_size)
test_images, test_labels = cifar10_input.inputs(eval_data=True,
                                                data_dir=data_dir,
                                                batch_size=batch_size)
images_ph = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
labels_ph = tf.placeholder(tf.int32, [batch_size])
"""2. Define weights"""


def weight_variable(shape, stddev, w1):
    var = tf.Variable(tf.truncated_normal(shape=shape, stddev=stddev))
    if w1 is not None:  # apply L2 regularization to the weight
        weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var
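A brief sketch of how the weight penalties collected above are typically combined with the data loss; the function name and the sparse cross-entropy form are assumptions:

def total_loss(logits, labels):
    # Cross-entropy on integer labels plus every term that weight_variable
    # added to the 'losses' collection.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                       labels=labels))
    tf.add_to_collection('losses', cross_entropy)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')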


"""3.定义偏置"""

Example #5
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

# [0,0,1,0,0,0,0,0,0,0] = 2

n_classes = 10
batch_size = 128

x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")


def maxpool2d(x):
    return tf.nn.max_pool(x,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding="SAME")


def cnn(x):
    weight = {
        "W_conv1": tf.Variable(tf.random_normal([5, 5, 1, 32])),
        "W_conv2": tf.Variable(tf.random_normal([5, 5, 32, 64])),
        "W_fc": tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
        "out": tf.Variable(tf.random_normal([1024, n_classes]))
Example #6
    def add_placeholders(self):
        # Placeholders for a batch of input windows and their labels.
        self.input_placeholder = tf.placeholder(tf.int32, [None, self.config.window_size])
        self.labels_placeholder = tf.placeholder(tf.float32, [None, self.config.label_size])
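A hypothetical companion method (name and signature assumed) showing how these placeholders would typically be fed at run time:

    def create_feed_dict(self, input_batch, label_batch=None):
        # Map a batch of input windows (and, during training, their labels)
        # onto the placeholders defined in add_placeholders.
        feed_dict = {self.input_placeholder: input_batch}
        if label_batch is not None:
            feed_dict[self.labels_placeholder] = label_batch
        return feed_dict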