Example #1
    def __call__(self):
        # init placeholder
        self.x = tf.placeholder(dtype=tf.float32,
                                shape=[None, self.feature_size],
                                name="x")
        self.y = tf.placeholder(dtype=tf.float32, shape=[None], name="y")

        # divide layer
        with tf.variable_scope("divide_layer"):
            divide = tf.layers.dense(self.x,
                                     self.m,
                                     activation=tf.nn.softmax,
                                     kernel_regularizer=l1(0.01))
            divide = tf.nn.dropout(divide, self.keep_prob)

        # fitting layer
        with tf.variable_scope("fitting_layer"):
            fit = tf.layers.dense(self.x,
                                  self.m,
                                  activation=tf.nn.sigmoid,
                                  kernel_regularizer=l1(0.01))
            fit = tf.nn.dropout(fit, self.keep_prob)

        pred = tf.reshape(tf.reduce_sum(divide * fit, -1), [-1, 1])

        # labels are reshaped to [-1, 1] so they match the shape of the logits
        loss = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=pred, labels=tf.reshape(self.y, [-1, 1]))
        self.loss = tf.reduce_mean(loss)
        self.train_op = tf.train.FtrlOptimizer(self.learning_rate).minimize(
            self.loss)
        # sklearn's roc_auc_score works on NumPy arrays, not tensors, so the
        # AUC is computed here with tf.metrics.auc on the predicted probabilities
        self.auc, self.auc_update_op = tf.metrics.auc(
            labels=tf.reshape(self.y, [-1, 1]),
            predictions=tf.nn.sigmoid(pred))
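
A hedged usage sketch for the class above; Model, the dummy batches, and the batch size are illustrative names, not from the source. Because the AUC is expressed with tf.metrics.auc, its accumulators need tf.local_variables_initializer and an update op.

import numpy as np

# Hedged usage sketch: Model is an assumed name for the enclosing class, which
# is assumed to set feature_size, m, keep_prob and learning_rate beforehand.
model = Model()
model()                                  # builds placeholders, layers, loss, train_op

x_batch = np.random.rand(32, model.feature_size).astype(np.float32)   # dummy batch
y_batch = np.random.randint(0, 2, size=32).astype(np.float32)         # dummy 0/1 labels

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])   # local init for the AUC accumulators
    feed = {model.x: x_batch, model.y: y_batch}
    _, loss_value = sess.run([model.train_op, model.loss], feed_dict=feed)
    sess.run(model.auc_update_op, feed_dict=feed)
    print(loss_value, sess.run(model.auc))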
Example #2
def _filter_word(example, name=None):
    '''
        TODO
    '''
    with tf.name_scope(name, default_name='filter_word'):
        length = tf.placeholder(tf.int32, name='length')

        # Return a boolean tensor so the predicate can be used with
        # tf.data.Dataset.filter; a Python `if` cannot branch on a tensor.
        return tf.less_equal(example[2], length)
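
A sketch of how such a predicate is typically wired into a tf.data pipeline. The threshold is fixed here for illustration; the original code instead feeds the length placeholder through the iterator initializer, as Example #7 shows.

import tensorflow as tf

# Hypothetical pipeline: drop examples whose third component exceeds a fixed threshold.
lengths = tf.constant([3, 40, 7])                        # illustrative data
dataset = tf.data.Dataset.from_tensor_slices((lengths, lengths, lengths))
dataset = dataset.filter(lambda *example: tf.less_equal(example[2], 10))

iterator = dataset.make_one_shot_iterator()
element = iterator.get_next()
with tf.Session() as sess:
    print(sess.run(element))   # -> (3, 3, 3); the length-40 example is dropped
    print(sess.run(element))   # -> (7, 7, 7)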
Example #3
    def build_inputs(self):
        with tf.name_scope('inputs'):
            self.inputs = tf.placeholder(tf.int32, shape=(self.num_seqs, self.num_steps), name='inputs')
            self.targets = tf.placeholder(tf.int32, shape=(self.num_seqs, self.num_steps), name='target')  # targets has the same shape as inputs: the output for the i-th input is the (i+1)-th target. The goal is to train an RNN that can generate a passage of text after being fed one character.
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

            # Chinese text needs an embedding layer;
            # for English letters an embedding layer is unnecessary.
            if self.use_embedding is False:
                self.lstm_inputs = tf.one_hot(self.inputs, self.num_classes)  # represent the input characters as one-hot vectors of length num_classes
            else:
                with tf.device("/cpu:0"):
                    embedding = tf.get_variable('embedding', [self.num_classes, self.embedding_size])  # the embedding matrix is trainable
                    self.lstm_inputs = tf.nn.embedding_lookup(embedding, self.inputs)  # self.inputs is a sequence; how does each character get fed into its own time step???
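
Regarding the open question in the last comment: tf.nn.embedding_lookup maps a [num_seqs, num_steps] tensor of character ids to a [num_seqs, num_steps, embedding_size] tensor, so the RNN receives one embedding vector per time step without any manual splitting. A small illustrative sketch (sizes are made up):

import tensorflow as tf

# Shape sketch: embedding_lookup handles the time axis by itself, so every
# time step of every sequence gets its own embedding row.
ids = tf.constant([[1, 2, 3], [4, 5, 6]])             # [num_seqs=2, num_steps=3]
emb = tf.get_variable('demo_embedding', [10, 8])      # [num_classes=10, embedding_size=8]
vectors = tf.nn.embedding_lookup(emb, ids)
print(vectors.shape)                                  # -> (2, 3, 8)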
Example #4
def _data_agument(image, name=None, stretch=1.2):
    '''
        Image pre-processing without data augmentation
    '''
    with tf.name_scope(name, default_name='data_agument'):
        height, width = image.get_shape().as_list()

        if stretch:
            # A stretching factor of 1.2 gives better results than
            # keeping the original aspect ratio.
            width = int(stretch * width)
            image = tf.image.resize_images(image, [height, width])

        new_width = int(32 * width / height)

        target_width = tf.placeholder(tf.int32, name='target_width')

        # target_width is a tensor, so the branch has to be chosen with
        # tf.cond rather than a Python `if`.
        image = tf.cond(
            tf.greater(new_width, target_width),
            lambda: tf.image.resize_images(image, [32, target_width]),
            lambda: tf.pad(tf.image.resize_images(image, [32, new_width]),
                           [[0, 0], [0, target_width - new_width]]))

        return image
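
Because target_width is only known at run time, the branch above has to be chosen with tf.cond instead of a Python if. A minimal standalone sketch of that pattern (names and values are illustrative):

import tensorflow as tf

# Minimal tf.cond sketch: pick a branch based on a value fed at run time.
target_width = tf.placeholder(tf.int32, name='demo_target_width')
new_width = 120                                        # static width known at graph-build time
out_width = tf.cond(tf.greater(new_width, target_width),
                    lambda: target_width,              # shrink to the target width
                    lambda: tf.constant(new_width))    # otherwise keep the computed width

with tf.Session() as sess:
    print(sess.run(out_width, feed_dict={target_width: 100}))   # -> 100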
Example #5
import tensorflow as tf
import numpy as np
from scipy.misc import imread, imresize

learning_rate = 0.001
training_iters = 1000
batch_size = 50
display_step = 10

dropout = 0.5
num_classes = 10

keep_prob = tf.placehoder(tf.float32)
keep_prob = tf.placeholder(tf.float32)

def convnet(images, _dropout):
    parameters = [] 
    # conv1
    with tf.name_scope('conv1') as scope: 
        # Xavier/Glorot-style uniform bound: sqrt(6 / (fan_in + fan_out))
        wb = tf.sqrt(6 / (228000 + 82600))
        kernel = tf.Variable(tf.random_uniform([5, 7, 7, 3, 4], minval=-wb, maxval=wb, 
                             dtype=tf.float32), name='weights')
        conv = tf.nn.conv3d(images, kernel, [1, 1, 1, 1, 1], padding='VALID')                                            	
        biases = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32),
                             trainable=True, name='biases')
        out = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(out, name=scope)
        parameters += [kernel, biases]
        
    # pool1
    pool1 = tf.nn.max_pool3d(conv1, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='VALID')
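
The keep_prob placeholder defined above is the usual way to switch dropout on during training and off at test time; a minimal sketch of that pattern (the activations are illustrative):

import tensorflow as tf

# Minimal dropout sketch: keep_prob is fed per run, not baked into the graph.
keep_prob = tf.placeholder(tf.float32)
activations = tf.ones([2, 4])
dropped = tf.nn.dropout(activations, keep_prob)

with tf.Session() as sess:
    train_out = sess.run(dropped, feed_dict={keep_prob: 0.5})  # training: dropout active
    test_out = sess.run(dropped, feed_dict={keep_prob: 1.0})   # testing: dropout disabled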
 
Example #6
        
        self.test_images = self.test_images.reshape(test_len, 3, 32, 32).transpose(0, 2, 3, 1) / 255
        self.tes = one_hot_encode(np.hstack([d[b"labels"] for d in self.test_batch]), 10)

    def nex_batch(self, batch_size):
        x = self.trainig_images[self.i:self.i + batch_size].reshape(batch_size, 32, 32, 3)
        y = self.trainig_labes[self.i:self.i + batch_size]
        self.i = (self.i + batch_size) % len(self.trainig_images)
        return x, y
    
ch = CifarHelper()
ch.set_up_images()

import tensorflow as tf 

x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y_true = tf.placeholder(tf.float32, shape=[None, 10])

hold_prob = tf.placeholder(tf.float32)

def init_weights(shape):
    init_random_dist = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_random_dist)

def init_bias(shape):
    init_bias_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_bias_vals)

def conv2D(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
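
A hedged usage sketch wiring the helpers above into a single convolutional layer and feeding the x placeholder with a batch from the CifarHelper; the layer itself is illustrative, and y_true and hold_prob would be fed the same way once a loss and dropout are added.

# Illustrative 5x5 conv, 3 -> 32 channels, built from the helpers above.
W = init_weights([5, 5, 3, 32])
b = init_bias([32])
conv_out = tf.nn.relu(conv2D(x, W) + b)

batch_x, batch_y = ch.nex_batch(100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    features = sess.run(conv_out, feed_dict={x: batch_x})
    print(features.shape)            # -> (100, 32, 32, 32)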
Example #7
def train(pattern,
          alpha,
          beta,
          N,
          M,
          l0,
          w0,
          step,
          decay,
          num_gpus=1,
          alphabet=26):
    '''
    '''
    with tf.Graph().as_default() as graph, tf.device('/device:CPU:0'):
        gradients = []

        with tf.variable_scope('CPU'):
            global_step = tf.train.create_global_step(graph=graph)
            learning_rate = tf.placeholder(tf.float32)
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

            dataset = record_input(pattern)
            iterator = dataset.make_initializable_iterator()
            # retrieved by name; ':0' selects the op's first output tensor
            length = graph.get_tensor_by_name('length:0')
            target_width = graph.get_tensor_by_name('target_width:0')

        for i in range(num_gpus):
            with tf.device('/device:GPU:{}'.format(i)), tf.name_scope('GPU'):
                images, labels, sequence_length = iterator.get_next()
                inputs = _model(images, name='recognition', alphabet=alphabet)
                loss_op = _loss(labels, inputs, sequence_length)
                gradients.append(_gradient(optimizer, loss_op))

        with tf.name_scope('CPU'):
            gradient_op = _update(optimizer,
                                  _average(gradients),
                                  global_step=global_step)

            train_ops = tf.group(gradient_op)
            saver_ops = tf.train.Saver(var_list=tf.global_variables())
            init_ops = tf.global_variables_initializer()

        graph.finalize()

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=True)
    config.gpu_options.allow_growth = True

    with tf.Session(graph=graph, config=config) as sess:
        sess.run(init_ops)

        # Warmup training
        log_increment = (np.log(beta) - np.log(alpha)) / N
        lr = alpha
        l = l0

        for i in range(N):
            sess.run(iterator.initializer,
                     feed_dict={
                         length: l,
                         target_width: w0
                     })

            for j in range(step):
                _, loss_value = sess.run([train_ops, loss_op],
                                         feed_dict={learning_rate: lr})
                print('[Warmup training] epoch: {}, step: {}, loss: {}'.format(
                    i, j, loss_value))

            l += 1
            lr += np.power(10, i * log_increment)

        # Post-warmup training
        lr = beta
        w = w0

        for i in range(M):
            sess.run(iterator.initializer,
                     feed_dict={
                         length: 128,
                         target_width: w
                     })

            for j in range(step):
                _, loss_value = sess.run([train_ops, loss_op],
                                         feed_dict={learning_rate: lr})
                print('[Post-warmup training] epoch: {}, step: {}, loss: {}'.
                      format(i, j, loss_value))

            w += 8
            lr *= np.power(10, -int(i / decay))
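
The length and target_width tensors above are looked up by name from the graph before being fed through the iterator initializer. A minimal standalone sketch of that retrieve-by-name pattern (the tensor and values here are illustrative):

import tensorflow as tf

# Minimal sketch: retrieve a placeholder by name and feed it at run time.
graph = tf.Graph()
with graph.as_default():
    length = tf.placeholder(tf.int32, name='length')
    doubled = 2 * length

fetched = graph.get_tensor_by_name('length:0')   # ':0' selects the op's first output
with tf.Session(graph=graph) as sess:
    print(sess.run(doubled, feed_dict={fetched: 21}))   # -> 42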
Example #8
    def __init__(self,
                 pb_file,
                 dest_nodes,
                 input_shape=None,
                 in_nodes=None,
                 input_format="NCHW".encode()):
        with open(pb_file, 'rb') as f:
            serialized = f.read()
        tf.reset_default_graph()
        original_graph_def = tf.GraphDef()
        original_graph_def.ParseFromString(serialized)
        self.inputs = list()
        self.outputs = dest_nodes

        sess = tf.Session(graph=tf.get_default_graph())
        sess.run(tf.global_variables_initializer())
        self.infer = ModelInfer(sess)

        original_graph_def = strip_unused_lib.strip_unused(
            input_graph_def=original_graph_def,
            input_node_names=in_nodes,
            output_node_names=dest_nodes,
            placeholder_type_enum=dtypes.float32.as_datatype_enum)

        graph_def = tf.GraphDef()
        graph_def.ParseFromString(original_graph_def.SerializeToString())
        in_type_list = dict()
        for node in graph_def.node:
            if node.name in in_nodes:
                in_type_list[node.name] = node.attr['dtype'].type

        input_shape = list(input_shape)
        if not isinstance(input_shape[0], list):
            input_shape = [input_shape]

        input_map = dict()
        for i in range(len(input_shape)):
            if in_type_list[in_nodes[i]] == 1 or in_type_list[
                    in_nodes[i]] == 0:
                dtype = tf.float32
                x = tf.placeholder(dtype, shape=input_shape[i])
            elif in_type_list[in_nodes[i]] == 3:
                dtype = tf.int32
                x = tf.placeholder(dtype, shape=input_shape[i])
            else:
                raise Exception("Unexpected dtype for input, only support " \
                    "float32 and int32 now")
            input_map[in_nodes[i] + ":0"] = x
            self.inputs.append(x.name.split(':')[0])
            self.infer.gen_sample_data(x.name, input_shape[i])

        tf.import_graph_def(graph_def, name="", input_map=input_map)
        graph_def = tf.get_default_graph()._as_graph_def(add_shapes=True)[0]

        self.tf_graph = TensorflowGraph(graph_def)
        self.tf_graph.build(input_format)

        self.weights = dict()
        for node in graph_def.node:
            if node.op.lower() == "const":
                try:
                    node.attr['value'].tensor.tensor_content
                    weight = tensor_util.MakeNdarray(node.attr['value'].tensor)
                    self.weights[node.name] = weight
                except:
                    continue
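
The weight-extraction loop above relies on tensor_util.MakeNdarray to turn a Const node's tensor proto back into a NumPy array; a small round-trip sketch of that API (the array is illustrative):

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_util

# Round-trip sketch: serialize a NumPy array to a TensorProto and recover it,
# which is what the Const-node weight extraction above depends on.
weight = np.arange(6, dtype=np.float32).reshape(2, 3)
proto = tf.make_tensor_proto(weight)
recovered = tensor_util.MakeNdarray(proto)
print(np.array_equal(weight, recovered))   # -> True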
Example #9
"""
In TensorFlow, data isn’t stored as integers, floats, or strings.
These values are encapsulated in an object called a tensor.
"""

import tensorflow as tf
# Create TensorFlow object called hello_constant
# hello_constant is a 0-dimensional string tensor
hello_constant = tf.constant('Hello World!')
"""
Session
TensorFlow’s api is built around the idea of a computational graph, a way of
visualizing a mathematical process which you learned about in the MiniFlow
lesson. Let’s take the TensorFlow code you ran and turn that into a graph
"""
with tf.Session() as sess:
    # Run the tf.constant operation in the session
    output = sess.run(hello_constant)
    print(output)
"""
What if you want to use a non-constant? This is where tf.placeholder() and
feed_dict come into place.
tf.placeholder() returns a tensor that gets its value from data passed to the
tf.session.run() function, allowing you to set the input right before the
session runs.
"""
x = tf.placeholder(tf.string)
with tf.Session() as sess:
    output = sess.run(x, feed_dict={x: "Hello Emma"})
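"""
feed_dict can set more than one placeholder in a single run call; a short
follow-up sketch with illustrative values:
"""
x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)

with tf.Session() as sess:
    # Only x is fetched, but all three placeholders are fed in one run call
    output = sess.run(x, feed_dict={x: 'Test String', y: 123, z: 45.67})
    print(output)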