Example #1
    def forward(self,examples,labels):
        """建立前向传播图"""
        opts = self._options
        # 声明所有需要的变量
        # embeddings :[vocab-size,emb_size]
        init_width = 0.5 / opts.emb_dim

        emb = tf.Variable(
            tf.random_uniform([opts.vocab_size, opts.emb_dim],
                              -init_width, init_width),
            name="emb")
        self._emb = emb

        # Softmax weights: [vocab_size, emb_dim]
        sm_w_t = tf.Variable(
            tf.zeros([opts.vocab_size, opts.emb_dim]), name="sm_w_t")
        # Softmax biases: [vocab_size]
        sm_b = tf.Variable(
            tf.zeros([opts.vocab_size]), name="sm_b")

        # global step:scalar
        self.global_step = tf.Variable(0,name="global_step")

        # Nodes to compute the NCE loss with candidate sampling.
        labels_matrix = tf.reshape(
            tf.cast(labels, dtype=tf.int64), [opts.batch_size, 1])
        # Negative sampling.
        sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
            true_classes=labels_matrix,
            num_true=1,
            num_sampled=opts.num_samples,
            unique=True,
            range_max=opts.vocab_size,
            distortion=0.75,
            unigrams=opts.vocab_counts.tolist()))

        # Embeddings for the examples: [batch_size, emb_dim]
        example_emb = tf.nn.embedding_lookup(emb, examples)

        # Weights for the labels: [batch_size, emb_dim]
        true_w = tf.nn.embedding_lookup(sm_w_t, labels)
        # Biases for the labels: [batch_size, 1]
        true_b = tf.nn.embedding_lookup(sm_b, labels)

        # Weights for the sampled ids: [num_sampled, emb_dim]
        sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
        # Biases for the sampled ids: [num_sampled, 1]
        sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)

        # True logits:[batch_size,1]
        true_logits = tf.reduce_sum(tf.multiply(example_emb,true_w),1) + true_b

        # Sampled logits: [batch_size, num_sampled]
        sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
        sampled_logits = tf.matmul(example_emb,
                                   sampled_w,
                                   transpose_b=True) + sampled_b_vec
        return true_logits, sampled_logits
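
For context, these two logit tensors are normally consumed by a sigmoid cross-entropy NCE loss, as in the TensorFlow word2vec reference code this method follows. A minimal sketch of such a companion method (not part of this excerpt; the division by opts.batch_size assumes the batch-size field used above):

    def nce_loss(self, true_logits, sampled_logits):
        """NCE loss: sigmoid cross-entropy on true vs. sampled logits (sketch)."""
        opts = self._options
        # Positive targets (1.0) for the true logits.
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(true_logits), logits=true_logits)
        # Negative targets (0.0) for the sampled noise logits.
        sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
        # Sum both terms and average over the batch.
        return (tf.reduce_sum(true_xent) +
                tf.reduce_sum(sampled_xent)) / opts.batch_size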
Example #2
    def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size  # batch size
        self.num_steps = num_steps = config.num_steps  # number of unrolled time steps
        size = config.hidden_size  # hidden layer size
        vocab_size = config.vocab_size  # vocabulary size
        # Input placeholders
        self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])

        lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
        if is_training and config.keep_prob < 1:
            lstm_cell = rnn_cell.DropoutWrapper(
                lstm_cell, output_keep_prob=config.keep_prob)
        cell = rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)

        self._initial_state = cell.zero_state(batch_size, tf.float32)

        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, size])
            inputs = tf.nn.embedding_lookup(embedding, self._input_data)

        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

        outputs = []
        states = []
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0:
                    tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)
                states.append(state)

        output = tf.reshape(tf.concat(outputs, 1), [-1, size])
        softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        # Per-time-step cross-entropy; sequence_loss_by_example takes (logits, targets, weights).
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits], [tf.reshape(self._targets, [-1])],
            [tf.ones([batch_size * num_steps])])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = states[-1]

        if not is_training:
            return
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self.lr)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))
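
The constructor reads self.lr when building the optimizer, which implies property accessors defined elsewhere in the class (this snippet follows the TensorFlow PTB language-model tutorial). A sketch of those accessors plus the usual learning-rate update helper; treat the exact names as assumptions rather than part of the original excerpt:

    def assign_lr(self, session, lr_value):
        # Update the non-trainable learning-rate variable at run time.
        session.run(tf.assign(self._lr, lr_value))

    @property
    def lr(self):
        return self._lr

    @property
    def cost(self):
        return self._cost

    @property
    def train_op(self):
        return self._train_op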
Example #3
def loss(logits, labels):
    # Add L2 loss to all the trainable variables.
    sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
    indices = tf.reshape(tf.range(FLAGS.batch_size), [FLAGS.batch_size, 1])
    concated = tf.concat([indices, sparse_labels], 1)
    dense_labels = tf.sparse_to_dense(concated,
                                      [FLAGS.batch_size, NUM_CLASSES], 1.0,
                                      0.0)
    # Compute the average cross-entropy loss across the batch.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=dense_labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    return tf.add_n(tf.get_collection('losses'), name='total_loss')
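
The indices/concat/sparse_to_dense sequence above is just a manual one-hot encoding of the labels. On TF 1.x the same dense_labels tensor can be built in a single call; a sketch, assuming labels is the [batch_size] integer tensor used above:

# Equivalent one-hot construction (sketch); avoids building the index pairs by hand.
dense_labels = tf.one_hot(labels, depth=NUM_CLASSES, dtype=tf.float32)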
Example #4
def read_cifar10(filename_queue):
    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()
    # Fixed layout of a CIFAR-10 binary record.
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes

    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)
    # Decode the record's raw bytes.
    record_bytes = tf.decode_raw(value, tf.uint8)
    # Convert the label from uint8 to int32.
    result.label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]),
                           tf.int32)
    # Reshape the image bytes from a flat vector to [depth, height, width].
    depth_major = tf.reshape(
        tf.slice(record_bytes, [label_bytes], [image_bytes]),
        [result.depth, result.height, result.width])
    # Transpose to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    return result
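
A typical way to drive this reader with the TF 1.x queue-based input pipeline is sketched below; the file names and the per-image standardization step are illustrative additions, not part of the original excerpt:

# Illustrative usage of read_cifar10 (file names are hypothetical).
filenames = ['data_batch_%d.bin' % i for i in range(1, 6)]
filename_queue = tf.train.string_input_producer(filenames)
read_input = read_cifar10(filename_queue)
# Cast to float and standardize each image before batching.
float_image = tf.image.per_image_standardization(
    tf.cast(read_input.uint8image, tf.float32))
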
def conv_network(x, weights, biases, dropout):
    # MNIST input is a flat 784-dimensional vector (height * width * depth).
    # Reshape it into a 4-D tensor: [batch_size, height, width, depth].
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution layer 1
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # max pooling
    conv1 = maxpool2d(conv1, k=2)

    # Convolution layer 2
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Flatten conv2's output to match the fully connected layer's input.
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)

    # Dropout
    fc1 = tf.nn.dropout(fc1, dropout)
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
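
conv_network expects weights and biases dictionaries whose shapes line up with the layers above. A sketch of matching definitions; the 5x5 kernel sizes, the 10 output classes, and the 7*7*64 flattened size (a 28x28 input halved by the two 2x2 poolings) are assumptions consistent with the reshape inside the function:

# Illustrative parameter dictionaries for conv_network (shapes are assumptions).
weights = {
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),       # conv1: 1 input channel -> 32 filters
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),      # conv2: 32 -> 64 filters
    'wd1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),  # flattened 7x7x64 -> 1024 units
    'out': tf.Variable(tf.random_normal([1024, 10])),          # 1024 units -> 10 classes
}
biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([10])),
}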
Example #6
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
    """
    :param image: 3D Tensor
    :param label: 1D Tensor int32
    :param min_queue_examples:
    :param batch_size:
    :return:
    """
    num_preprocess_threads = 16
    image, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)

    # Visualize the training images in TensorBoard.
    tf.summary.image('images', image)
    return image, tf.reshape(label_batch, [batch_size])
def conv_network(x_dict, n_classes, dropout, reuse, is_training):

    with tf.variable_scope('ConvNetwork', reuse=reuse):
        x = x_dict['images']

        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2, padding='SAME')

        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
        # Fully connected layer; flatten the previous output first.
        fc1 = tf.contrib.layers.flatten(conv2)

        fc1 = tf.layers.dense(fc1, 1024)

        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
        out = tf.layers.dense(fc1, n_classes)
    return out
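
This function is meant to be built twice over the same variables, once in training mode (dropout on) and once in evaluation mode (dropout off), which is what the reuse and is_training arguments are for. A minimal usage sketch; features, num_classes and dropout_rate are placeholder names, and features is assumed to be a dict with an 'images' key like x_dict above:

# Build the two graphs; the second call reuses the variables of the first.
logits_train = conv_network(features, num_classes, dropout_rate,
                            reuse=False, is_training=True)
logits_test = conv_network(features, num_classes, dropout_rate,
                           reuse=True, is_training=False)
pred_classes = tf.argmax(logits_test, axis=1)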
Example #8
def conv_net(x, n_classes, dropout, reuse, is_training):
    with tf.variable_scope('ConvNet', reuse=reuse):
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        x = tf.layers.conv2d(x, 64, 5, activation=tf.nn.relu)
        x = tf.layers.max_pooling2d(x, 2, 2)

        x = tf.layers.conv2d(x, 256, 3, activation=tf.nn.relu)
        x = tf.layers.conv2d(x, 512, 3, activation=tf.nn.relu)

        x = tf.layers.max_pooling2d(x, 2, 2)

        x = tf.contrib.layers.flatten(x)
        # Fully connected layer
        x = tf.layers.dense(x, 2048)
        x = tf.layers.dropout(x, rate=dropout, training=is_training)

        x = tf.layers.dense(x, 1024)
        x = tf.layers.dropout(x, rate=dropout, training=is_training)

        out = tf.layers.dense(x, n_classes)
        out = tf.nn.softmax(out) if not is_training else out
    return out
Example #9
def dynamicRNN(x, seqlen, weights, biases):

    x = tf.unstack(x, seq_max_len, 1)

    # Define the LSTM cell.
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)

    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell,
                                                x,
                                                dtype=tf.float32,
                                                sequence_length=seqlen)
    # When performing dynamic computation we must retrieve the output of the
    # last valid time step: if a sequence has length 10, we need its 10th output.
    # So we build a custom op that, for each example in the batch, looks up its
    # length and gathers the corresponding output.
    # `outputs` is a list of per-time-step outputs; stack and transpose it into
    # [batch_size, seq_max_len, n_hidden].
    outputs = tf.stack(outputs)
    outputs = tf.transpose(outputs, [1, 0, 2])

    batch_size = tf.shape(outputs)[0]
    # Index of the last valid output for each example in the flattened outputs.
    index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1)

    outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)

    return tf.matmul(outputs, weights['out']) + biases['out']
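
To make the index arithmetic above concrete: with seq_max_len = 4 and two sequences of lengths 3 and 2, the flattened outputs tensor has 2 * 4 rows, and the gathered rows are 0*4 + 2 = 2 and 1*4 + 1 = 5, i.e. the last valid time step of each sequence. A tiny NumPy check of that arithmetic (illustrative only):

import numpy as np

# Verify the "last valid output" row indices used by tf.gather above.
seq_max_len = 4
seqlen = np.array([3, 2])          # actual lengths of the two example sequences
batch_size = seqlen.shape[0]
index = np.arange(batch_size) * seq_max_len + (seqlen - 1)
print(index)                       # -> [2 5]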
Example #10
def inference(images):

    # Build the model.
    # Convolution layer 1
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 3, 64],
                                             stddev=1e-4,
                                             wd=0.0)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv1)
    # Pooling layer 1
    pool1 = tf.nn.max_pool(conv1,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name="pool1")
    # Local response normalization 1
    norm1 = tf.nn.lrn(pool1,
                      4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name='norm1')

    # Convolution layer 2
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 64, 64],
                                             stddev=1e-4,
                                             wd=0.0)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv2)
    # Local response normalization 2
    norm2 = tf.nn.lrn(conv2,
                      4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name='norm2')
    # Pooling layer 2
    pool2 = tf.nn.max_pool(norm2,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='pool2')

    # Fully connected layer with ReLU; flatten the previous layer's output first.
    with tf.variable_scope('local3') as scope:
        dim = 1
        # Flatten the shape of the previous layer's output.
        for d in pool2.get_shape()[1:].as_list():
            dim *= d
        reshape = tf.reshape(pool2, [FLAGS.batch_size, dim])
        weights = _variable_with_weight_decay('weights',
                                              shape=[dim, 384],
                                              stddev=0.04,
                                              wd=0.004)
        biases = _variable_on_cpu('biases', [384],
                                  tf.constant_initializer(0.1))

        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases,
                            name=scope.name)
        _activation_summary(local3)
    # Fully connected layer with ReLU.
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights',
                                              shape=[384, 192],
                                              stddev=0.04,
                                              wd=0.004)
        biases = _variable_on_cpu('biases', [192],
                                  tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases,
                            name=scope.name)
        _activation_summary(local4)

    # Softmax layer: a linear transform producing the unscaled logits.
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                              stddev=1 / 192.0,
                                              wd=0.0)
        biases = _variable_on_cpu('biases', [NUM_CLASSES],
                                  tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights),
                                biases,
                                name=scope.name)
        _activation_summary(softmax_linear)

    return softmax_linear
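
In the surrounding CIFAR-10 code this graph is typically wired to a loss and an optimizer much like the loss() in Example #3. A minimal sketch of that wiring; the images and labels tensors are assumed to come from an input pipeline such as the shuffle-batch reader in Example #6, and the 0.1 learning rate is purely illustrative:

# Illustrative wiring of inference() and loss() into a training op.
logits = inference(images)
total_loss = loss(logits, labels)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    total_loss, global_step=tf.train.get_or_create_global_step())
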
# Define a 2x2 max-pooling helper.
def max_pool_2x2(x):
    return tf.nn.max_pool(x,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')


print("use cnn get feature..")
# First convolution layer on the MNIST input.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

# For the convolution, reshape x into a 4-D tensor: dimensions 2 and 3 are the
# image width and height, and the last dimension is the number of color channels.
x_image = tf.reshape(x, [-1, 28, 28, 1])

# Convolve x_image with the weight tensor, add the bias, apply the ReLU
# activation, and finally max-pool.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

h_pool1 = max_pool_2x2(h_conv1)
print("second conv..")
# Second convolution layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Fully connected layer: the image size has been reduced to 7x7. Add a fully
# connected layer (an ordinary dense network) that processes the whole image.
# Operation: multiply the pooled tensor by W, add b, then apply ReLU.
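
The excerpt stops mid-step; following the weight_variable/bias_variable helpers used above, the fully connected layer that the last comment describes would look roughly like this (a sketch, not part of the original):

# Sketch of the described step: flatten the 7x7x64 pooled feature map,
# multiply by W, add b, and apply ReLU.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)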