Example No. 1
 def _load_data(self):
     train_data = Dataset(self.config, force_build=self.config.rebuild)
     self.train_iter = train_data.dataset_iter
     self.vocab_size = train_data.vocab_size
     self.word_vectors = train_data.word_vectors
     self.eval_iter = Dataset(self.config, type="eval",
                              force_build=False).dataset_iter
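The snippet above only shows the consumer side of `Dataset`. A minimal sketch of the interface it assumes (the constructor arguments and the `dataset_iter`, `vocab_size`, and `word_vectors` attributes come from the snippet; everything else is hypothetical):

class Dataset:
    """Hypothetical shape of the Dataset class used above."""

    def __init__(self, config, type="train", force_build=False):
        self.config = config
        self.type = type
        # force_build=True would rebuild the preprocessed corpus
        # instead of loading a cached copy from disk.
        self.vocab_size = 0        # set while building/loading the vocabulary
        self.word_vectors = None   # pretrained embedding matrix, if any
        self.dataset_iter = None   # iterator yielding (x, y) batches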
Example No. 2
class char_CNN:
    with tf.device('/cpu:0'):
        pin = xpinyin.Pinyin()
        # Load data
        print("正在载入数据、模型...")
        #主要是onehot用
        sample_data_source = Dataset(config.sample_data_source)
        # test_data = Dataset(config.test_data_source)
        # fetch the latest checkpoint; the path can be changed
        checkpoint_file = tf.train.latest_checkpoint('./runs/1530261778/checkpoints')
        graph = tf.Graph()
        with graph.as_default():
            sess = tf.Session()
            with sess.as_default():
                # Load the saved meta graph and restore variables
                saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)

                print("载入模型成功1...")
                # Get the placeholders from the graph by name
                input_x = graph.get_operation_by_name("input_x").outputs[0]
                # input_y = graph.get_operation_by_name("input_y").outputs[0]
                dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

                # Tensors we want to evaluate
                predictions = graph.get_operation_by_name("output_layer/predictions").outputs[0]

                # word-vector embedding and index-to-label mapping
                embedding_w, embedding_dic = sample_data_source.onehot_dic_build()
                label_dict = {0: 'VIDEO', 1: 'TV', 2: 'APP', 3: 'CONTROL', 4: 'WEATHER', 5: 'MUSIC'}
                print("载入模型成功2...")


                @staticmethod
                def rec(text, sentencepinyin):
                    try:
                        doc_image = []
                        doc_vec = char_CNN.sample_data_source.doc_process(sentencepinyin, char_CNN.embedding_dic)
                        doc_image.append(doc_vec)
                        batch_xx = np.array(doc_image, dtype='int64')
                        prediction = char_CNN.sess.run(char_CNN.predictions, {char_CNN.input_x: batch_xx, char_CNN.dropout_keep_prob: 1.0})
                        ppred = str(prediction[0]).replace('[', '').replace(']', '')
                        label_pred = char_CNN.label_dict[int(ppred)]  # str -> int -> label
                        print(text, '\t', label_pred)
                        return label_pred
                    except Exception:
                        print(text, "rec text wrong!")
Example No. 3
    def __init__(self, l0, num_classes, conv_layers, fc_layers, l2_reg_lambda):

        # create the input placeholders
        self.input_x = tf.placeholder(tf.int32, [None, l0], name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, num_classes],
                                      name='input_y')
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name='dropout_keep_prob')

        # keep track of the L2 regularization loss
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope('embedding'):
            train_data = Dataset(config.train_data_source)
            self.W, _ = train_data.onehot_dic_build()
            self.x_image = tf.nn.embedding_lookup(self.W, self.input_x)
            # expand x to a 4-D tensor
            self.x_flat = tf.expand_dims(self.x_image, -1)
            print('Embedding layer built!')

        # conv-pool blocks (6 layers)
        for i, c in enumerate(conv_layers):
            with tf.name_scope('conv_layer-%s' % (i + 1)):
                print('Processing conv layer %d' % (i + 1))
                filter_width = self.x_flat.get_shape()[2].value  # width of the conv filter
                filter_shape = [c[1], filter_width, 1, c[0]]  # shape of the conv filter

                stdv = 1 / sqrt(c[0] * c[1])
                # uniform initialization (alternative, commented out)
                # w_conv = tf.Variable(tf.random_uniform(shape=filter_shape, minval=-stdv, maxval=stdv),
                #                      dtype='float32', name='W')
                # b_conv = tf.Variable(tf.random_uniform(shape=[c[0]], minval=-stdv, maxval=stdv),
                #                      dtype='float32', name='b')
                # Gaussian (normal) initialization
                w_conv = tf.Variable(tf.random_normal(shape=filter_shape,
                                                      mean=0.0,
                                                      stddev=0.05),
                                     dtype='float32',
                                     name='W')
                b_conv = tf.Variable(tf.constant(0.1, shape=[c[0]]), name='b')

                conv = tf.nn.conv2d(self.x_flat,
                                    w_conv,
                                    strides=[1, 1, 1, 1],
                                    padding='VALID',
                                    name='conv')
                h_conv = tf.nn.bias_add(conv, b_conv)
                """忽然发现这里并没有用到激活函数,直接连接到最大池化"""

                # check whether a pooling size was given
                if c[-1] is not None:
                    ksize_shape = [1, c[2], 1, 1]
                    h_pool = tf.nn.max_pool(h_conv,
                                            ksize=ksize_shape,
                                            strides=ksize_shape,
                                            padding='VALID',
                                            name='pool')
                else:
                    h_pool = h_conv
                print('Shape after pooling:', h_pool.get_shape())
                """At this point the output has shape [batch, ]"""

                # transpose the output to [batch, sentence_length, embedding_size, channels]
                self.x_flat = tf.transpose(h_pool, [0, 1, 3, 2],
                                           name='transpose')

                print(self.x_flat.get_shape())

        # reshape the output into a 2-D input for the fully connected layers
        with tf.name_scope('reshape'):
            fc_dim = (self.x_flat.get_shape()[1].value *
                      self.x_flat.get_shape()[2].value)  # FC input dim: [batch, fc_dim]
            self.x_flat = tf.reshape(self.x_flat, shape=[-1, fc_dim])

        weights = [fc_dim] + fc_layers
        for i, fl in enumerate(fc_layers):
            with tf.name_scope('fc_layer-%s' % (i + 1)):
                print('Processing fully connected layer %d' % (i + 1))
                # uniform initialization (alternative, commented out)
                # stdv = 1 / sqrt(weights[i])
                # w_fc = tf.Variable(tf.random_uniform(shape=[weights[i], fl], minval=-stdv, maxval=stdv),
                #                    dtype='float32', name='W')
                # b_fc = tf.Variable(tf.random_uniform(shape=[fl], minval=-stdv, maxval=stdv),
                #                    dtype='float32', name='b')

                # Gaussian (normal) initialization
                w_fc = tf.Variable(tf.random_normal(shape=[weights[i], fl],
                                                    mean=0.0,
                                                    stddev=0.05),
                                   dtype='float32',
                                   name='W')
                b_fc = tf.Variable(tf.constant(0.1, shape=[fl]),
                                   dtype='float32',
                                   name='b')
                """这个全连接层中间加了激活函数"""
                self.x_flat = tf.nn.relu(tf.matmul(self.x_flat, w_fc) + b_fc)

                with tf.name_scope('drop_out'):
                    self.x_flat = tf.nn.dropout(self.x_flat,
                                                self.dropout_keep_prob)

        with tf.name_scope('output_layer'):
            print('Processing the output layer')
            # Gaussian (normal) initialization
            w_out = tf.Variable(tf.random_normal(
                shape=[fc_layers[-1], num_classes], mean=0.0, stddev=0.05),
                                dtype='float32',
                                name='W')
            b_out = tf.Variable(tf.constant(0.1, shape=[num_classes]),
                                name='b')

            # uniform initialization (alternative, commented out)
            # stdv = 1 / sqrt(weights[-1])
            # w_out = tf.Variable(tf.random_uniform(shape=[fc_layers[-1], num_classes], minval=-stdv, maxval=stdv),
            #                     dtype='float32', name='W')
            # b_out = tf.Variable(tf.random_uniform(shape=[num_classes], minval=-stdv, maxval=stdv),
            #                     dtype='float32', name='b')

            self.y_pred = tf.nn.xw_plus_b(self.x_flat,
                                          w_out,
                                          b_out,
                                          name='y_pred')
            self.predictions = tf.argmax(self.y_pred, 1, name='predictions')

        # loss computation
        with tf.name_scope('loss'):
            losses = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.y_pred, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # accuracy computation
        with tf.name_scope('accuracy'):
            correct_predictions = tf.equal(self.predictions,
                                           tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                                   dtype='float'),
                                           name='accuracy')
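To build this graph the constructor needs the conv and fc specs. A sketch of an instantiation, reusing the conv_layers configuration quoted later on this page (in Example No. 8's comment block) and assuming fc_layers, l0, and the class name CharCNN, none of which this snippet shows:

conv_layers = [[256, 7, 3], [256, 7, 3], [256, 3, None],
               [256, 3, None], [256, 3, None], [256, 3, 3]]
fc_layers = [1024, 1024]  # assumed; not shown in this example
cnn = CharCNN(l0=1014, num_classes=4, conv_layers=conv_layers,
              fc_layers=fc_layers, l2_reg_lambda=0.0)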
Example No. 4
    def __init__(self,
                 l0,
                 num_classes,
                 conv_layers,
                 fc_layers,
                 l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, l0], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes],
                                      name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            train_data = Dataset(config.data_source, config.label_source)
            self.W, _ = train_data.onehot_dic_build()
            self.x_image = tf.nn.embedding_lookup(self.W, self.input_x)
            self.x_flat = tf.expand_dims(self.x_image, -1)

        for i, cl in enumerate(conv_layers):
            with tf.name_scope("conv_layer-%s" % (i + 1)):
                print("开始第" + str(i + 1) + "卷积层的处理")
                filter_width = self.x_flat.get_shape()[2].value
                filter_shape = [cl[1], filter_width, 1, cl[0]]

                stdv = 1 / sqrt(cl[0] * cl[1])
                w_conv = tf.Variable(tf.random_uniform(filter_shape,
                                                       minval=-stdv,
                                                       maxval=stdv),
                                     dtype='float32',
                                     name='w')
                # w_conv = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.05), name="W")
                b_conv = tf.Variable(tf.random_uniform(shape=[cl[0]],
                                                       minval=-stdv,
                                                       maxval=stdv),
                                     name='b')
                # b_conv = tf.Variable(tf.constant(0.1, shape=[cl[0]]), name="b")
                conv = tf.nn.conv2d(self.x_flat,
                                    w_conv,
                                    strides=[1, 1, 1, 1],
                                    padding="VALID",
                                    name="conv")
                h_conv = tf.nn.bias_add(conv, b_conv)

                if cl[-1] is not None:
                    ksize_shape = [1, cl[2], 1, 1]
                    h_pool = tf.nn.max_pool(h_conv,
                                            ksize=ksize_shape,
                                            strides=ksize_shape,
                                            padding='VALID',
                                            name='pool')
                else:
                    h_pool = h_conv

                self.x_flat = tf.transpose(h_pool, [0, 1, 3, 2],
                                           name='transpose')

        with tf.name_scope('reshape'):
            fc_dim = (self.x_flat.get_shape()[1].value *
                      self.x_flat.get_shape()[2].value)
            self.x_flat = tf.reshape(self.x_flat, [-1, fc_dim])

        weights = [fc_dim] + fc_layers
        for i, fl in enumerate(fc_layers):
            with tf.name_scope('fc_layer-%s' % (i + 1)):
                print("开始第" + str(i + 1) + "全连接层的处理")
                stdv = 1 / sqrt(weights[i])
                w_fc = tf.Variable(tf.random_uniform([weights[i], fl],
                                                     minval=-stdv,
                                                     maxval=stdv),
                                   dtype='float32',
                                   name='w')
                b_fc = tf.Variable(tf.random_uniform(shape=[fl],
                                                     minval=-stdv,
                                                     maxval=stdv),
                                   dtype='float32',
                                   name='b')
                # an alternative initialization
                # w_fc = tf.Variable(tf.truncated_normal([weights[i], fl], stddev=0.05), name="W")
                # b_fc = tf.Variable(tf.constant(0.1, shape=[fl]), name="b")
                self.x_flat = tf.nn.relu(tf.matmul(self.x_flat, w_fc) + b_fc)

                with tf.name_scope('drop_out'):
                    self.x_flat = tf.nn.dropout(self.x_flat,
                                                self.dropout_keep_prob)

        with tf.name_scope('output_layer'):
            print("开始输出层的处理")
            # w_out = tf.Variable(tf.truncated_normal([fc_layers[-1], num_classes], stddev=0.1), name="W")
            # b_out = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            stdv = 1 / sqrt(weights[-1])
            w_out = tf.Variable(tf.random_uniform([fc_layers[-1], num_classes],
                                                  minval=-stdv,
                                                  maxval=stdv),
                                dtype='float32',
                                name='W')
            b_out = tf.Variable(tf.random_uniform(shape=[num_classes],
                                                  minval=-stdv,
                                                  maxval=stdv),
                                name='b')
            self.y_pred = tf.nn.xw_plus_b(self.x_flat,
                                          w_out,
                                          b_out,
                                          name="y_pred")
            self.predictions = tf.argmax(self.y_pred, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.y_pred, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# coding=utf-8
import tensorflow as tf
from data_helper import Dataset
import time
import os
from tensorflow.python import debug as tf_debug
from charCNN import CharCNN
import datetime
from config import config

# Load data
print("正在载入数据...")
# 函数dataset_read:输入文件名,返回训练集,测试集标签
# 注:embedding_w大小为vocabulary_size × embedding_size
train_data = Dataset(config.train_data_source)
dev_data = Dataset(config.dev_data_source)
train_data.dataset_read()
dev_data.dataset_read()

print "得到120000维的doc_train,label_train"
print "得到9600维的doc_dev, label_train"

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False)
    sess = tf.Session(config=session_conf)
    # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    with sess.as_default():
        cnn = CharCNN(
            l0=config.l0,
Example No. 6
import numpy as np
import os
import datetime
from data_helper import Dataset
from CharCNN_model import CharCNN
from config import config

# GPU settings
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "0"

# Load data
print('Loading data......')

print('========== Training set ================')
train_data = Dataset(config.train_data_source)
train_data.dataset_read()
"""
train_data:
样本维度: (120000, 1014)
标签维度: (120000, 4)
"""

print('========== Test set ================')
dev_data = Dataset(config.dev_data_source)
dev_data.dataset_read()
"""
dev_data:
样本维度: (7600, 1014)
标签维度: (7600, 4)
"""
# The fragment below assumes `count` is the sparse matrix returned by an earlier
# sklearn CountVectorizer fit_transform call (truncated out of this excerpt).
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectKBest, chi2

# print(vectorizer.vocabulary_)
print(count.toarray())
print(count.toarray().shape)

transformer = TfidfTransformer()
tfidf_matrix = transformer.fit_transform(count)
print(tfidf_matrix.toarray())
print(tfidf_matrix.toarray().shape)

x = tfidf_matrix.toarray()

# Load data
print("正在载入数据...")
# 函数dataset_read:输入文件名,返回训练集,测试集标签
# 注:embedding_w大小为vocabulary_size × embedding_size
train_data = Dataset(config.data_source, config.label_source)
train_data.dataset_read()
batch_train = train_data.next_batch()

print(x.shape)
print(batch_train[1].shape)

model1 = SelectKBest(chi2, k=100)  # select the k best features
model1.fit_transform(
    x, batch_train[1])  # x holds the feature data, batch_train[1] the labels

# Randomly shuffle data
np.random.seed(10)  # make the random sequence reproducible
# Build an array from 0 to len(labels) with step 1, then shuffle it.
shuffle_indices = np.random.permutation(np.arange(len(batch_train[1])))
x_shuffled = x[shuffle_indices]  # shuffle the sentences and labels the same way
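The excerpt stops before the labels are permuted; a one-line completion, assuming batch_train[1] is the label array, would be:

y_shuffled = batch_train[1][shuffle_indices]  # same permutation as x_shuffled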
Example No. 8
#coding=utf-8
#author@zhangdong

import tensorflow as tf
from data_helper import Dataset
import numpy as np
import os, time
from para_config import config
import datetime
from char_CNN_model import charCNN

# load the training data
train_data = Dataset(config.train_data_source)
train_data.load_dataset()
train_x, train_y = train_data.data_x, train_data.data_y

# load the test (dev) data
dev_data = Dataset(config.dev_data_source)
dev_data.load_dataset()
dev_x, dev_y = dev_data.data_x, dev_data.data_y
print('done')

with tf.Graph().as_default():
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    sess = tf.Session(config=session_config)
    with sess.as_default():
        charcnn = charCNN(config.l0,
                          config.num_classes,
                          config.model.conv_layers,
                          config.model.fc_layers,
    def __init__(self, l0, num_classes, conv_layers, fc_layers, l2_reg_lambda=0.0001):

        # placeholders for input_x, input_y and dropout
        self.input_x = tf.placeholder(dtype=tf.int64, shape=[None, l0], name='input_x')
        self.input_y = tf.placeholder(dtype=tf.float32, shape=[None, num_classes], name='input_y')
        self.dropout_keep_prob = tf.placeholder(dtype=tf.float32, name='dropout_keep_prob')

        # keeping track of l2 regularization loss (optional)
        l2_losses = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope('embedding'):
            train_data = Dataset(config.train_data_source)
            self.char_embedding_mat, self.char_embedding_dict = train_data.onehot_dict_build()
            # look up the index vectors for the current batch of input_x: [128, 1014, 69]
            self.input_x_vec = tf.nn.embedding_lookup(self.char_embedding_mat, self.input_x)
            # add one more dimension to make it 4-D: [128, 1014, 69, 1]
            self.input_x_expanded = tf.expand_dims(self.input_x_vec, -1)
            self.x_flat = self.input_x_expanded



        # convolutional layers
        # the input to the conv stack is self.input_x_expanded: [128, 1014, 69, 1]
        for i, conv in enumerate(conv_layers):
            with tf.name_scope('no.%d_conv_layer' % (i + 1)):
                print('start to process conv layer-%s' % (str(i + 1)))
                # The conv output changes after every layer, so each layer sees a
                # different filter width, but it is always get_shape()[2].value.
                filter_width = self.x_flat.get_shape()[2].value

                # conv_layers = [[256, 7, 3],
                #                [256, 7, 3],
                #                [256, 3, None],
                #                [256, 3, None],
                #                [256, 3, None],
                #                [256, 3, 3]]
                filter_shape = [conv[1], filter_width, 1, conv[0]]
                # filter_weights = tf.Variable(tf.truncated_normal(shape=filter_shape, stddev=0.05), name='filter_weights')
                stdv = 1 / sqrt(conv[0] * conv[1])
                # uniform initialization; the commented-out lines show an alternative
                filter_weights = tf.Variable(tf.random_uniform(filter_shape, minval=-stdv, maxval=stdv),
                                             dtype='float32', name='w')
                # b = tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[conv[0]]), name='b')
                b = tf.Variable(tf.random_uniform(shape=[conv[0]], minval=-stdv, maxval=stdv), name='b')

                # convolution
                conv_results = tf.nn.conv2d(self.x_flat, filter=filter_weights,
                                            strides=[1, 1, 1, 1], padding='VALID', name='conv')
                h_conv = tf.nn.bias_add(conv_results, bias=b)

                if conv[-1] is not None:  # pooling requested
                    ksize = [1, conv[-1], 1, 1]
                    pool_conv = tf.nn.max_pool(h_conv, ksize=ksize, strides=ksize,
                                               padding='VALID', name='max-pooling')
                else:
                    pool_conv = h_conv  # no pooling

                self.x_flat = tf.transpose(pool_conv, perm=[0, 1, 3, 2])

        # After these 6 conv layers the output is a [128, 34, 256, 1] tensor; before the
        # fully connected layers it is reshaped into 128 feature vectors of length 34*256.


        with tf.name_scope('reshape'):
            # input dimension of the first fully connected layer
            first_fc_input_dim = self.x_flat.get_shape()[1].value * self.x_flat.get_shape()[2].value
            self.x_flat = tf.reshape(self.x_flat, [-1, first_fc_input_dim])

        # fully connected layers
        weights_dims = [first_fc_input_dim] + fc_layers
        for j, fc in enumerate(fc_layers):
            with tf.name_scope('no.%s_fc_layer' % (str(j + 1))):
                print('start to process the no.%d fc layer' % (j + 1))
                stdv = 1 / sqrt(weights_dims[j])
                fc_weights = tf.Variable(tf.random_uniform(shape=[weights_dims[j], fc], minval=-stdv, maxval=stdv),
                                         dtype='float32', name='w')
                # fc_weights = tf.Variable(tf.truncated_normal(shape=[weights_dims[j], fc], stddev=0.05), name='fc_weights')
                # fc_b = tf.Variable(tf.constant(0.1, shape=[fc]), name='fc_b')
                fc_b = tf.Variable(tf.random_uniform(shape=[fc], minval=-stdv, maxval=stdv), dtype='float32', name='b')

                self.x_flat = tf.nn.relu(tf.matmul(self.x_flat, fc_weights) + fc_b)

                with tf.name_scope('dropout'):
                    # assign the result back; the original discarded the return
                    # value, which made the dropout a no-op
                    self.x_flat = tf.nn.dropout(self.x_flat, keep_prob=self.dropout_keep_prob)

        # the two fully connected layers above are done; the output layer follows

        with tf.name_scope('output'):
            # outlayer_weights = tf.Variable(tf.truncated_normal(shape=[fc_layers[-1], num_classes], stddev=0.05), name='outlayer_weights')
            # outlayer_b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name='outlayer_b')

            stdv = 1 / sqrt(weights_dims[-1])
            outlayer_weights = tf.Variable(tf.random_uniform([fc_layers[-1], num_classes], minval=-stdv, maxval=stdv),
                                           dtype='float32', name='outlayer_weights')
            outlayer_b = tf.Variable(tf.random_uniform(shape=[num_classes], minval=-stdv, maxval=stdv), name='outlayer_b')

            l2_losses += tf.nn.l2_loss(outlayer_weights)
            l2_losses += tf.nn.l2_loss(outlayer_b)
            self.y_pred = tf.nn.xw_plus_b(self.x_flat, outlayer_weights, outlayer_b, name='y_pred')
            self.predictions = tf.argmax(self.y_pred, 1, name='predictions')


        # compute the mean cross-entropy
        with tf.name_scope('loss'):
            losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.y_pred)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_losses  # mean cross-entropy + L2 penalty

        with tf.name_scope('accuracy'):
            self.correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, 'float'), name='accuracy')
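A minimal training-step sketch against this graph (the placeholder and tensor names come from the class above; the Adam optimizer, learning rate, and the batch_x/batch_y sources are assumptions):

global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = tf.train.AdamOptimizer(1e-3).minimize(charcnn.loss, global_step=global_step)
sess.run(tf.global_variables_initializer())

feed_dict = {charcnn.input_x: batch_x,        # int64 indices, shape [batch, l0]
             charcnn.input_y: batch_y,        # one-hot labels, shape [batch, num_classes]
             charcnn.dropout_keep_prob: 0.5}
_, step, loss, acc = sess.run(
    [train_op, global_step, charcnn.loss, charcnn.accuracy], feed_dict)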
Example No. 10
import numpy as np
import os
import datetime
from data_helper import Dataset
from CharCNN_model import CharCNN
from config import config

# GPU settings
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ['CUDA_VISIBLE_DEVICES'] = "0"

# Load data
print('Loading data......')

print('========== Training set ================')
train_data = Dataset(config.train_data_source)
train_data.dataset_read()
"""
train_data:
样本维度: (120000, 1014)
标签维度: (120000, 4)
"""

print('========== Test set ================')
dev_data = Dataset(config.dev_data_source)
dev_data.dataset_read()
"""
dev_data:
样本维度: (7600, 1014)
标签维度: (7600, 4)
"""