Example #1
        full_connected_layer_units=[[hidden1, 0.5], [hidden2, 0.5]],
        embedding_dropout_rate=0.,
        nb_epoch=30,
        earlyStoping_patience=config['earlyStoping_patience'],
        lr=config['lr'],
        batch_size=batch_size,
        embedding_weight_trainable = True,
        embedding_init_weight=init_weight,
    )
    # Inspect one embedding row before training
    print(w2v_embedding_cnn.embedding_layer_output.get_weights()[0][1])

    w2v_embedding_cnn.print_model_descibe()

    print('+'*80)
    # Train the model
    train_loss, train_accuracy, val_loss, val_accuracy = w2v_embedding_cnn.fit((train_X_feature, train_y),
                          (test_X_feature, test_y))
    # The same embedding row after training (should differ, since the
    # embedding weights are trainable here)
    print(w2v_embedding_cnn.embedding_layer_output.get_weights()[0][1])

    print('dev:%f,%f' % (train_loss, train_accuracy))
    print('val:%f,%f' % (val_loss, val_accuracy))
    quit()
    # train
    # w2v_embedding_cnn.accuracy((train_X_feature, train_y))
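
    # A sketch of a more direct version of the before/after prints above:
    # confirm training moved the embedding away from its w2v initialization
    # (assumes numpy, and that get_weights()[0] is the embedding matrix, as
    # the prints above already rely on).
    import numpy as np
    trained = w2v_embedding_cnn.embedding_layer_output.get_weights()[0]
    print('embedding updated: %s' % (not np.allclose(trained, init_weight)))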


end_time = timeit.default_timer()
print('end! Running time:%ds!' % (end_time - start_time))
logging.debug('=' * 20)
logging.debug('end! Running time:%ds!' % (end_time - start_time))

logging.debug('=' * 20)
        input_dim=feature_encoder.vocabulary_size + 1,
        word_embedding_dim=config['word_embedding_dim'],
        input_length=config['sentence_padding_length'],
        num_labels=len(label_to_index),
        conv_filter_type=config['conv_filter_type'],
        k=config['kmax_k'],
        embedding_dropout_rate=config['embedding_dropout_rate'],
        output_dropout_rate=config['output_dropout_rate'],
        nb_epoch=int(config['cnn_nb_epoch']),
        earlyStoping_patience=config['earlyStoping_patience'],
    )
    rand_embedding_cnn.print_model_descibe()

    if config['refresh_all_model'] or not os.path.exists(model_file_path):
        # Train the model
        rand_embedding_cnn.fit((train_X_feature, train_y),
                               (test_X_feature, test_y))
        # Save the model
        rand_embedding_cnn.save_model(model_file_path)
    else:
        # Load the model from the saved pickle
        rand_embedding_cnn.model_from_pickle(model_file_path)

    # -------------- code start : end -------------
    if verbose > 2:
        logging.debug('-' * 20)
        print('-' * 20)
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Predict -------------
    if verbose > 1:
        logging.debug('-' * 20)
class RFAndWordEmbeddingCnnMerge(object):
    # If the whole dataset is used as the vocabulary, cache the weights in
    # this variable to avoid reloading them, since the loaded weights are
    # identical every time.
    train_data_weight = None
    # The validation data gets its own weights, which no longer include the test set.
    val_data_weight = None

    def __init__(self, feature_encoder, num_filter, num_labels, n_estimators,
                 word2vec_model_file_path, **kwargs):

        if kwargs.get('rand_weight', False):
            # CNN (rand) mode
            weight = None
        elif kwargs['dataset_flag'] == 0:
            if RFAndWordEmbeddingCnnMerge.train_data_weight is None:
                # training set
                RFAndWordEmbeddingCnnMerge.train_data_weight = feature_encoder.to_embedding_weight(
                    word2vec_model_file_path)
            weight = RFAndWordEmbeddingCnnMerge.train_data_weight
        else:
            # kwargs['dataset_flag']>0
            if RFAndWordEmbeddingCnnMerge.val_data_weight is None:
                RFAndWordEmbeddingCnnMerge.val_data_weight = feature_encoder.to_embedding_weight(
                    word2vec_model_file_path)
            weight = RFAndWordEmbeddingCnnMerge.val_data_weight
        # print(weight)
        self.static_w2v_cnn = WordEmbeddingCNN(
            rand_seed=1377,
            verbose=kwargs.get('verbose', 0),
            feature_encoder=feature_encoder,
            # optimizers='adadelta',
            optimizers='sgd',
            # Only used in CNN (rand) mode
            word_embedding_dim=50,
            # Initialize the embedding layer from the pretrained w2v model
            embedding_init_weight=weight,
            # By default, keep the embedding layer weights frozen during training
            embedding_weight_trainable=kwargs.get('embedding_weight_trainable',
                                                  False),
            num_labels=num_labels,
            l1_conv_filter_type=[
                [num_filter, 3, -1, 'valid', (-1, 1), 0.5, 'relu', 'none'],
                [num_filter, 4, -1, 'valid', (-1, 1), 0., 'relu', 'none'],
                [num_filter, 5, -1, 'valid', (-1, 1), 0., 'relu', 'none'],
            ],
            l2_conv_filter_type=[],
            full_connected_layer_units=[],
            embedding_dropout_rate=0.,
            nb_epoch=kwargs.get('nb_epoch', 25),
            batch_size=kwargs.get('batch_size', 32),
            earlyStoping_patience=30,
            lr=kwargs.get('lr', 1e-2),
            show_validate_accuracy=kwargs.get('verbose', 0) > 0,
            # output_regularizer=('l2', 0.5),
            output_constraints=('maxnorm', 3),
            # Must be True so that intermediate outputs can be extracted as features
            save_middle_output=True,
        )

        self.bow_randomforest = BowRandomForest(
            rand_seed=1377,
            verbose=kwargs.get('verbose', 0),
            feature_encoder=feature_encoder,
            # optimizers='adadelta',
            n_estimators=n_estimators,
            min_samples_leaf=1,
        )

    def fit(self, train_data=None, validation_data=None):
        train_X, train_y = train_data
        validation_X, validation_y = validation_data

        self.static_w2v_cnn.fit(train_data, validation_data)

        # Use the CNN's layer output at index 4 as fixed feature vectors
        # for the downstream random forest.
        train_x_features = self.static_w2v_cnn.get_layer_output(train_X)[4]

        validation_x_features = self.static_w2v_cnn.get_layer_output(
            validation_X)[4]

        return self.bow_randomforest.fit((train_x_features, train_y),
                                         (validation_x_features, validation_y))
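
# A minimal usage sketch for RFAndWordEmbeddingCnnMerge above. All names
# here (feature_encoder, train_X, train_y, val_X, val_y, and the w2v model
# path) are hypothetical placeholders; dataset_flag=0 selects the cached
# training-set embedding weights.
merge_model = RFAndWordEmbeddingCnnMerge(
    feature_encoder,
    num_filter=100,
    num_labels=2,
    n_estimators=200,
    word2vec_model_file_path='w2v_model.bin',
    dataset_flag=0,
)
merge_model.fit((train_X, train_y), (val_X, val_y))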
Example #4
        conv_filter_type=config['conv_filter_type'],
        k=config['kmax_k'],
        embedding_dropout_rate=config['embedding_dropout_rate'],
        output_dropout_rate=config['output_dropout_rate'],
        nb_epoch=int(config['cnn_nb_epoch']),
        earlyStoping_patience=config['earlyStoping_patience'],
        input_dim=feature_encoder.vocabulary_size + 1,
        optimizers='sgd',
        lr=1e-1,
        batch_size=128,
    )
    w2v_embedding_cnn.print_model_descibe()

    if config['refresh_all_model'] or not os.path.exists(model_file_path):
        # Train the model
        w2v_embedding_cnn.fit((train_w2v_features, train_y),
                               (test_w2v_features, test_y))
        # Save the model
        w2v_embedding_cnn.save_model(model_file_path)
    else:
        # Load the model from the saved pickle
        w2v_embedding_cnn.model_from_pickle(model_file_path)

    # -------------- code start : end -------------
    if verbose > 2:
        logging.debug('-' * 20)
        print('-' * 20)
    # -------------- region end : 3. Initialize and train the CNN model ---------------



    # Predict the label of a raw sentence ('你好吗' = "How are you")
    print(index_to_label[w2v_embedding_cnn.predict(feature_encoder.transform_sentence('你好吗'))])
Example #5
        nb_epoch=int(config["cnn_nb_epoch"]),
        earlyStoping_patience=config["earlyStoping_patience"],
        lr=config["lr"],
        batch_size=config["batch_size"],
        embedding_weight_trainable=True,
    )

    rand_embedding_cnn.print_model_descibe()

    if config["refresh_all_model"] or not os.path.exists(model_file_path):

        print ("+" * 80)
        # 训练模型
        print (rand_embedding_cnn.embedding_layer_output.get_weights()[0][1])
        train_loss, train_accuracy, val_loss, val_accuracy = rand_embedding_cnn.fit(
            (train_X_feature, train_y), (test_all_X_feature, test_all_y)
        )
        print (rand_embedding_cnn.embedding_layer_output.get_weights()[0][1])
        # y_pred, is_correct, accu, f1 = rand_embedding_cnn.accuracy((test_all_X_feature, test_all_y))
        #
        # print 'F1(macro)为:%f' % (np.average(f1))
        #
        # # train
        # rand_embedding_cnn.accuracy((train_X_feature, train_y))
        print ("dev:%f,%f" % (train_loss, train_accuracy))
        print ("val:%f,%f" % (val_loss, val_accuracy))
        quit()

        # 五折
        print ("五折")
        counter = 0
Example #6
        word_embedding_dim=config['word_embedding_dim'],
        embedding_init_weight=feature_encoder.to_embedding_weight(word2vec_file_path),
        input_length=config['sentence_padding_length'],
        num_labels=len(label_to_index),
        conv_filter_type=config['conv_filter_type'],
        k=config['kmax_k'],
        embedding_dropout_rate=config['embedding_dropout_rate'],
        output_dropout_rate=config['output_dropout_rate'],
        nb_epoch=int(config['cnn_nb_epoch']),
        earlyStoping_patience=config['earlyStoping_patience'],
    )
    rand_embedding_cnn.print_model_descibe()

    if config['refresh_all_model'] or not os.path.exists(model_file_path):
        # Train the model
        rand_embedding_cnn.fit((feature_encoder.train_padding_index, train_y),
                               (list(map(feature_encoder.transform_sentence, test_X)), test_y))
        # Save the model
        rand_embedding_cnn.save_model(model_file_path)
    else:
        # Load the model from the saved pickle
        rand_embedding_cnn.model_from_pickle(model_file_path)

    # -------------- code start : end -------------
    if verbose > 2:
        logging.debug('-' * 20)
        print('-' * 20)
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Predict -------------
    if verbose > 1:
        logging.debug('-' * 20)
Example #7
        input_dim=feature_encoder.vocabulary_size + 1,
        word_embedding_dim=config['word_embedding_dim'],
        input_length=config['sentence_padding_length'],
        num_labels=len(label_to_index),
        conv_filter_type=config['conv_filter_type'],
        k=config['kmax_k'],
        embedding_dropout_rate=config['embedding_dropout_rate'],
        output_dropout_rate=config['output_dropout_rate'],
        nb_epoch=int(config['cnn_nb_epoch']),
        earlyStoping_patience=config['earlyStoping_patience'],
    )
    rand_embedding_cnn.print_model_descibe()

    if config['refresh_all_model'] or not os.path.exists(model_file_path):
        # Train the model
        rand_embedding_cnn.fit((train_X_feature, train_y),
                               (test_X_feature, test_y))
        # Save the model
        rand_embedding_cnn.save_model(model_file_path)
    else:
        # Load the model from the saved pickle
        rand_embedding_cnn.model_from_pickle(model_file_path)

    # -------------- code start : end -------------
    if verbose > 2:
        logging.debug('-' * 20)
        print('-' * 20)
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Predict -------------
    if verbose > 1:
        logging.debug('-' * 20)
Example #8
class RFAndWordEmbeddingCnnMerge(CnnBaseClass):
    __version__ = '1.4'
    # If the whole dataset is used as the vocabulary, cache the weights in
    # this variable to avoid reloading them, since the loaded weights are
    # identical every time.
    train_data_weight = None
    # The validation data gets its own weights, which no longer include the test set.
    val_data_weight = None

    def __init__(self, feature_encoder, num_filter, num_labels, n_estimators,
                 word2vec_model_file_path, **kwargs):
        self.static_w2v_cnn = None
        self.bow_randomforest = None
        self.feature_encoder = feature_encoder

        if not kwargs.get('init_model', True):
            # Skip model initialization; typically used when restoring a saved model
            return

        if kwargs.get('rand_weight', False):
            # CNN (rand) mode
            weight = None
        elif kwargs['dataset_flag'] == 0:
            # training set
            if RFAndWordEmbeddingCnnMerge.train_data_weight is None:
                RFAndWordEmbeddingCnnMerge.train_data_weight = feature_encoder.to_embedding_weight(
                    word2vec_model_file_path)
            weight = RFAndWordEmbeddingCnnMerge.train_data_weight
        else:
            # kwargs['dataset_flag']>0
            if RFAndWordEmbeddingCnnMerge.val_data_weight is None:
                RFAndWordEmbeddingCnnMerge.val_data_weight = feature_encoder.to_embedding_weight(
                    word2vec_model_file_path)
            weight = RFAndWordEmbeddingCnnMerge.val_data_weight
        # print(weight)
        self.static_w2v_cnn = WordEmbeddingCNN(
            rand_seed=1377,
            verbose=kwargs.get('verbose', 0),
            feature_encoder=feature_encoder,
            # optimizers='adadelta',
            optimizers='sgd',
            # Only used in CNN (rand) mode
            word_embedding_dim=50,
            # Initialize the embedding layer from the pretrained w2v model
            embedding_init_weight=weight,
            # By default, keep the embedding layer weights frozen during training
            embedding_weight_trainable=kwargs.get('embedding_weight_trainable',
                                                  False),
            num_labels=num_labels,
            l1_conv_filter_type=[
                [num_filter, 3, -1, 'valid', (-1, 1), 0.5, 'relu', 'none'],
                [num_filter, 4, -1, 'valid', (-1, 1), 0., 'relu', 'none'],
                [num_filter, 5, -1, 'valid', (-1, 1), 0., 'relu', 'none'],
            ],
            l2_conv_filter_type=[],
            full_connected_layer_units=[],
            embedding_dropout_rate=0.,
            nb_epoch=kwargs.get('nb_epoch', 25),
            batch_size=kwargs.get('batch_size', 32),
            earlyStoping_patience=30,
            lr=kwargs.get('lr', 1e-2),
            show_validate_accuracy=kwargs.get('verbose', 0) > 0,
            # output_regularizer=('l2', 0.5),
            output_constraints=('maxnorm', 3),
            # Must be True so that intermediate outputs can be extracted as features
            save_middle_output=True,
        )

        self.bow_randomforest = BowRandomForest(
            rand_seed=1377,
            verbose=kwargs.get('verbose', 0),
            feature_encoder=feature_encoder,
            # optimizers='adadelta',
            n_estimators=n_estimators,
            min_samples_leaf=1,
        )

    def fit(self, train_data=None, validation_data=None):
        train_X, train_y = train_data
        validation_X, validation_y = validation_data

        self.static_w2v_cnn.fit(train_data, validation_data)

        # Use the CNN's layer output at index 4 as fixed feature vectors
        # for the downstream random forest.
        train_x_features = self.static_w2v_cnn.get_layer_output(train_X)[4]

        validation_x_features = self.static_w2v_cnn.get_layer_output(
            validation_X)[4]

        return self.bow_randomforest.fit((train_x_features, train_y),
                                         (validation_x_features, validation_y))

    def save_model(self, path):
        """
            Save the model in pickle form.
        :param path: path where the model file is written
        :type path: str
        :return:
        """

        # Objects must later be loaded in the same order they are dumped.
        with open(path, 'wb') as model_file:
            pickle.dump(self.feature_encoder, model_file)
            pickle.dump(self.static_w2v_cnn, model_file)
            pickle.dump(self.bow_randomforest, model_file)

    def model_from_pickle(self, path):
        '''
            Load the model directly from the model file.
        :param path:
        :return: RandEmbeddingCNN object
        '''

        # Load order must match the dump order in save_model.
        with open(path, 'rb') as model_file:
            self.feature_encoder = pickle.load(model_file)
            self.static_w2v_cnn = pickle.load(model_file)
            self.bow_randomforest = pickle.load(model_file)

    @staticmethod
    def get_feature_encoder(**kwargs):
        """
            获取该分类器的特征编码器

        :param kwargs:  可设置参数 [ input_length(*), full_mode(#,False), feature_type(#,word),verbose(#,0)],加*表示必须提供,加#表示可选,不写则默认。
        :return:
        """

        assert kwargs.has_key('input_length'), '请提供 input_length 的属性值'

        from data_processing_util.feature_encoder.onehot_feature_encoder import FeatureEncoder
        feature_encoder = FeatureEncoder(
            need_segmented=kwargs.get('need_segmented', True),
            sentence_padding_length=kwargs['input_length'],
            verbose=kwargs.get('verbose', 0),
            full_mode=kwargs.get('full_mode', False),
            remove_stopword=True,
            replace_number=True,
            lowercase=True,
            zhs2zht=True,
            remove_url=True,
            padding_mode='center',
            add_unkown_word=True,
            feature_type=kwargs.get('feature_type', 'word'),
            vocabulary_including_test_set=kwargs.get(
                'vocabulary_including_test_set', True),
            update_dictionary=kwargs.get('update_dictionary', True))

        return feature_encoder

    def batch_predict_bestn(self, sentences, transform_input=False, bestn=1):
        """
                    批量预测句子的类别,对输入的句子进行预测

                :param sentences: 测试句子,
                :type sentences: array-like
                :param transform_input: 是否转换句子,如果为True,输入原始字符串句子即可,内部已实现转换成字典索引的形式。
                :type transform_input: bool
                :param bestn: 预测,并取出bestn个结果。
                :type bestn: int
                :return: y_pred_result, y_pred_score
                """
        if transform_input:
            sentences = self.static_w2v_cnn.transform(sentences)
        # sentences = np.asarray(sentences)
        # assert len(sentences.shape) == 2, 'shape必须是2维的!'

        # Reuse the CNN's layer output at index 4 as features for the random forest.
        train_x_features = self.static_w2v_cnn.get_layer_output(sentences)[4]
        # print(train_x_features)
        # print(train_x_features.shape)

        return self.bow_randomforest.batch_predict_bestn(train_x_features,
                                                         transform_input=False,
                                                         bestn=bestn)
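
# A round-trip sketch for the class above, assuming a trained instance
# merge_model (as in the sketch after Example #1's class). The pickle path
# and the test sentence are hypothetical placeholders.
merge_model.save_model('merge_model.pkl')

# Restore with init_model=False so __init__ skips rebuilding the networks.
restored = RFAndWordEmbeddingCnnMerge(
    feature_encoder=None,
    num_filter=None,
    num_labels=None,
    n_estimators=None,
    word2vec_model_file_path=None,
    init_model=False,
)
restored.model_from_pickle('merge_model.pkl')
y_pred, y_score = restored.batch_predict_bestn(['你好吗'], transform_input=True, bestn=1)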
Example #10
        full_connected_layer_units=config['full_connected_layer_units'],
        embedding_dropout_rate=config['embedding_dropout_rate'],
        output_dropout_rate=config['output_dropout_rate'],
        nb_epoch=int(config['cnn_nb_epoch']),
        earlyStoping_patience=config['earlyStoping_patience'],
        lr=config['lr'],
        batch_size=config['batch_size'],
    )

    rand_embedding_cnn.print_model_descibe()

    if config['refresh_all_model'] or not os.path.exists(model_file_path):

        print('+'*80)
        # Train the model
        rand_embedding_cnn.fit((train_X_feature, train_y),
                               (test_all_X_feature, test_all_y))
        y_pred, is_correct, accu, f1 = rand_embedding_cnn.accuracy((test_all_X_feature, test_all_y))

        print('F1 (macro): %f' % np.average(f1))

        # train
        rand_embedding_cnn.accuracy((train_X_feature, train_y))


        # 5-fold cross-validation
        print('5-fold')
        counter = 0
        for dev_X, dev_y, val_X, val_y in data_util.get_k_fold_data(k=5, data=(train_X_feature, train_y)):
            counter += 1

            # quit()
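
# data_util.get_k_fold_data is project-specific; below is a rough stand-in
# sketch with scikit-learn, assuming it yields one
# (dev_X, dev_y, val_X, val_y) tuple per fold over the training data.
import numpy as np
from sklearn.model_selection import KFold

def get_k_fold_data_sketch(k, data):
    X, y = np.asarray(data[0]), np.asarray(data[1])
    for dev_idx, val_idx in KFold(n_splits=k, shuffle=True, random_state=0).split(X):
        yield X[dev_idx], y[dev_idx], X[val_idx], y[val_idx]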