Example No. 1
        vocab_size=feature_encoder.vocabulary_size,
        word_embedding_dim=48,
        # input_length=None,
        input_length=sentence_padding_length,
        num_labels=4,
        conv_filter_type=[
            [100, 2, 'full'],
            [100, 4, 'full'],
            # [100,6,5,'valid'],
        ],
        ktop=1,
        embedding_dropout_rate=0.5,
        output_dropout_rate=0.5,
        nb_epoch=10,
        earlyStoping_patience=5,
    )
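    # Parameter notes (hedged; inferred from how the arguments are used in this
    # example rather than from the library's documentation): each conv_filter_type
    # entry appears to be [num_filters, filter_length, border_mode], ktop appears to
    # be the k of the k-max pooling layer, and input_length matches
    # sentence_padding_length.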
    # dcnn.print_model_descibe()
    # train the model
    # dcnn.model_from_pickle('model/modelA.pkl')
    dcnn.fit((feature_encoder.train_padding_index, trian_y),
             (map(feature_encoder.transform_sentence, test_X), test_y))
    quit()
    print(dcnn.predict(feature_encoder.transform_sentence(test_X[0])))
    dcnn.accuracy((map(feature_encoder.transform_sentence, test_X), test_y))
    print(dcnn.batch_predict(map(feature_encoder.transform_sentence, test_X)))
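    # Note (assumption about the target interpreter): this snippet relies on
    # Python 2 semantics, where map() returns a list. Under Python 3, map() is
    # lazy, so the transformed features would need to be materialized first, e.g.:
    #     test_X_features = [feature_encoder.transform_sentence(s) for s in test_X]
    #     dcnn.accuracy((test_X_features, test_y))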
    # save the model
    # dcnn.save_model('model/modelA.pkl')

    # load the model from the saved pickle
    # print onehot_cnn.predict(feature_encoder.transform_sentence('你好吗'))
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Prediction -------------
    if verbose > 1:
        logging.debug('-' * 20)
        print '-' * 20
        logging.debug('4. Prediction')
        print '4. Prediction'
    # -------------- code start -------------

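    # Predict a label index for each test feature vector, then map every index back
    # to its label string for the results table saved below.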
    y_predict = map(rand_embedding_cnn.predict, test_X_feature)
    test_data[u'Y_PRED'] = [index_to_label[item] for item in y_predict]
    data_util.save_data(test_data,path=result_file_path)

    quit()
    rand_embedding_cnn.predict(feature_encoder.transform_sentence('你好吗'))

    print index_to_label[rand_embedding_cnn.predict(feature_encoder.transform_sentence('你好吗'))]

    y_pred, is_correct, accu, f1 = rand_embedding_cnn.accuracy((test_X_feature, test_y))
    logging.debug('F1(macro): %f' % (np.average(f1[:-1])))
    print 'F1(macro): %f' % (np.average(f1[:-1]))
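    # Hedged cross-check: assuming accuracy() returns per-class F1 scores with an
    # aggregate value in the last slot (hence the f1[:-1] average above), the same
    # macro F1 could be computed with scikit-learn:
    #     from sklearn.metrics import f1_score
    #     print 'F1(macro): %f' % f1_score(test_y, y_pred, average='macro')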
    test_data[u'IS_CORRECT'] = is_correct
    test_data[u'PREDICT'] = [index_to_label[item] for item in y_pred]
    # data_util.save_data(test_data,'tmp.tmp')
    # quit()
    data_util.save_data(test_data,
                        path=result_file_path)


    # -------------- region start : generate deep feature encodings -------------
Example No. 3
        verbose=1,
        batch_size=2,
        vocab_size=feature_encoder.vocabulary_size,
        word_embedding_dim=48,
        # input_length=None,
        input_length=sentence_padding_length,
        num_labels=4,
        conv_filter_type=[[100, 2, 'full'],
                          [100, 4, 'full'],
                          # [100,6,5,'valid'],
                          ],
        ktop=1,
        embedding_dropout_rate=0.5,
        output_dropout_rate=0.5,
        nb_epoch=10,
        earlyStoping_patience=5,
    )
    dcnn.print_model_descibe()
    # train the model
    # dcnn.model_from_pickle('model/modelA.pkl')
    dcnn.fit((train_X_features, trian_y),
             (test_X_features, test_y))
    print(dcnn.predict(feature_encoder.transform_sentence(test_X[0])))
    dcnn.accuracy((test_X_features, test_y))
    print(dcnn.batch_predict(test_X_features))
    # save the model
    # dcnn.save_model('model/modelA.pkl')
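    # Typical workflow suggested by the commented calls above: train once, persist
    # the model with dcnn.save_model(...), and later restore it via
    # dcnn.model_from_pickle(...) instead of calling fit() again.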

    # load the model from the saved pickle
    # print onehot_cnn.predict(feature_encoder.transform_sentence('你好吗'))
Example No. 4
    if verbose > 2:
        logging.debug('-' * 20)
        print('-' * 20)
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Prediction -------------
    if verbose > 1:
        logging.debug('-' * 20)
        print('-' * 20)
        logging.debug('4. Prediction')
        print('4. Prediction')
    # -------------- code start -------------



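    # Transform the raw sentence into model input, predict a label index, then map
    # the index back to its label string via index_to_label.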
    print(index_to_label[dcnn_model.predict(feature_encoder.transform_sentence('你好吗'))])

    y_pred, is_correct, accu, f1 = dcnn_model.accuracy((map(feature_encoder.transform_sentence, test_X), test_y))
    logging.debug('F1(macro): %f' % (np.average(f1[:-1])))
    print('F1(macro): %f' % (np.average(f1[:-1])))
    test_data[u'IS_CORRECT'] = is_correct
    test_data[u'PREDICT'] = [index_to_label[item] for item in y_pred]
    # data_util.save_data(test_data,'tmp.tmp')
    # quit()

    data_util.save_data(test_data,
                        path=result_file_path)

    # -------------- code end -------------
    if verbose > 1:
        logging.debug('-' * 20)
Example No. 5
    # -------------- code end -------------
    if verbose > 2:
        logging.debug('-' * 20)
        print('-' * 20)
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Prediction -------------
    if verbose > 1:
        logging.debug('-' * 20)
        print('-' * 20)
        logging.debug('4. Prediction')
        print('4. Prediction')
    # -------------- code start -------------

    print(index_to_label[dcnn_model.predict(
        feature_encoder.transform_sentence('你好吗'))])

    y_pred, is_correct, accu, f1 = dcnn_model.accuracy(
        (map(feature_encoder.transform_sentence, test_X), test_y))
    logging.debug('F1(macro): %f' % (np.average(f1[:-1])))
    print('F1(macro): %f' % (np.average(f1[:-1])))
    test_data[u'IS_CORRECT'] = is_correct
    test_data[u'PREDICT'] = [index_to_label[item] for item in y_pred]
    # data_util.save_data(test_data,'tmp.tmp')
    # quit()

    data_util.save_data(test_data, path=result_file_path)

    # -------------- code end -------------
    if verbose > 1:
        logging.debug('-' * 20)
Example No. 6
    if verbose > 2:
        logging.debug('-' * 20)
        print '-' * 20
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Prediction -------------
    if verbose > 1:
        logging.debug('-' * 20)
        print '-' * 20
        logging.debug('4. Prediction')
        print '4. Prediction'
    # -------------- code start -------------



    print index_to_label[rand_embedding_cnn.predict(feature_encoder.transform_sentence('你好吗'))]

    y_pred, is_correct, accu, f1 = rand_embedding_cnn.accuracy((map(feature_encoder.transform_sentence, test_X), test_y))
    logging.debug('F1(macro): %f' % (np.average(f1[:-1])))
    print 'F1(macro): %f' % (np.average(f1[:-1]))
    test_data[u'IS_CORRECT'] = is_correct
    test_data[u'PREDICT'] = [index_to_label[item] for item in y_pred]
    # data_util.save_data(test_data,'tmp.tmp')
    # quit()
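    # Assumption: config['result_file_path'] stores the output path as a sequence
    # of string fragments, which ''.join(...) concatenates into a single path.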
    result_file_path = ''.join(config['result_file_path'])

    data_util.save_data(test_data,
                        path=result_file_path)

    # -------------- code end -------------
    if verbose > 1:
Example No. 7
    # -------------- region end : 3. Initialize and train the CNN model ---------------

    # -------------- region start : 4. Prediction -------------
    if verbose > 1:
        logging.debug('-' * 20)
        print '-' * 20
        logging.debug('4. Prediction')
        print '4. Prediction'
    # -------------- code start -------------

    y_predict = map(rand_embedding_cnn.predict, test_X_feature)
    test_data[u'Y_PRED'] = [index_to_label[item] for item in y_predict]
    data_util.save_data(test_data, path=result_file_path)

    quit()
    rand_embedding_cnn.predict(feature_encoder.transform_sentence('你好吗'))

    print index_to_label[rand_embedding_cnn.predict(
        feature_encoder.transform_sentence('你好吗'))]

    y_pred, is_correct, accu, f1 = rand_embedding_cnn.accuracy(
        (test_X_feature, test_y))
    logging.debug('F1(macro): %f' % (np.average(f1[:-1])))
    print 'F1(macro): %f' % (np.average(f1[:-1]))
    test_data[u'IS_CORRECT'] = is_correct
    test_data[u'PREDICT'] = [index_to_label[item] for item in y_pred]
    # data_util.save_data(test_data,'tmp.tmp')
    # quit()
    data_util.save_data(test_data, path=result_file_path)

    # -------------- region start : generate deep feature encodings -------------