# ------------------------------------------------------------------
# Fragment: train a single-channel bag-of-words CNN once on the train
# split and evaluate on the test split.
# NOTE(review): rand_seed, feature_encoder, index_to_label, layer1,
# layer2, l1_conv_filter_type, l2_conv_filter_type, k, hidden1, hidden2,
# nb_epoch, train_X_feature/train_y and test_X_feature/test_y must all
# be defined earlier in the file (not visible in this chunk).
# ------------------------------------------------------------------
logging.debug('=' * 20)
logging.debug('生成CNN深度特征器')  # "building the CNN deep feature extractor"

from deep_learning.cnn.bow_cnn.single_channel_bow_cnn_model import SingleChannelBowCNN

bow_cnn = SingleChannelBowCNN(
    rand_seed=rand_seed,
    verbose=1,
    feature_encoder=feature_encoder,
    num_labels=len(index_to_label),
    # input width = size of the BoW vocabulary (one slot per token)
    input_length=feature_encoder.vocabulary_size,
    # Each conv spec is presumably
    # [nb_filters, filter_height, filter_width, border_mode,
    #  pool_size (k, 1), dropout_rate] -- confirm against the
    # SingleChannelBowCNN constructor.
    l1_conv_filter_type=[
        [layer1, l1_conv_filter_type[0], -1, 'valid', (k[0], 1), 0.5],
        [layer1, l1_conv_filter_type[1], -1, 'valid', (k[0], 1), 0.],
        [layer1, l1_conv_filter_type[2], -1, 'valid', (k[0], 1), 0.],
    ],
    l2_conv_filter_type=[
        [layer2, l2_conv_filter_type[0], -1, 'valid', (k[1], 1), 0.5]
    ],
    full_connected_layer_units=[hidden1, hidden2],
    output_dropout_rate=0.2,
    nb_epoch=nb_epoch,
    earlyStoping_patience=50,  # (sic) spelling matches the model's keyword
    optimizers='sgd',
    batch_size=32,
    lr=1e-2,
)
bow_cnn.print_model_descibe()  # (sic) method name as defined by the model class

# fit() trains on the first pair and evaluates on the second; the
# returned metrics are printed as-is.
print(bow_cnn.fit(
    (train_X_feature, train_y),
    (test_X_feature, test_y)))
                    print('-' * 80)
                    print('第%d个验证' % counter)  # "validation run #%d"
                    fout.write('-' * 80+'\n')
                    fout.write('第%d个验证\n' % counter)

                    # Build a fresh model for this fold so no trained weights
                    # leak between folds.
                    # NOTE(review): this chunk is the interior of a grid-search
                    # / k-fold loop whose header is not visible here; counter,
                    # fout, verbose, dev_X/dev_y and val_X/val_y come from that
                    # enclosing scope.
                    bow_cnn = SingleChannelBowCNN(
                        rand_seed=rand_seed,
                        verbose=verbose,
                        feature_encoder=feature_encoder,
                        num_labels=len(index_to_label),
                        input_length=feature_encoder.vocabulary_size,
                        l1_conv_filter_type=[
                            [layer1, l1_conv_filter_type[0], -1, 'valid', (k[0], 1), 0.5],
                            [layer1, l1_conv_filter_type[1], -1, 'valid', (k[0], 1), 0.],
                            [layer1, l1_conv_filter_type[2], -1, 'valid', (k[0], 1), 0.],
                        ],
                        l2_conv_filter_type=[
                            [layer2, l2_conv_filter_type[0], -1, 'valid', (k[1], 1), 0.5]
                        ],
                        full_connected_layer_units=[hidden1, hidden2],
                        output_dropout_rate=0.2,
                        nb_epoch=nb_epoch,
                        earlyStoping_patience=50,
                        optimizers='sgd',
                        batch_size=32,
                        lr=1e-2,
                    )

                    # fit() appears to return (dev loss, dev accuracy,
                    # val loss, val accuracy) -- confirm in the model class.
                    dev_loss, dev_accuracy, \
                    val_loss, val_accuracy = bow_cnn.fit((dev_X,  dev_y),(val_X, val_y))
                    print('dev:%f,%f'%(dev_loss, dev_accuracy))
                    print('val:%f,%f'%(val_loss, val_accuracy))
# 3-fold split of the training data for cross-validation.
cv_data = data_util.get_k_fold_data(k=3,
                                    data=train_data,
                                    rand_seed=0,
                                    )


# Grid-search cross-validation: the list-valued hyper-parameters
# (layer1, layer2, k, hidden1, ...) are presumably iterated over inside
# cross_validation() -- confirm in SingleChannelBowCNN.
# NOTE(review): the result-file name 'single_%s_bow_cv_detail.txt' still
# contains a bare '%s' that is never formatted here -- compare the later
# call in this file that applies '% feature_type' itself.  Verify whether
# cross_validation() substitutes it; otherwise the literal '%s' ends up
# in the file name on disk.
SingleChannelBowCNN.cross_validation(
    cv_data,
    (test_data[u'SENTENCE'].as_matrix(), test_y),
    'single_%s_bow_cv_detail.txt',
    rand_seed=1337,
    nb_epoch=30,
    verbose=0,
    feature_type='word_seg',
    layer1=[3,5,8,18],
    l1_conv_filter_type=[2, 3, 4],
    layer2=[3, 7],
    l2_conv_filter_type=[5],
    k=[2, 2],
    hidden1=[50, 100],
    hidden2=[50, 100],

)

# Hard stop: everything after this line is intentionally skipped.
quit()
# DEAD CODE: this whole section sits after the quit() above and never
# runs.  The loops below look like the original hand-rolled grid search,
# superseded by SingleChannelBowCNN.cross_validation().
for layer1 in [3,5,8,18]:
    for layer2 in [3,7,10,20,50]:
        for hidden1 in [50,100,500,1000]:
            for hidden2 in [50,100,300,450]:
                print('=' * 150)
# NOTE(review): the statements below are OUTSIDE the loops (column 0),
# so layer1/layer2/hidden1/hidden2 here would only hold the loops' final
# values -- almost certainly not the intent if this were ever re-enabled.
cv_data = data_util.get_k_fold_data(k=3,
                                    data=train_data,
                                    rand_seed=3,
                                    )

SingleChannelBowCNN.cross_validation(
    cv_data,
    (test_data[u'SENTENCE'].as_matrix(), test_y),
    'result/cnn_bow_%s_v2.3S_cv_detail.txt'% feature_type,
    rand_seed=rand_seed,
    nb_epoch=nb_epoch,
    verbose=verbose,
    remove_stopword = remove_stopword,
    feature_type=feature_type,
    layer1=layer1,
    l1_conv_filter_type=l1_conv_filter_type,
    layer2=layer2,
    l2_conv_filter_type=l2_conv_filter_type,
    k=k,
    hidden1=hidden1,
    hidden2=hidden2,
    word2vec_to_solve_oov = word2vec_to_solve_oov,
    word2vec_model_file_path = config['word2vec_model_file_path']
)



# Report total wall-clock run time (start_time is set near the top of
# the file, outside this chunk).
end_time = timeit.default_timer()
# Fixed: this line used the Python 2 print STATEMENT ("print '...'")
# while the rest of the file uses the print() function.  The call form
# produces identical output for a single argument in Python 2 and keeps
# the file internally consistent (and Python 3 tolerant).
print('end! Running time:%ds!' % (end_time - start_time))
logging.debug('=' * 20)
    **config)
# Fragment: v2-style model configuration using the extended layer specs
# (conv specs carry two extra fields -- activation and normalization --
# and fully-connected specs are [units, dropout, activation, norm]).
# NOTE(review): all_cv_data comes from a call whose beginning (ending in
# '**config)') lies outside this chunk; only fold 0 is unpacked here.
train_X_feature, train_y, test_X_feature, test_y,_,_ = all_cv_data[0]

bow_cnn = SingleChannelBowCNN(
    rand_seed=rand_seed,
    verbose=1,
    feature_encoder=feature_encoder,
    num_labels=len(index_to_label),
    # input width taken from the actual feature matrix rather than the
    # encoder's vocabulary size (cf. the earlier construction above).
    input_length=train_X_feature.shape[-1],
    # no first-level convolutions in this configuration
    l1_conv_filter_type=[
        # [layer1, l1_conv_filter_type[0], -1, 'valid', (k[0], 1), 0., 'relu', 'none'],
        # [layer1, l1_conv_filter_type[1], -1, 'valid', (k[0], 1), 0., 'relu', 'batch_normalization'],
        # [layer1, l1_conv_filter_type[2], -1, 'valid', (k[0], 1), 0., 'relu', 'batch_normalization'],
    ],
    l2_conv_filter_type=[
        # NOTE(review): filter size -1 and pool size (0, 1) look like
        # "use default / no pooling" sentinels -- confirm in the model.
        [layer2, -1, -1, 'valid', (0, 1), 0., 'relu', 'none'],
        # [layer2, l2_conv_filter_type[1], -1, 'valid', (k[1], 1), 0.5]
    ],
    full_connected_layer_units=[
        [hidden1, 0., 'relu', 'none'],
        # [hidden2, 0.5, 'relu', 'none']
    ],
    nb_epoch=nb_epoch,
    earlyStoping_patience=50,
    optimizers='sgd',
    batch_size=32,
    lr=2e-2,
)
bow_cnn.print_model_descibe()

print(bow_cnn.fit(