Example #1
import os

# NOTE: Dataprocess, BatchGenerator, get_embedding, train and the `args` module
# come from the project's own code; their imports were not part of this snippet.

def main():
    config = args.parser_args()  # argparse-style namespace of paths and hyper-parameters

    # Load pre-trained word embeddings if the embedding file exists.
    if os.path.exists(os.path.join(config.embedding_dir, config.embedding_file)):
        pretrain_embedding = get_embedding(config.embedding_dir, config.embedding_file)
        config.word_embedding = pretrain_embedding

    # Make sure the checkpoint, restore and summary directories exist.
    for path in (config.save_path, config.restore_path, config.summary):
        if not os.path.exists(path):
            os.makedirs(path)

    config.train = True  # training is always enabled in this example
    if config.train:
        # Build the training set and wrap it in a shuffled batch generator.
        train_file = os.path.join(config.data_dir, config.train_file)
        print(train_file)
        traindata = Dataprocess(train_file, config.word_dict_file, use_pos_fea=False)
        train_data = traindata.get_processdata()
        print(type(train_data))
        print(len(train_data))
        data_train = BatchGenerator(train_data, shuffle=True, maxsen_len=config.max_seq_len)

        config.word_vocab_size = traindata.get_vocab_size()

        # Build the dev set (no shuffling) for evaluation during training.
        dev_file = os.path.join(config.data_dir, config.dev_file)
        devdata = Dataprocess(dev_file, config.word_dict_file, use_pos_fea=False)
        dev_data = devdata.get_processdata()
        data_dev = BatchGenerator(dev_data, shuffle=False, maxsen_len=config.max_seq_len)

        train(config, data_train, data_dev)
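
For reference, main() relies on an argparse-style parser_args() helper exposed by the project's args module (config.py plays the same role in Example #2). The helper itself is not part of the snippet, so the sketch below is only an assumption: the flag names mirror the config attributes that main() reads, and the defaults are purely illustrative.

# Hypothetical sketch of the parser_args() helper assumed above; flag names
# mirror the config attributes used in main(), defaults are illustrative only.
import argparse

def parser_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default='data')
    parser.add_argument('--train_file', default='train.txt')
    parser.add_argument('--dev_file', default='dev.txt')
    parser.add_argument('--word_dict_file', default='word_dict.txt')
    parser.add_argument('--embedding_dir', default='embedding')
    parser.add_argument('--embedding_file', default='embedding.txt')
    parser.add_argument('--save_path', default='save')
    parser.add_argument('--restore_path', default='restore')
    parser.add_argument('--summary', default='summary')
    parser.add_argument('--max_seq_len', type=int, default=50)
    return parser.parse_args()
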
Example #2
    def convolution(self, s, output_num, filter_width):
        # NOTE: the extracted snippet begins in the middle of this call, so the
        # method signature and the op name (tf.contrib.layers.convolution) are
        # assumed reconstructions; the arguments themselves are from the original.
        return tf.contrib.layers.convolution(
            s,
            output_num, [filter_width],
            padding='SAME',
            data_format='NWC',
            activation_fn=tf.nn.relu,
            weights_initializer=xavier_initializer_conv2d(),
            weights_regularizer=l2_regularizer(self.l2_reg))

    def pooling(self, s, pooling_width):
        # 1-D average pooling only exists in TF versions above 1.4, so emulate
        # it with 2-D avg_pool over a dummy height dimension.
        '''
        :param s: [bs, len, hidden]
        :param pooling_width: pooling window width
        :return: [bs, len, hidden] (stride 1 and SAME padding keep the length)
        '''
        s = tf.expand_dims(s, 1)  # [bs, 1, len, hidden]
        pool = tf.nn.avg_pool(s, [1, 1, pooling_width, 1], [1, 1, 1, 1],
                              padding='SAME')
        return tf.squeeze(pool, axis=1)  # [bs, len, hidden]


if __name__ == '__main__':
    # Smoke test: build the ABCNN graph with a toy configuration.
    import config

    config = config.parser_args()  # rebinds `config` from the module to the parsed namespace
    config.word_vocab_size = 5
    config.char_vocab_size = 6
    config.is_train = True
    model = ABCNN(config)
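
As a quick sanity check of the expand_dims + avg_pool trick used in pooling() above (a standalone sketch, not part of the original file): with stride 1 and SAME padding the pooled tensor keeps the [bs, len, hidden] shape of its input.

# Standalone shape check for the avg_pool trick in pooling().
import tensorflow as tf

x = tf.random_uniform([2, 7, 4])                      # [bs, len, hidden]
pooled = tf.squeeze(
    tf.nn.avg_pool(tf.expand_dims(x, 1),              # -> [bs, 1, len, hidden]
                   [1, 1, 3, 1], [1, 1, 1, 1],
                   padding='SAME'),
    axis=1)                                           # -> [bs, len, hidden]

with tf.Session() as sess:
    print(sess.run(pooled).shape)                     # (2, 7, 4)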