Example #1
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words
    config = Config(load=False)
    processing_word = get_processing_word(lowercase=True)

    # Generators
    dev   = CoNLLDataset(config.filename_dev, processing_word)
    test  = CoNLLDataset(config.filename_test, processing_word)
    train = CoNLLDataset(config.filename_train, processing_word)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    vocab_glove = get_glove_vocab(config.filename_glove)

    vocab = vocab_words & vocab_glove
    vocab.add(UNK)
    vocab.add(NUM)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                config.filename_trimmed, config.dim_word)

    # Build and save char vocab
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
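
The helpers used above are not shown in this example. A minimal sketch of what write_vocab and load_vocab are assumed to do, following the convention in the docstring that a word's id is its line number in the vocabulary file:

def write_vocab(vocab, filename):
    # one word per line; the line index becomes the word id
    with open(filename, "w") as f:
        f.write("\n".join(vocab))

def load_vocab(filename):
    # read the file back into a dict mapping word -> line index
    with open(filename) as f:
        return {word.strip(): idx for idx, word in enumerate(f)}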
Example #2
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words
    config = Config(parser, load=False)  # 'parser' is presumably an argparse parser defined elsewhere in the original script
    processing_word = get_processing_word(lowercase=True)

    # Generators
    dev   = Dataset(config.filename_dev, processing_word)
    test  = Dataset(config.filename_test, processing_word)
    train = Dataset(config.filename_train, processing_word)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    # vocab_glove = get_wordvec_vocab(config.filename_wordvec)

    # vocab = vocab_words & vocab_glove
    vocab = list(vocab_words)
    vocab.insert(0, UNK)
    vocab.append(NUM)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_wordvec_vectors(vocab, config.filename_wordvec,
                                config.filename_wordvec_trimmed)
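
The trimming helper itself is not part of the snippet. A minimal sketch of what a function like export_trimmed_glove_vectors / export_trimmed_wordvec_vectors typically does, assuming a GloVe-style text format (token followed by whitespace-separated floats) and a zero vector for words missing from the embedding file; Example #2's variant apparently infers the dimension from the file instead of taking it as an argument:

import numpy as np

def export_trimmed_glove_vectors(vocab, glove_filename, trimmed_filename, dim):
    # one row per vocabulary id; words absent from the embedding file stay all-zero
    embeddings = np.zeros([len(vocab), dim])
    with open(glove_filename, encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split(" ")
            word, vector = parts[0], parts[1:]
            if word in vocab:
                embeddings[vocab[word]] = np.asarray(vector, dtype=np.float32)
    np.savez_compressed(trimmed_filename, embeddings=embeddings)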
Example #3
def main():
    # get config and processing of words
    config = Config(load=False)
    # should be source_x.txt, or ontonotes-nw if you like
    config.filename_train = "../datasets/ontonotes-nw/train"
    config.filename_dev = "../datasets/ontonotes-nw/dev"
    config.filename_test = "../datasets/ontonotes-nw/test"

    processing_word = get_processing_word(lowercase=True)

    # Generators
    dev = NERDataset(config.filename_dev, processing_word)
    test = NERDataset(config.filename_test, processing_word)
    train = NERDataset(config.filename_train, processing_word)
    # for word, tag in train:
    #     print("word:{}".format(word))
    #     print("tag:{}".format(tag))
    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    vocab_glove = get_glove_vocab(config.filename_glove)

    vocab = vocab_words & vocab_glove
    vocab.add(UNK)
    vocab.add(NUM)
    vocab_tags.add(UNK)
    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim Word Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                 config.filename_trimmed, config.dim_word)

    # Build and save char vocab
    train = NERDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
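
get_char_vocab is also a small helper; a sketch, assuming the dataset yields (words, tags) pairs as in the generators above:

def get_char_vocab(dataset):
    # collect every character that appears in the dataset's words
    vocab_char = set()
    for words, _ in dataset:
        for word in words:
            vocab_char.update(word)
    return vocab_char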
Example #4
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train, dev
    and test) and extracts the vocabularies in terms of words and tags. Having
    built the vocabularies, it writes them to a file; the written vocabulary file
    assigns an id (its line number) to each word. It then extracts the relevant
    polyglot vectors and stores them in a numpy array such that the i-th entry
    corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words
    config = Config(load=False)
    processing_word = get_processing_word()

    # Generators
    dev = getDataset(config.filename_dev, processing_word)
    test = getDataset(config.filename_test, processing_word)
    train = getDataset(config.filename_train, processing_word)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    vocab_poly = get_polyglot_vocab(config.filename_polyglot)

    # Get common vocab
    vocab = vocab_words & vocab_poly
    vocab.add(UNK)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim Polyglot Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_polyglot_vectors(vocab, config.filename_polyglot,
                                    config.filename_trimmed, config.dim)
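
get_polyglot_vocab is not shown; a sketch under the assumption that the Polyglot embeddings come as the usual pickled (words, vectors) pair:

import pickle

def get_polyglot_vocab(filename):
    # Polyglot embeddings are commonly distributed as a pickled (words, vectors) tuple
    with open(filename, "rb") as f:
        words, _ = pickle.load(f, encoding="latin1")
    return set(words)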
Example #5
def main():
    """Procedure to build data

    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words
    config = Config(load=False)
    processing_word = get_processing_word(lowercase=True)

    # Generators
    dev = CoNLLDataset(config.filename_dev, processing_word)
    test = CoNLLDataset(config.filename_test, processing_word)
    train = CoNLLDataset(config.filename_train, processing_word)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    vocab_glove = get_glove_vocab(config.filename_glove)

    vocab = vocab_words & vocab_glove
    vocab.add(UNK)
    vocab.add(NUM)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                 config.filename_trimmed, config.dim_word)

    # Build and save char vocab
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
Example #6
def main():
    """Procedure to build data

    This procedure iterates over the SemEval dataset and builds a vocabulary
    of words and tags, then writes them to a file. Each word is assigned an ID.
    The GloVe vectors of the words are then extracted and stored in a numpy
    array; the word id is used to index into that array.

    """
    # get config and processing of words
    config = Config(load=False)
    processing_word = get_processing_word(lowercase=True)

    # Generators for the dev, test and training files
    dev = GloveDataset(config.filename_dev, processing_word)
    test = GloveDataset(config.filename_test, processing_word)
    train = GloveDataset(config.filename_train, processing_word)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    vocab_glove = get_glove_vocab(config.filename_glove)

    #find the intersection between the vocabs from the chosen dataset and GloVe
    vocab = vocab_words & vocab_glove
    #adds the unknown and numeric value to the vocab
    vocab.add(UNK)
    vocab.add(NUM)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # export the trimmed glove vectors in a compressed file.
    vocab = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                 config.filename_trimmed, config.dim_word)
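
get_glove_vocab only needs the tokens, not the vectors; a minimal sketch, assuming one word per line followed by its floats:

def get_glove_vocab(filename):
    # the vocabulary is just the first token of every line in the GloVe file
    vocab = set()
    with open(filename, encoding="utf-8") as f:
        for line in f:
            vocab.add(line.strip().split(" ")[0])
    return vocab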
Example #7
def generate_model_data(data_prefix=None):
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """

    # get config and processing of words
    # loads PubMed articles
    config = Config(load=False)
    print('Config')
    processing_word = get_processing_word(lowercase=True)
    print('Processing_word')

    # Generators
    if data_prefix:
        cwd = os.getcwd()
        config.filename_dev = os.path.join(
            cwd, 'data',
            data_prefix + '_' + os.path.basename(config.filename_dev))
        config.filename_test = os.path.join(
            cwd, 'data',
            data_prefix + '_' + os.path.basename(config.filename_test))
        config.filename_train = os.path.join(
            cwd, 'data',
            data_prefix + '_' + os.path.basename(config.filename_train))

    if not os.path.isfile(config.filename_dev):
        print('Preprocessing tokens and labels to generate input data files')
        preprocess_data()

    dev = CoNLLDataset(config.filename_dev, processing_word)
    test = CoNLLDataset(config.filename_test, processing_word)
    train = CoNLLDataset(config.filename_train, processing_word)
    print('Loaded dev, test, train')

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    print('Loading vocab_words')
    vocab_glove = get_glove_vocab(config.filename_glove)

    vocab = vocab_words & vocab_glove
    vocab.add(UNK)
    vocab.add(NUM)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                 config.filename_trimmed, config.dim_word)

    # Build and save char vocab
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
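
A hypothetical call of Example #7's generate_model_data (the prefix "pubmed" is only an illustration) would rewrite the dev/test/train paths to ./data/pubmed_<original basename> before the vocabularies are built:

generate_model_data(data_prefix="pubmed")  # hypothetical prefix, for illustration only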
Example #8
def main():
    # create instance of config
    config = Config()


    # 1. Load previous vocab words
    old_vocab = set()
    with open(config.filename_words) as f:
        for word in f:
            #print(word)
            old_vocab.add(word.strip())
    print("Number of old vocabs = ", len(old_vocab))

    # Load new vocab and check for words in new vocab that is not in old vocab
    processing_word = get_processing_word(lowercase=True)
    dev   = CoNLLDataset(config.filename_dev, processing_word)
    test  = CoNLLDataset(config.filename_test, processing_word)

    vocab_words, vocab_tags = get_vocabs([dev, test])

    # Get vocab in new dataset that is not in old vocab
    vocab_new = vocab_words - old_vocab
    print("Number of new words: ", len(vocab_new))

    # Get full glove vocab
    vocab_glove = get_glove_vocab(config.filename_glove)

    # Get vocab set for words in new vocab and in glove_vocab
    vocab = vocab_new & vocab_glove
    print("Final number of additions are: ", len(vocab))

    # Load old model
    model = BLSTMCRF(config)
    model.build()
    model.summary()
    model.load_weights('./saves/less_words.h5')
    embedding_weights = model.get_layer(name="word_embeddings").get_weights()[0]
    print(embedding_weights.shape)


    def create_embedding_dict(glove_dir, dim_size):
        print("Creating embedding dictionary...")
        embeddings_index = {}
        f = open(glove_dir, encoding='utf-8')
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
        f.close()

        print('Found %s word vectors.' % len(embeddings_index))
        return embeddings_index

    embeddings_index = create_embedding_dict(config.filename_glove, config.dim_word)

    # Create new embedding size
    embeddings = np.zeros([embedding_weights.shape[0]+len(vocab), embedding_weights.shape[1]])
    # Load old vectors
    for idx, vec in enumerate(embedding_weights):
        embeddings[idx] = vec
    # Load new vectors
    pt = embedding_weights.shape[0]
    for idx, word in enumerate(vocab):
        embeddings[idx+pt] = embeddings_index.get(word)
    print("Size of new embeddings: ", embeddings.shape)
    # Save embeddings to npz
    np.savez_compressed(config.filename_trimmed, embeddings=embeddings)

    # Write new vocab file for new config
    def append_vocab(vocab, filename):
        """Writes a vocab to a file

        Writes one word per line.

        Args:
            vocab: iterable that yields words
            filename: path to the vocab file

        """
        print("Writing vocab...")
        with open(filename, "a") as f:
            f.write("\n")
            for i, word in enumerate(vocab):
                if i != len(vocab) - 1:
                    f.write("{}\n".format(word))
                else:
                    f.write(word)
        print("- done. {} tokens".format(len(vocab)))
    append_vocab(vocab, config.filename_words)

    # Build new model
    config2 = Config()
    model2 = BLSTMCRF(config2)
    model2.build()
    model2.summary()

    layer_names = ["char_embeddings", "fw_char_lstm", "bw_char_lstm", "bidirectional", "crf"]

    # Set other weights
    for layer_name in layer_names:
        if layer_name == "crf":
            model2.get_layer(name="crf_2").set_weights(model.get_layer(name="crf_1").get_weights())
        else:
            model2.get_layer(name=layer_name).set_weights(model.get_layer(name=layer_name).get_weights())

    # Set embedding weights
    #model2.get_layer(name="word_embeddings").set_weights([embeddings])
    model2.summary()
    model2.save_weights('./saves/WEWWWWW.h5')
Example #9
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    
    if len(sys.argv) < 2:
        sys.stderr.write("Too few arguments have been specified\n")
        sys.stderr.write("python " + sys.argv[0] + " config [additional vocabulary in conll format]\n")
        sys.exit(1)  # exit with a non-zero status on a usage error
    # get config and processing of words
    config_file = sys.argv[1]
    
    config = Config(config_file,load=False)
    processing_word = get_processing_word(config)
#    processing_word = get_processing_word(lowercase=config.lowercase)

    # Generators
    dev   = CoNLLDataset(config.filename_dev, processing_word)
    test  = CoNLLDataset(config.filename_test, processing_word)
    train = CoNLLDataset(config.filename_train, processing_word)
    

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    #add additional tags/vocabulary where the data is applied to!
    if len(sys.argv)>2:
        for i in range(2,len(sys.argv)):
            wo,tg = get_vocabs([CoNLLDataset(sys.argv[i],processing_word)])
            vocab_words |=  wo
            vocab_tags |=  tg
    #if config.use_pretrained:
    #    vocab_glove = get_vocab(config.filename_embeddings)
    #if config.use_pretrained:
    #    vocab = vocab_words & vocab_glove
    #else:
    vocab = vocab_words
    vocab.add(UNK)

    vocab.add(NUM)
    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)
    
    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)

    if config.use_pretrained:
        export_trimmed_embedding_vectors(vocab, config.filename_embeddings,
                                config.filename_embeddings_trimmed, config.dim_word, config.embedding_type)

    # Build and save char vocab
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
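
Example #9 reads its configuration file and any extra CoNLL-formatted vocabulary files from the command line; a hypothetical invocation (script and file names are placeholders):

# python build_data.py my_config.cfg extra_vocab.conll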
Example #10
def main():
    """Procedure to build data
    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.
    Args:
        config: (instance of Config) has attributes like hyper-params...
    """
    # get config and processing of words
    config = Config(load=False)
    processing_word = get_processing_word(lowercase=True)
    logger = config.logger

    #------------------------------------------------------------------
    # Generators
    # ------------------------------------------------------------------
    dev = CoNLLDataset(config.filename_dev, processing_word)
    test = CoNLLDataset(config.filename_test, processing_word)
    train = CoNLLDataset(config.filename_train, processing_word)
    sick = CoNLLDataset(config.filename_sick, processing_word)

    # ------------------------------------------------------------------
    # Build Word and Tag vocab
    # ------------------------------------------------------------------
    vocab_words, vocab_tags = get_vocabs([train, dev, test, sick])
    vocab_glove = get_glove_vocab(config.filename_glove)

    vocab = vocab_words & vocab_glove
    vocab.add(UNK)
    vocab.add(NUM)

    # ------------------------------------------------------------------
    # Save vocab
    # ------------------------------------------------------------------
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # ------------------------------------------------------------------
    # Trim GloVe Vectors
    # ------------------------------------------------------------------
    vocab, _ = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                 config.filename_trimmed, config.dim_word)

    # ------------------------------------------------------------------
    # Build and save char vocab
    # ------------------------------------------------------------------
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)

    # ------------------------------------------------------------------
    #split train files
    # ------------------------------------------------------------------
    logger.info('\n Splitting the train file into {} splits ...'.format(
        config.num_splits))
    split_train(config)
    logger.info('Saved the train splits in {}'.format('ner/data/'))
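split_train is not included in the snippet; a minimal sketch of one way it could behave, assuming the train file is CoNLL-formatted with blank lines between sentences and that the split files are written next to the original (both assumptions):

def split_train(config):
    # read blank-line-separated sentences and deal them round-robin into num_splits files
    with open(config.filename_train) as f:
        sentences = f.read().strip().split("\n\n")
    for i in range(config.num_splits):
        part = sentences[i::config.num_splits]
        with open("{}.split{}".format(config.filename_train, i), "w") as out:
            out.write("\n\n".join(part) + "\n")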
Example #11
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words
    dir_output = "./results/" + sys.argv[4] + "/"
    config = Config(dir_output, load=False)
    processing_word = get_processing_word(lowercase=True)

    # Generators
    #dev   = CoNLLDataset(config.filename_dev, processing_word)
    #test  = CoNLLDataset(config.filename_test, processing_word)
    #train = CoNLLDataset(config.filename_train, processing_word)

    dev = CoNLLDataset(sys.argv[1], processing_word)
    test = CoNLLDataset(sys.argv[2], processing_word)
    train = CoNLLDataset(sys.argv[3], processing_word)

    config.filename_dev = sys.argv[1]
    config.filename_test = sys.argv[2]
    config.filename_train = sys.argv[3]
    config.filename_pred = sys.argv[2].replace(".txt", ".pred")

    config.filename_words = "./data/words_" + sys.argv[4] + ".txt"
    config.filename_chars = "./data/chars_" + sys.argv[4] + ".txt"
    config.filename_tags = "./data/tags_" + sys.argv[4] + ".txt"

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])
    vocab_glove = get_glove_vocab(config.filename_glove)

    vocab = vocab_words & vocab_glove
    vocab.add(UNK)
    vocab.add(NUM)
    vocab.add(LG)
    vocab.add(ENT)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                 config.filename_trimmed, config.dim_word)

    # Build and save char vocab
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
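
Example #11 takes the dev, test and train files plus a run name from the command line; a hypothetical invocation (paths and run name are placeholders):

# python build_data.py dev.txt test.txt train.txt my_run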
Example #12
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words
    config = Config(load=False)
    processing_word = get_processing_word(lowercase=True)  # lowercase all tokens and replace digits with NUM

    # Generators
    dev   = CoNLLDataset(config.filename_dev, processing_word)  # build a generator; each iteration yields a tuple (words, tags)
    test  = CoNLLDataset(config.filename_test, processing_word)  # i.e. one sentence (words) and its tags
    train = CoNLLDataset(config.filename_train, processing_word)


    # further processing of the data



    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])   # word vocabulary and tag vocabulary
    print(len(vocab_words))


    vocab_glove = get_glove_vocab(config.filename_glove)       # GloVe vocabulary


    vocab = vocab_words & vocab_glove                          # & is set intersection; both operands are sets
    vocab.add(UNK)
    vocab.add(NUM)                                             # added manually
    print("len of vocab without entity: ", len(vocab))

    dev = CoNLLDataset(config.filename_dev)  # build a generator; each iteration yields a tuple (words, tags)
    test = CoNLLDataset(config.filename_test)  # i.e. one sentence (words) and its tags
    train = CoNLLDataset(config.filename_train)
    vocab_entity = entity2vocab(datasets=[train, dev, test])
    i = 0
    j = 0
    for entity in vocab_entity:
        if entity in vocab_glove:
            if entity not in vocab:
                i = i + 1
                vocab.add(entity)
        else:
            for word in entity[7:].split('_'):
                if word.lower() in vocab:
                    if entity not in vocab:
                        vocab.add(entity)
                        j = j + 1
                else:
                    pass
    print(i, j)


    # vocab.update(vocab_entity)
    # vocab = entity2vocab(datasets=[train, dev], vocab=vocab)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)    # get vocab as a dict {word: index}
    # for this vocab, generate the numpy embedding file: a single matrix of word embeddings
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                config.filename_trimmed, config.dim_word)


    # Build and save char vocab: builds the character set; lowercasing is not applied here, only the raw file is used
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
Example #13
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words
    config = Config(load=False)
    processing_word = get_processing_word(lowercase=True)

    # Generators
    dev   = FKDataset(config.filename_dev, processing_word)
    test1  = FKDataset(config.filename_test1, processing_word)
    test2  = FKDataset(config.filename_test2, processing_word)

    train = FKDataset(config.filename_train, processing_word)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test1,test2])
    vocab_glove = get_glove_vocab(config.filename_glove)

    #print ("Inside build data and prinitng vocab_tags")
    

    vocab_tags_task1 = []
    vocab_tags_task2 = []

    for items in vocab_tags:
        if "_dress" in items:
            vocab_tags_task1.append(items)
        if "_jean" in items:
            vocab_tags_task2.append(items)

    vocab_tags_task1.append('O')
    vocab_tags_task2.append('O')




    vocab = vocab_words & vocab_glove
    vocab.add(UNK)
    vocab.add(NUM)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim GloVe Vectors
    vocab = load_vocab(config.filename_words)
    export_trimmed_glove_vectors(vocab, config.filename_glove,
                                config.filename_trimmed, config.dim_word)

    # Build and save char vocab
    train = FKDataset(config.filename_train)
    vocab_chars = get_char_vocab(train)
    write_vocab(vocab_chars, config.filename_chars)
Example #14
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant word2vec vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """
    # get config and processing of words

    config = Config(load=False)
    processing_word = get_processing_word(lowercase=False)

    # Generators
    dev = CoNLLDataset(config.filename_dev, processing_word)
    test = CoNLLDataset(config.filename_test, processing_word)
    train = CoNLLDataset(config.filename_train, processing_word)
    train2 = CoNLLDataset(config.filename_train2, processing_word)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test, train2])

    vocab = vocab_words
    if "w2v" in config.use_pretrained:
        vocab_word2vec = get_word_vec_vocab(config.filename_word2vec)
        vocab = vocab_words & vocab_word2vec if config.use_pretrained == "w2v" else vocab_words
    if config.replace_digits:
        vocab.add(NUM)
    vocab.add(UNK)

    # Save vocab
    write_vocab(vocab, config.filename_words)
    write_vocab(vocab_tags, config.filename_tags)

    # Trim FastText vectors
    if "ft" in config.use_pretrained:
        abs_f_words = os.path.abspath(config.filename_words)
        abs_f_vec = os.path.abspath(config.filename_fasttext)
        cmd = config.get_ft_vectors_cmd.format(abs_f_words, abs_f_vec)
        subprocess.check_call(cmd, shell=True)
        vocab = load_vocab(config.filename_words)
        export_trimmed_word_vectors(vocab, config.filename_fasttext,
                                    config.filename_trimmed_ft,
                                    config.dim_word)

    if "s2v" in config.use_pretrained:
        abs_s_words = os.path.abspath(config.filename_words)
        abs_s_vec = os.path.abspath(config.filename_fasttext)  # note: likely intended to be config.filename_sent2vec
        cmd = config.get_sent2vec_vectors_cmd.format(abs_s_words, abs_s_vec)
        subprocess.check_call(cmd, shell=True)
        vocab = load_vocab(config.filename_words)
        export_trimmed_word_vectors(vocab, config.filename_sent2vec,
                                    config.filename_trimmed_s2v,
                                    config.dim_sent)

    # Trim Morph2Vec vectors
    if "m2v" in config.use_pretrained:
        vocab = load_vocab(config.filename_words)
        export_trimmed_word_vectors(vocab,
                                    config.filename_morph2vec,
                                    config.filename_trimmed_m2v,
                                    config.dim_morph,
                                    partial_match=True)

    # Trim word2vec Vectors
    if "w2v" in config.use_pretrained:
        vocab = load_vocab(config.filename_words)
        export_trimmed_word_vectors(vocab, config.filename_word2vec,
                                    config.filename_trimmed_w2v,
                                    config.dim_word)

    # Build and save char vocab
    train = CoNLLDataset(config.filename_train)
    vocab_chars = get_char_vocab(train, config.use_ortho_char)
    write_vocab(vocab_chars, config.filename_chars)
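
Example #14 checks for substrings like "w2v", "ft", "s2v" and "m2v" in config.use_pretrained; a hypothetical config value (illustrative only) that would enable both the word2vec and fastText branches:

# config.use_pretrained = "w2v+ft"   # any string containing the codes satisfies the substring checks above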
Example #15
def main():
    """Procedure to build data

    You MUST RUN this procedure. It iterates over the whole dataset (train,
    dev and test) and extracts the vocabularies in terms of words, tags, and
    characters. Having built the vocabularies, it writes them to a file; the
    written vocabulary file assigns an id (its line number) to each word.
    It then extracts the relevant GloVe vectors and stores them in a numpy
    array such that the i-th entry corresponds to the i-th word in the vocabulary.


    Args:
        config: (instance of Config) has attributes like hyper-params...

    """

    parser = argparse.ArgumentParser()

    parser.add_argument('--dataset', type=str, default='conll2003')
    parser.add_argument('--train_lang', type=str, default='en')
    parser.add_argument('--dev_lang', type=str, default='en')
    parser.add_argument('--test_lang', type=str, default='en')

    parser.add_argument('--src_glove',
                        type=str,
                        default='data/glove.42B.300d.txt')
    parser.add_argument('--tgt_glove', type=str, default=None)
    parser.add_argument('--emb_dim', type=int, default=300)

    parser.add_argument('--trimmed_glove',
                        type=str,
                        default='glove_trimmed.npz')

    #parser.add_argument('--init_char', type=str, default=0)
    #parser.add_argument('--trimmed_char', type=str, default='char_trimmed.npz')

    args = parser.parse_args()

    # get config and processing of words
    #config = Config(emb_dim=512, load=False, dataset='ner_nl_es', use_muse=True)
    processing_word = get_processing_word(lowercase=True)
    #src_lang = 'nl'
    #tgt_lang = 'es'

    data_dir = args.dataset

    # Generators
    dev = CoNLLDataset(os.path.join(data_dir, 'dev.txt'),
                       processing_word=processing_word,
                       lang=args.dev_lang)
    test = CoNLLDataset(os.path.join(data_dir, 'test.txt'),
                        processing_word=processing_word,
                        lang=args.test_lang)
    train = CoNLLDataset(os.path.join(data_dir, 'train.txt'),
                         processing_word=processing_word,
                         lang=args.train_lang)

    # Build Word and Tag vocab
    vocab_words, vocab_tags = get_vocabs([train, dev, test])

    vocab_glove = get_glove_vocab(args.src_glove, lang=args.train_lang)
    if args.tgt_glove:
        vocab_glove_tgt = get_glove_vocab(args.tgt_glove, lang=args.test_lang)
        vocab = vocab_words & (vocab_glove | vocab_glove_tgt)
    else:
        vocab = vocab_words & vocab_glove

    #vocab = vocab_words
    vocab.add(UNK)
    vocab.add(NUM)

    # Save vocab
    write_vocab(vocab, os.path.join(data_dir, 'words.txt'))
    write_vocab(vocab_tags, os.path.join(data_dir, 'tags.txt'))

    # Trim GloVe Vectors

    vocab = load_vocab(os.path.join(data_dir, 'words.txt'))
    if args.tgt_glove:
        gloves = {
            args.train_lang: args.src_glove,
            args.test_lang: args.tgt_glove
        }
    else:
        gloves = {args.train_lang: args.src_glove}
    export_trimmed_glove_vectors_multiple(
        vocab, gloves, os.path.join(data_dir, args.trimmed_glove),
        args.emb_dim)

    # Build and save char vocab
    train = CoNLLDataset(os.path.join(data_dir, 'train.txt'))
    test = CoNLLDataset(os.path.join(data_dir, 'test.txt'))
    dev = CoNLLDataset(os.path.join(data_dir, 'dev.txt'))
    vocab_chars = get_char_vocab([train, test, dev])
    write_vocab(vocab_chars, os.path.join(data_dir, 'chars.txt'))
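
Example #15 is driven entirely by the argparse flags defined above; a hypothetical cross-lingual invocation (the target GloVe path and languages are placeholders):

# python build_data.py --dataset conll2003 --train_lang en --dev_lang en \
#     --test_lang de --src_glove data/glove.42B.300d.txt \
#     --tgt_glove data/glove.de.300d.txt --emb_dim 300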