Example no. 1
def get_tokenized_data(training_path, tokenizer_conf, shuffle_targets=False,
                       test_data='', *args, **kwargs):
    """
    Loads and tokenizes data from either a gzipped JSON file or a directory of XML/CoNLL files.
    The input type is detected automatically: a gzipped training_path is read as JSON, otherwise
    training_path must be a directory and the format of the files inside it is checked.

    :param training_path: path to the training data (gzipped JSON file or directory)
    :param tokenizer_conf: dict of keyword arguments passed to the tokenizer's constructor
    :param shuffle_targets: whether to shuffle the target labels (directory input only)
    :param test_data: optional path to the test data, in the same format as the training data
    """
    if is_gzipped(training_path):
        tokenizer = GzippedJsonTokenizer(**tokenizer_conf)
        x_tr, y_tr = tokenizer.tokenize_corpus(training_path)
        if test_data:
            x_test, y_test = tokenizer.tokenize_corpus(test_data)
        else:
            x_test, y_test = None, None
        return x_tr, np.array(y_tr), x_test, (np.array(y_test) if y_test else y_test)
    # TODO: maybe we want to keep the XML code. In that case, check whether the file is XML or CoNLL.
    # Example `file` output for a tagged IMDB document:
    #   m011303:sample-data mmb28$ file aclImdb-tagged/neg/train_neg_12490_1.txt.tagged
    #   aclImdb-tagged/neg/train_neg_12490_1.txt.tagged: XML document text
    elif _check_file_magic(training_path, b'directory'):
        # need to find out if these are XML or CoNLL files
        a_file = glob(os.path.join(training_path, '*', '*'))[0]
        if _check_file_magic(a_file, b'XML'):
            tokenizer = XmlTokenizer(**tokenizer_conf)
        else:
            # must be CoNLL then
            tokenizer = ConllTokenizer(**tokenizer_conf)
        raw_data, data_ids = load_text_data_into_memory(training_path=training_path,
                                                        test_path=test_data,
                                                        shuffle_targets=shuffle_targets)
        return tokenize_data(raw_data, tokenizer, data_ids)
    else:
        raise ValueError('Input is neither a gzipped file containing all data nor a directory')
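# Usage sketch (illustrative, not from the original codebase): the gzipped-JSON branch returns
# (x_train, y_train, x_test, y_test), with the test halves left as None when no test_data path
# is supplied. The paths and the tokenizer configuration key below are assumptions and must
# match whatever the chosen tokenizer's constructor accepts.
#
# x_tr, y_tr, x_test, y_test = get_tokenized_data('data/train.json.gz',
#                                                 {'lowercase': True},
#                                                 test_data='data/test.json.gz')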
    def __iter__(self):
        for fname in self.files:
            filename = join(self.dirname, fname)
            infile = gzip.open(filename) if is_gzipped(filename) else open(filename)
            with contextlib.closing(infile):
                for line in infile:
                    # yield gensim.utils.tokenize(line, lower=True)
                    if isinstance(line, bytes):
                        line = line.decode()
                    res = [DocumentFeature.smart_lower(w) for w in line.split() if
                           DocumentFeature.from_string(w).type != 'EMPTY']
                    # ignore short sentences, they are probably noise
                    if len(res) > 8:
                        if self.remove_pos:
                            yield [x.split('/')[0] for x in res]
                        else:
                            yield res
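    # Usage sketch (assumption, not from the original codebase): __iter__ above presumably
    # belongs to a corpus-reader class exposing `dirname`, `files` and `remove_pos`. Any such
    # restartable iterable of token lists can be streamed directly into gensim's Word2Vec;
    # the class and path names below are hypothetical.
    #
    # from gensim.models import Word2Vec
    # corpus = TaggedCorpusReader('data/tokenized', remove_pos=True)
    # model = Word2Vec(sentences=corpus)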
    @classmethod
    def from_tsv(cls, tsv_file, sim_threshold=0, include_self=False,
                 lowercasing=False, ngram_separator='_', pos_separator='/', allow_lexical_overlap=True,
                 row_filter=lambda x, y: True, column_filter=lambda x: True, max_len=50,
                 max_neighbours=1e8, merge_duplicates=False, immutable=True,
                 enforce_word_entry_pos_format=True, **kwargs):
        """
        Create a Thesaurus by parsing a Byblo-compatible TSV file (events or sims).
        If duplicate values are encountered during parsing, only the latest will be kept.

        :param tsv_file: path to input TSV file. May be gzipped.
        :type tsv_file:  str
        :param sim_threshold: min similarity between an entry and its neighbour for the neighbour to be included
        :type sim_threshold: float
        :param include_self: whether to include self as nearest neighbour.
        :type include_self: bool
        :param lowercasing: if true, most of what is read will be lowercased (excluding PoS tags), so
            Cat/N -> cat/N. This is desirable when reading thesauri with this class. If False, no lowercasing
            will take place. This might be desirable when reading feature lists or already lowercased neighbour
            lists. FET + Byblo thesauri are already lowercased.
        :type lowercasing: bool
        :param ngram_separator: the string that separates the individual tokens of an n-gram entry when it is read in
        :param column_filter: A function that takes a string (column in the file) and returns whether or not
        the string should be kept
        :param row_filter: takes a string and its corresponding DocumentFeature and determines if it should be loaded.
        If `enforce_word_entry_pos_format` is `False`, the second parameter to this function will be `None`
        :param allow_lexical_overlap: whether neighbours/features are allowed to overlap lexically with the entry
        they are neighbours/features of. NOTE: THE BEHAVIOUR OF THIS PARAMETER IS SLIGHTLY DIFFERENT FROM THE EQUIVALENT
        IN VECTORS. SEE COMMENT THERE.
        :param max_len: maximum length (in characters) of permissible **entries**. Longer entries are ignored.
        :param max_neighbours: maximum neighbours per entry. This is applied AFTER the filtering defined by
        column_filter and allow_lexical_overlap is finished.
        :param merge_duplicates: whether to raise an error if multiple entries exist, or concatenate/add them together.
        The former is appropriate for `Thesaurus`, and the latter for `Vectors`
        :param enforce_word_entry_pos_format: if true, entries that are not in a `word/POS` format are skipped. This
        must be true for `allow_lexical_overlap` to work.
        """

        if not tsv_file:
            raise ValueError("No thesaurus specified")

        DocumentFeature.recompile_pattern(pos_separator=pos_separator, ngram_separator=ngram_separator)
        to_return = dict()
        logging.info('Loading thesaurus %s from disk', tsv_file)

        if not allow_lexical_overlap:
            logging.warning('DISALLOWING LEXICAL OVERLAP')

        if not allow_lexical_overlap and not enforce_word_entry_pos_format:
            raise ValueError('allow_lexical_overlap requires entries to be converted to a DocumentFeature. '
                             'Please enable enforce_word_entry_pos_format')
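        # '___FILTERED___' is presumably Byblo's placeholder for entries/neighbours removed by its
        # frequency filtering; tokens matching it are skipped below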
        FILTERED = '___FILTERED___'.lower()

        gzipped = is_gzipped(tsv_file)
        if gzipped:
            logging.info('Attempting to read a gzipped file')
            fhandle = gzip.open(tsv_file)
        else:
            fhandle = open(tsv_file)

        with fhandle as infile:
            for line in infile:
                if gzipped:
                    # this is a byte stream, needs to be decoded
                    tokens = line.decode('UTF8').strip().split('\t')
                else:
                    tokens = line.strip().split('\t')

                if len(tokens) % 2 == 0:
                    # must have an odd number of things, one for the entry
                    # and pairs for (neighbour, similarity)
                    logging.warning('Skipping dodgy line in thesaurus file: %s\n %s', tsv_file, line)
                    continue

                if tokens[0].lower() != FILTERED:  # compare case-insensitively, as for neighbours below
                    key = DocumentFeature.smart_lower(tokens[0], lowercasing)
                    dfkey = DocumentFeature.from_string(key) if enforce_word_entry_pos_format else None

                    if enforce_word_entry_pos_format and dfkey.type == 'EMPTY':
                        # do not load things in the wrong format, they'll get in the way later
                        # logging.warning('%s is not in the word/POS format, skipping', tokens[0])
                        continue

                    if (not row_filter(key, dfkey)) or len(key) > max_len:
                        logging.debug('Skipping entry for %s', key)
                        continue

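                    # tokens[0] is the entry itself; the rest of the line alternates neighbour and
                    # similarity, so walk it in non-overlapping pairs starting at index 1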
                    to_insert = [(DocumentFeature.smart_lower(word, lowercasing), float(sim))
                                 for (word, sim) in walk_nonoverlapping_pairs(tokens, 1)
                                 if word.lower() != FILTERED and column_filter(word) and float(sim) > sim_threshold]

                    if not allow_lexical_overlap:
                        to_insert = cls.remove_overlapping_neighbours(dfkey, to_insert)

                    if len(to_insert) > max_neighbours:
                        # max_neighbours may be a float (the default is 1e8), so cast before slicing
                        to_insert = to_insert[:int(max_neighbours)]

                    if include_self:
                        to_insert.insert(0, (key, 1.0))

                    # the steps above may filter out all neighbours of an entry. if this happens,
                    # do not bother adding it
                    if len(to_insert) > 0:
                        if key in to_return:  # this is a duplicate entry, merge it or raise an error
                            if merge_duplicates:
                                logging.debug('Multiple entries for "%s" found. Merging.', tokens[0])
                                c = Counter(dict(to_return[key]))
                                c.update(dict(to_insert))
                                to_return[key] = [(k, v) for k, v in c.items()]
                            else:
                                raise ValueError('Multiple entries for "%s" found.' % tokens[0])
                        else:
                            to_return[key] = to_insert
                    else:
                        logging.warning('Nothing survived filtering for %r', key)
        return Thesaurus(to_return, immutable=immutable)
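    # Usage sketch (illustrative, not from the original codebase): loading a Byblo-style sims
    # file and looking up the neighbours of an entry. The path and filter values are assumptions,
    # and dict-style lookup on the resulting Thesaurus is assumed to exist.
    #
    # thes = Thesaurus.from_tsv('data/exp0-0.sims.neighbours.txt.gz',
    #                           sim_threshold=0.1,
    #                           allow_lexical_overlap=False,
    #                           max_neighbours=100)
    # neighbours = thes['cat/N']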