Code example #1
File: files_to_vw.py  Project: darth-cheney/declass
def tokenize(
    outfile, paths, base_path, no_shuffle, tokenizer_type, tokenizer_pickle,
    doc_id_level, n_jobs, chunksize):
    """
    Write later if module interface is needed. See _cli for the documentation.
    """
    assert (paths == []) or (base_path is None)

    if base_path:
        paths = filefilter.get_paths(base_path, file_type='*', get_iter=True)
        if no_shuffle is False:
            paths = list(paths)
            shuffle(paths)

    if tokenizer_pickle is not None:
        tokenizer = SaveLoad.load(tokenizer_pickle)
    else:
        tokenizer_dict = {'basic': text_processors.TokenizerBasic}
        tokenizer = tokenizer_dict[tokenizer_type]()

    formatter = text_processors.VWFormatter()

    func = partial(_tokenize_one, tokenizer, formatter, doc_id_level)

    results_iterator = imap_easy(func, paths, n_jobs, chunksize)

    for result in results_iterator:
        outfile.write(result + '\n')
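For orientation, here is a minimal, hypothetical invocation of tokenize; the directory, output target, and parameter values below are illustrative assumptions, not taken from the project:

import sys

tokenize(
    sys.stdout,        # write the VW-formatted lines to stdout
    [],                # paths is empty, so base_path is walked instead
    '/data/corpus',    # hypothetical directory of raw text files
    False,             # no_shuffle=False, so the path list is shuffled
    'basic',           # selects text_processors.TokenizerBasic
    None,              # no pickled tokenizer to load via SaveLoad.load
    1,                 # doc_id_level (assumed meaning: path levels used for the doc_id)
    4,                 # n_jobs: four worker processes
    100)               # chunksize passed through to imap_easy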
Code example #2
File: streamers.py  Project: darth-cheney/declass
    def to_vw(self, outfile, n_jobs=1, chunksize=1000):
        """
        Write our filestream to a VW (Vowpal Wabbit) formatted file.

        Parameters
        ----------
        outfile : filepath or buffer
        n_jobs : Integer
            Use n_jobs different jobs to do the processing.  Set = 4 for 4 
            jobs.  Set = -1 to use all available, -2 for all except 1,...
        chunksize : Integer
            Workers process this many jobs at once before pickling and sending
            results to master.  If this is too low, communication overhead
            will dominate.  If this is too high, jobs will not be distributed
            evenly.
        """
        # Note:  This is similar to declass/cmd/files_to_vw.py
        # This implementation is more complicated, due to the fact that a
        # streamer specifies the method to extract doc_id from a stream.
        # To be faithful to the streamer, we must therefore use the streamer
        # to stream the files.  This requires a combination of imap_easy and
        # a chunker.
        #
        # Create an iterator over chunks of paths
        path_group_iter = common.grouper(self.paths, chunksize)

        formatter = text_processors.VWFormatter()

        func = partial(_group_to_sstr, self, formatter)
        # Process one group at a time...set imap_easy chunksize arg to 1
        # since each group contains many paths.
        results_iterator = imap_easy(func, path_group_iter, n_jobs, 1)

        with smart_open(outfile, 'w') as open_outfile:
            for group_results in results_iterator:
                for sstr in group_results:
                    open_outfile.write(sstr + '\n')
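A hedged usage sketch, assuming a streamer object (here called stream) that exposes .paths and the doc_id extraction relied on by _group_to_sstr; the filename and parameter values are illustrative:

# Hypothetical call: write the whole stream to corpus.vw using all but one
# core; each worker receives groups of 1000 paths at a time.
stream.to_vw('corpus.vw', n_jobs=-2, chunksize=1000)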
Code example #3
def download_files(paths, n_jobs=7, chunksize=100):
    """
    Wgets the files, using the parallel_easy imap function to make things
    go a lot faster. See documentation therein.
    """
    imap_easy(_downld_func, paths, n_jobs, chunksize)
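_downld_func is not shown in this snippet; a plausible sketch (an assumption, not the project's actual implementation) would shell out to wget for a single URL:

import subprocess

def _downld_func(url):
    # Download one file quietly; ignore the return code so a single bad URL
    # does not abort the whole worker pool.
    subprocess.call(['wget', '-q', url])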
Code example #4
    def test_imap_easy_3job(self):
        result_iterator = base.imap_easy(abfunc, self.numbers, 3, 1)
        result = []
        for number in result_iterator:
            result.append(number)
        self.assertEqual(result, self.benchmark)
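The fixture behind this test (abfunc, self.numbers, self.benchmark) is not included above; a hypothetical, self-consistent version could look like this:

import unittest

def abfunc(x, a=2, b=3):
    # Defined at module level so it can be pickled and sent to worker
    # processes when n_jobs > 1; a lambda would typically not work here.
    return x * a * b

class TestImapEasy(unittest.TestCase):
    # `base` in the test above is assumed to be the module providing
    # imap_easy; it is not defined in this sketch.
    def setUp(self):
        self.numbers = range(5)
        self.benchmark = [0, 6, 12, 18, 24]  # abfunc(x) == 6 * x for x in 0..4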