Example #1
import lazynlp

def create_reddit_data(url):
    """ Takes the path to the urls.txt file that lists the pages to collect;
        the pages are downloaded into a fixed folder on the distributed storage.
    """
    lazynlp.download_pages(url,
                           "/gpfs/gpfsfpo/reddit_dataset",
                           timeout=30,
                           default_skip=True,
                           extensions=[],
                           domains=[])
Example #2
import lazynlp

def create_gutenberg():
    """ Uses a hard-coded path to the urls.txt file that lists the pages to collect;
        the second parameter is the folder in the distributed storage where the data is downloaded.
    """
    lazynlp.download_pages("/gpfs/gpfsfpo/aus_gutemberg/urls.txt",
                           "/gpfs/gpfsfpo/aus_gutemberg_dataset",
                           timeout=30,
                           default_skip=True,
                           extensions=[],
                           domains=[])
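
A minimal sketch of how the hard-coded variant above might be driven and checked; the call and the destination folder come from the example, while the listing step is plain Python added here for illustration and is not part of lazynlp.

import os

create_gutenberg()

# Inspect the destination folder on the distributed storage to see what was written.
downloaded = os.listdir("/gpfs/gpfsfpo/aus_gutemberg_dataset")
print(f"{len(downloaded)} files downloaded")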
Example #3
import lazynlp

def create_reddit_data(url, output):
    """ Uses a parameterized path to the urls.txt file that lists the pages to collect;
        the second parameter is the folder in the distributed storage where the data is downloaded.
    """
    lazynlp.download_pages(url,
                           output,
                           timeout=30,
                           default_skip=True,
                           extensions=[],
                           domains=[])
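
A possible invocation of the parameterized variant above; both arguments are placeholder paths in the style of the other examples, not values taken from the original code.

create_reddit_data("/gpfs/gpfsfpo/reddit/urls.txt",  # file listing the pages to collect
                   "/gpfs/gpfsfpo/reddit_dataset")   # destination folder on the distributed storage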
Example #4
import os
import lazynlp
from os import listdir
from os.path import isfile, join

def process_files(path, id):
    # Feed every urls file found under `path` to lazynlp; `id` is unused here.
    files = [f for f in listdir(path) if isfile(join(path, f))]

    for filename in files:
        file_path = os.path.join(path, filename)
        lazynlp.download_pages(file_path, "/gpfs/gpfsfpo/reddit", timeout=30, default_skip=True, extensions=[], domains=[])
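
A hypothetical call for the loop-based variant above; the directory of urls files is an assumed path, and the second argument only fills the unused id parameter.

process_files("/gpfs/gpfsfpo/reddit_urls",  # assumed directory containing the urls files
              0)                            # placeholder, since id is not used by the function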