Example #1
0
def createChunksSubmitJobs(inDir, outDir, minId, runner, chunkSize):
    """ submit jobs to convert zip and disk files from inDir to outDir
        split files into chunks and submit chunks to cluster system
        write first to temporary dir, and copy over at end of all jobs
        This is based on pubConvElsevier.py
    """
    maxCommon.mustExistDir(outDir)

    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(outDir, minId)
    assert chunkSize is not None

    finalOutDir = outDir
    # mkdtemp creates the directory atomically, avoiding the mktemp/mkdir race
    outDir = tempfile.mkdtemp(dir=outDir, prefix="springerUpdate.tmp.")

    # getting filenames from the disk
    # default to an empty list so the concatenation below also works when this
    # is not the first update or the disk directory is absent
    inDiskFiles = []
    diskDir = join(inDir, "disk")
    if int(updateId)==0 and isdir(diskDir):
        inDiskFiles = parseDiskFnames(diskDir)
    else:
        logging.info("Not first update or no directory %s, not parsing files from springer disk" % diskDir)

    # getting filenames from the updates
    zipDir = join(inDir, "updates")
    inZipFiles = os.listdir(zipDir)
    inZipFiles = [x for x in inZipFiles if x.endswith(".zip")]
    logging.info("Found %d update zip files" % len(inZipFiles))
    # keep order of input files for first run

    if len(alreadyDoneFiles)==0:
        processFiles = inDiskFiles+inZipFiles
    else:
        processFiles = set(inZipFiles).difference(alreadyDoneFiles)

    if len(processFiles)==0:
        logging.info("All updates done, not converting anything")
        return None
    else:
        logging.info("Total number of files to convert: %d" % (len(processFiles)))

    indexFilename = join(outDir, "%d_index.tab" % updateId)
    maxArticleId  = createIndexFile(zipDir, processFiles, indexFilename, updateId, minId, chunkSize)

    indexSplitDir = join(outDir, "indexFiles")
    pubStore.splitTabFileOnChunkId(indexFilename, indexSplitDir)

    idFname = concatDois(finalOutDir, outDir, "doneArticles.tab")
    submitJobs(runner, zipDir, indexSplitDir, idFname, outDir)

    pubGeneric.concatDelIdFiles(outDir, finalOutDir, "%d_ids.tab" % updateId)
    pubGeneric.concatDelLogs(outDir, finalOutDir, "%d.log" % updateId)

    # cleanup, move over, remove whole temp dir
    if isdir(indexSplitDir): # necessary? how could it not be there? 
        logging.info("Deleting directory %s" % indexSplitDir)
        shutil.rmtree(indexSplitDir) # got sometimes exception here...
    pubStore.moveFiles(outDir, finalOutDir)
    shutil.rmtree(outDir)

    pubStore.appendToUpdatesTxt(finalOutDir, updateId, maxArticleId, processFiles)
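Note the lifecycle that all the variants in this section share: output is first written into a temporary directory inside the final output directory and only moved over once every job has finished, so an interrupted run never leaves half-written files next to finished ones. A minimal, self-contained sketch of that pattern (stdlib only; doConvert is a hypothetical placeholder for the actual conversion work):

import os, shutil, tempfile
from os.path import join

def buildThenPublish(finalOutDir, doConvert):
    # build into a fresh temporary directory inside finalOutDir, so the final
    # move stays on the same filesystem
    buildDir = tempfile.mkdtemp(prefix="build.tmp.", dir=finalOutDir)
    try:
        doConvert(buildDir)                      # write all output into buildDir
        for fname in os.listdir(buildDir):
            shutil.move(join(buildDir, fname), join(finalOutDir, fname))
    finally:
        shutil.rmtree(buildDir, ignore_errors=True)   # drop whatever is left over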
Example #2
0
def createChunksSubmitJobs(inDir, outDir, minId, runner, chunkSize):
    tmpDir = pubGeneric.makeTempDir("bingData", tmpDir=outDir)
    #maxCommon.delOnExit(tmpDir)

    maxCommon.mustExistDir(outDir)
    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(outDir, minId)
    # get all .index.gz files, remove the already done files
    inFnames = glob.glob(join(inDir, "*.index.gz"))
    inBaseNames = set([basename(x) for x in inFnames])
    todoBasenames = inBaseNames - set(alreadyDoneFiles)
    todoFnames = [join(inDir, x) for x in todoBasenames]
    if len(todoFnames) == 0:
        logging.info("All input files already converted")
        return

    indexFilename = join(outDir, "%d_index.tab" % updateId)
    indexFile = open(indexFilename, "w")
    headers = ["articleId", "tsvFile", "url", "offset"]
    indexFile.write("\t".join(headers))
    indexFile.write("\n")

    # read them and write one big index file with
    # columns articleId, tsvFile, url and offset
    numId = minId
    doneUrls = set()
    for fname in todoFnames:
        # name of the .gz data file that corresponds to this .index.gz file
        tsvName = basename(fname).replace(".index.gz", ".gz")
        for line in gzip.open(fname):
            url, offset = line.rstrip("\n").split("\t")
            assert offset.isdigit()
            if "\t" in url or "\n" in url:
                logging.info("tab or NL in url %s, skipping" % url)
                continue
            if url in doneUrls:
                logging.info("Already did %s" % url)
                continue
            doneUrls.add(url)  # remember the URL so later duplicates are skipped
            row = [str(numId), tsvName, url, offset]
            indexFile.write("\t".join(row))
            indexFile.write("\n")
            numId += 1
    indexFile.close()

    # split the index file into chunks, one per job
    chunkIds = pubStore.splitTabFileOnChunkId(indexFilename,
                                              tmpDir,
                                              chunkSize=chunkSize)
    idFname = pubGeneric.concatIdentifiers(outDir, tmpDir, "doneArticles.tab")
    # submit one conversion job per chunk
    submitConvertJobs(runner, inDir, updateId, chunkIds, tmpDir, idFname,
                      tmpDir)
    pubGeneric.concatDelIdFiles(tmpDir, outDir, "%d_ids.tab" % updateId)
    pubGeneric.concatDelLogs(tmpDir, outDir, "%d.log" % updateId)
    pubStore.moveFiles(tmpDir, outDir)
    shutil.rmtree(tmpDir)
    pubStore.appendToUpdatesTxt(outDir, updateId, numId, todoBasenames)
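The index file written above is plain tab-separated text with the header articleId, tsvFile, url, offset; the offset presumably points into the named .gz data file so a conversion job can fetch one document at a time. A small reader sketch for such an index (stdlib only; the per-chunk splitting done by pubStore.splitTabFileOnChunkId is not reproduced here):

import csv

def readIndexRows(indexFilename):
    """Yield one dict per data row of a <updateId>_index.tab file."""
    with open(indexFilename) as fh:
        for row in csv.DictReader(fh, delimiter="\t"):
            row["articleId"] = int(row["articleId"])
            row["offset"] = int(row["offset"])
            yield row

# e.g. group rows by their .gz data file:
# rowsByFile = {}
# for row in readIndexRows("0_index.tab"):
#     rowsByFile.setdefault(row["tsvFile"], []).append(row)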
Example #3
0
def createChunksSubmitJobs(inDir, outDir, minId, runner, chunkSize):
    tmpDir = pubGeneric.makeTempDir("bingData", tmpDir=outDir)
    #maxCommon.delOnExit(tmpDir)

    maxCommon.mustExistDir(outDir)
    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(outDir, minId)
    # get all .index.gz files, remove the already done files
    inFnames = glob.glob(join(inDir, "*.index.gz"))
    inBaseNames = set([basename(x) for x in inFnames])
    todoBasenames = inBaseNames - set(alreadyDoneFiles)
    todoFnames = [join(inDir, x) for x in todoBasenames]
    if len(todoFnames)==0:
        logging.info("All input files already converted")
        return

    indexFilename = join(outDir, "%d_index.tab" % updateId)
    indexFile = open(indexFilename, "w")
    headers = ["articleId", "tsvFile", "url", "offset"]
    indexFile.write("\t".join(headers))
    indexFile.write("\n")

    # read them and write one big index file with
    # columns articleId, tsvFile, url and offset
    numId = minId
    doneUrls = set()
    for fname in todoFnames:
        # name of the .gz data file that corresponds to this .index.gz file
        tsvName = basename(fname).replace(".index.gz", ".gz")
        for line in gzip.open(fname):
            url, offset = line.rstrip("\n").split("\t")
            assert offset.isdigit()
            if "\t" in url or "\n" in url:
                logging.info("tab or NL in url %s, skipping" % url)
                continue
            if url in doneUrls:
                logging.info("Already did %s" % url)
                continue
            doneUrls.add(url)  # remember the URL so later duplicates are skipped
            row = [str(numId), tsvName, url, offset]
            indexFile.write("\t".join(row))
            indexFile.write("\n")
            numId += 1
    indexFile.close()

    # split the index file into chunks, one per job
    chunkIds = pubStore.splitTabFileOnChunkId(indexFilename, tmpDir, chunkSize=chunkSize)
    idFname  = pubGeneric.concatIdentifiers(outDir, tmpDir, "doneArticles.tab")
    # submit one conversion job per chunk
    submitConvertJobs(runner, inDir, updateId, chunkIds, tmpDir, idFname, tmpDir)
    pubGeneric.concatDelIdFiles(tmpDir, outDir, "%d_ids.tab" % updateId)
    pubGeneric.concatDelLogs(tmpDir, outDir, "%d.log" % updateId)
    pubStore.moveFiles(tmpDir, outDir)
    shutil.rmtree(tmpDir)
    pubStore.appendToUpdatesTxt(outDir, updateId, numId, todoBasenames)
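For reference, the .index.gz inputs parsed by the inner loop above are gzip-compressed files with one url<TAB>offset line per document. A tiny writer sketch for that layout (illustrative only; whatever tool produces the real files is not part of this section):

import gzip

def writeUrlOffsetIndex(fname, urlOffsets):
    """Write (url, byteOffset) pairs in the url<TAB>offset layout parsed above."""
    fh = gzip.open(fname, "wb")
    for url, offset in urlOffsets:
        fh.write(("%s\t%d\n" % (url, offset)).encode("utf8"))
    fh.close()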
Example #4
0
def createChunksSubmitJobs(inDir, outDir, minId, runner, chunkSize):
    """ convert Consyn ZIP files from inDir to outDir 
        split files into chunks and submit chunks to cluster system
        write first to temporary dir, and copy over at end of all jobs
    """
    maxCommon.mustExistDir(outDir)

    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(outDir, minId)
    if chunkSize is None:
        chunkSize = pubStore.guessChunkSize(outDir)
    assert chunkSize is not None

    # build into temporary dir, fail if it exists
    # it should not exist, otherwise something is wrong
    finalOutDir = outDir
    #outDir     = tempfile.mktemp(dir = outDir, prefix = "elsevierUpdate%s.tmp." % str(updateId))
    buildDir = join(outDir, "build")
    os.mkdir(buildDir)

    inFiles = os.listdir(inDir)
    inFiles = [x for x in inFiles if x.endswith(".ZIP")]
    # keep order of input files for first run
    if len(alreadyDoneFiles) != 0:
        processFiles = set(inFiles).difference(alreadyDoneFiles)
    else:
        processFiles = inFiles

    if len(processFiles) == 0:
        logging.info("All updates done, not converting anything")
        os.rmdir(buildDir)
        return None

    indexFilename = join(buildDir, "%d_index.tab" % updateId)
    maxArticleId = createIndexFile(inDir, processFiles, indexFilename,
                                   updateId, minId, chunkSize)
    indexSplitDir = indexFilename + ".tmp.split"
    chunkIds = pubStore.splitTabFileOnChunkId(indexFilename, indexSplitDir)
    idFname = pubGeneric.concatIdentifiers(finalOutDir, indexSplitDir,
                                           "doneArticles.tab")

    submitJobs(runner, inDir, chunkIds, indexSplitDir, idFname, buildDir)

    pubGeneric.concatDelIdFiles(buildDir, finalOutDir, "%d_ids.tab" % updateId)
    pubGeneric.concatDelLogs(buildDir, finalOutDir, "%d.log" % updateId)

    if isdir(indexSplitDir):  # necessary? how could it not be there?
        logging.info("Deleting directory %s" % indexSplitDir)
        shutil.rmtree(indexSplitDir)  # got sometimes exception here...
    pubStore.moveFiles(buildDir, finalOutDir)
    shutil.rmtree(buildDir)

    pubStore.appendToUpdatesTxt(finalOutDir, updateId, maxArticleId,
                                processFiles)
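Every variant relies on pubStore.splitTabFileOnChunkId to cut the big index into one file per cluster job; its real implementation is not shown here. As a rough, hypothetical stand-in (chunk file naming and the row-count strategy are assumptions), a split by row count could look like this:

import os

def splitIntoChunks(indexFilename, splitDir, chunkSize=2000):
    """Write the rows of indexFilename to splitDir/0.tab, splitDir/1.tab, ...
    repeating the header in every chunk; returns the list of chunk ids."""
    os.makedirs(splitDir)
    chunkIds, outFh, rowCount = [], None, 0
    with open(indexFilename) as fh:
        header = fh.readline()
        for line in fh:
            if outFh is None or rowCount == chunkSize:
                if outFh is not None:
                    outFh.close()
                chunkId = str(len(chunkIds))
                chunkIds.append(chunkId)
                outFh = open(os.path.join(splitDir, chunkId + ".tab"), "w")
                outFh.write(header)
                rowCount = 0
            outFh.write(line)
            rowCount += 1
    if outFh is not None:
        outFh.close()
    return chunkIds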
Example #5
0
def createChunksSubmitJobs(inDir, outDir, minId, runner, chunkSize):
    """ convert Consyn ZIP files from inDir to outDir 
        split files into chunks and submit chunks to cluster system
        write first to temporary dir, and copy over at end of all jobs
    """
    maxCommon.mustExistDir(outDir)

    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(outDir, minId)
    if chunkSize is None:
        chunkSize = pubStore.guessChunkSize(outDir)
    assert chunkSize is not None

    # build into temporary dir, fail if it exists
    # it should not exist, otherwise something is wrong
    finalOutDir= outDir
    #outDir     = tempfile.mktemp(dir = outDir, prefix = "elsevierUpdate%s.tmp." % str(updateId))
    buildDir     = join(outDir, "build")
    os.mkdir(buildDir)

    inFiles = os.listdir(inDir)
    inFiles = [x for x in inFiles if x.endswith(".ZIP")]
    # keep order of input files for first run
    if len(alreadyDoneFiles)!=0:
        processFiles = set(inFiles).difference(alreadyDoneFiles)
    else:
        processFiles = inFiles

    if len(processFiles)==0:
        logging.info("All updates done, not converting anything")
        os.rmdir(buildDir)
        return None

    indexFilename = join(buildDir, "%d_index.tab" % updateId)
    maxArticleId  = createIndexFile(inDir, processFiles, indexFilename, updateId, minId, chunkSize)
    indexSplitDir = indexFilename+".tmp.split"
    chunkIds = pubStore.splitTabFileOnChunkId(indexFilename, indexSplitDir)
    idFname = pubGeneric.concatIdentifiers(finalOutDir, indexSplitDir, "doneArticles.tab")

    submitJobs(runner, inDir, chunkIds, indexSplitDir, idFname, buildDir)

    pubGeneric.concatDelIdFiles(buildDir, finalOutDir, "%d_ids.tab" % updateId)
    pubGeneric.concatDelLogs(buildDir, finalOutDir, "%d.log" % updateId)

    if isdir(indexSplitDir): # necessary? how could it not be there? 
        logging.info("Deleting directory %s" % indexSplitDir)
        shutil.rmtree(indexSplitDir) # got sometimes exception here...
    pubStore.moveFiles(buildDir, finalOutDir)
    shutil.rmtree(buildDir)

    pubStore.appendToUpdatesTxt(finalOutDir, updateId, maxArticleId, processFiles)
Example #6
0
def createChunksSubmitJobs(inDir, finalOutDir, runner, chunkSize):
    """ submit jobs to convert zip and disk files from inDir to outDir
        split files into chunks and submit chunks to cluster system
        write first to temporary dir, and copy over at end of all jobs
        This is based on pubConvElsevier.py
    """
    maxCommon.mustExistDir(finalOutDir)
    minId = pubConf.identifierStart["springer"]

    buildDir = pubGeneric.makeBuildDir(finalOutDir)

    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(finalOutDir, minId)
    assert chunkSize is not None

    # getting filenames from the disk
    # default to an empty list so the concatenation below also works when this
    # is not the first update or the disk directory is absent
    inDiskFiles = []
    diskDir = join(inDir, "disk")
    if int(updateId) == 0 and isdir(diskDir):
        inDiskFiles = parseDiskFnames(diskDir)
    else:
        logging.info("Not first update or no directory %s, not parsing files from springer disk" % diskDir)

    # getting filenames from the updates
    zipDir = join(inDir, "updates")
    inZipFiles = os.listdir(zipDir)
    inZipFiles = [x for x in inZipFiles if x.endswith(".zip")]
    logging.info("Found %d update zip files" % len(inZipFiles))
    # keep order of input files for first run

    if len(alreadyDoneFiles) == 0:
        processFiles = inDiskFiles + inZipFiles
    else:
        processFiles = set(inZipFiles).difference(alreadyDoneFiles)

    if len(processFiles) == 0:
        logging.info("All updates done, not converting anything")
        os.rmdir(buildDir)
        return None
    else:
        logging.info("Total number of files to convert: %d" % (len(processFiles)))

    indexFilename = join(buildDir, "%d_index.tab" % updateId)
    maxArticleId = createIndexFile(zipDir, processFiles, indexFilename, updateId, minId, chunkSize)

    indexSplitDir = join(buildDir, "indexFiles")
    pubStore.splitTabFileOnChunkId(indexFilename, indexSplitDir)

    idFname = concatDois(finalOutDir, buildDir, "doneArticles.tab")
    submitJobs(runner, zipDir, indexSplitDir, idFname, buildDir)

    finishUp(buildDir, finalOutDir)
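The Springer variants (#1, #6, #7) expect a fixed input layout: a disk/ subdirectory holding the initial dump, parsed only on the very first update, and an updates/ subdirectory with the incremental .zip archives. A small helper, not part of the original module, that makes that expectation explicit:

import logging, os
from os.path import isdir, join

def checkSpringerInputLayout(inDir):
    """Verify the inDir layout assumed above and return the update zip names."""
    zipDir = join(inDir, "updates")
    if not isdir(zipDir):
        raise IOError("missing updates directory %s" % zipDir)
    zipNames = sorted(f for f in os.listdir(zipDir) if f.endswith(".zip"))
    logging.info("%d update zips, disk dump present: %s" %
                 (len(zipNames), isdir(join(inDir, "disk"))))
    return zipNames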
Example #7
0
def createChunksSubmitJobs(inDir, finalOutDir, runner, chunkSize):
    """ submit jobs to convert zip and disk files from inDir to outDir
        split files into chunks and submit chunks to cluster system
        write first to temporary dir, and copy over at end of all jobs
        This is based on pubConvElsevier.py
    """
    maxCommon.mustExistDir(finalOutDir)
    minId = pubConf.identifierStart["springer"]

    buildDir = pubGeneric.makeBuildDir(finalOutDir)

    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(finalOutDir, minId)
    assert chunkSize is not None

    # getting filenames from the disk
    # default to an empty list so the concatenation below also works when this
    # is not the first update or the disk directory is absent
    inDiskFiles = []
    diskDir = join(inDir, "disk")
    if int(updateId)==0 and isdir(diskDir):
        inDiskFiles = parseDiskFnames(diskDir)
    else:
        logging.info("Not first update or no directory %s, not parsing files from springer disk" % diskDir)

    # getting filenames from the updates
    zipDir = join(inDir, "updates")
    inZipFiles = os.listdir(zipDir)
    inZipFiles = [x for x in inZipFiles if x.endswith(".zip")]
    logging.info("Found %d update zip files" % len(inZipFiles))
    # keep order of input files for first run

    if len(alreadyDoneFiles)==0:
        processFiles = inDiskFiles+inZipFiles
    else:
        processFiles = set(inZipFiles).difference(alreadyDoneFiles)

    if len(processFiles)==0:
        logging.info("All updates done, not converting anything")
        os.rmdir(buildDir)
        return None
    else:
        logging.info("Total number of files to convert: %d" % (len(processFiles)))

    indexFilename = join(buildDir, "%d_index.tab" % updateId)
    maxArticleId  = createIndexFile(zipDir, processFiles, indexFilename, updateId, minId, chunkSize)

    indexSplitDir = join(buildDir, "indexFiles")
    pubStore.splitTabFileOnChunkId(indexFilename, indexSplitDir)

    idFname = concatDois(finalOutDir, buildDir, "doneArticles.tab")
    submitJobs(runner, zipDir, indexSplitDir, idFname, buildDir)

    finishUp(buildDir, finalOutDir)
Example #8
0
def createChunksSubmitJobs(inDir, outDir, minId, chunkCount, runner):
    """ convert Consyn ZIP files from inDir to outDir 
        split files into chunks and submit chunks to cluster system
    """
    maxCommon.mustExistDir(outDir)

    updateId, minId, alreadyDoneFiles = pubStore.parseUpdatesTab(outDir, minId)
    chunkSize  = pubStore.guessChunkSize(outDir)
    finalOutDir = outDir
    # mkdtemp creates the directory atomically, avoiding the mktemp/mkdir race
    outDir = tempfile.mkdtemp(dir=outDir, prefix="temp.pubConvElsevier.update.")
    chunkCount = None

    inFiles = os.listdir(inDir)
    inFiles = [x for x in inFiles if x.endswith(".ZIP")]
    # keep order of input files for first run
    if len(alreadyDoneFiles)!=0:
        processFiles = set(inFiles).difference(alreadyDoneFiles)
    else:
        processFiles = inFiles

    if len(processFiles)==0:
        logging.info("All updates done, not converting anything")
        return None

    indexFilename = join(outDir, "%d_index.tab" % updateId)
    maxArticleId  = createIndexFile(inDir, processFiles, indexFilename, updateId, minId, chunkCount, chunkSize)
    indexSplitDir = indexFilename+".tmp.split"
    pubStore.splitTabFileOnChunkId(indexFilename, indexSplitDir)
    submitJobs(runner, indexSplitDir, outDir)

    # remove the split dir before moving: it lives inside the temporary outDir,
    # so cleaning it up after rmtree(outDir) would never do anything
    if isdir(indexSplitDir):
        logging.info("Deleting directory %s" % indexSplitDir)
        shutil.rmtree(indexSplitDir) # got sometimes exception here...
    pubStore.moveFiles(outDir, finalOutDir)
    shutil.rmtree(outDir)
    pubStore.appendToUpdatesTxt(finalOutDir, updateId, maxArticleId, processFiles)
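Finally, all the variants share the same bookkeeping contract: pubStore.parseUpdatesTab tells the caller which update number comes next, where article numbering should continue and which input files were already converted, while pubStore.appendToUpdatesTxt records the result once the jobs are done. The real file format is not shown in this section; the toy pair below only illustrates that round trip (file name and column layout are made up for the sketch):

import os

UPDATES_FILE = "updates.tab"   # illustrative name, not necessarily pubStore's

def parseUpdates(outDir, minId):
    """Return (nextUpdateId, nextArticleId, set of already converted files)."""
    updateId, nextId, doneFiles = 0, minId, set()
    path = os.path.join(outDir, UPDATES_FILE)
    if os.path.isfile(path):
        for line in open(path):
            uid, maxArticleId, files = line.rstrip("\n").split("\t")
            updateId = int(uid) + 1
            nextId = max(nextId, int(maxArticleId) + 1)
            doneFiles.update(files.split(","))
    return updateId, nextId, doneFiles

def appendUpdate(outDir, updateId, maxArticleId, doneFiles):
    """Record one finished update: its id, highest article id and input files."""
    with open(os.path.join(outDir, UPDATES_FILE), "a") as fh:
        fh.write("%d\t%d\t%s\n" % (updateId, maxArticleId, ",".join(sorted(doneFiles))))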