Example #1
File: faq.py Project: VRDate/twss
def process(statement, database_name=DATABASE_NAME):
  ''' Allows us to create entities via statements like "There is a course CSCI4702 called Mobile Programming" 
      and modify entities with statements like "CSCI4702 has a start date of Jan 31st 2013"
      
      already encountering a statement like "There is a game engine Unity3d" gives us trouble
      seems like we need named entity recognition to be able to extract types like that ... or perhaps rely on capitalization
      which doesn't really work for things like CTO as a category of items, hmm
      
      >>> sent = "There is a game engine Unreal Engine".split()
      >>> print nltk.ne_chunk(nltk.pos_tag(sent))
      '''
  # this runs real fast, but it doesn't quite get the NN/NNP combination I hoped for from "There is a game engine Unity3D"
  # although it does now with light=True setting, but now it doesn't get the NNP in "There is a game engine Source"

  s = parse(statement, relations=True, lemmata=True, light=True) 
  s = split(s)

  #result = search('There be DT NN+ (DT) (RB) (JJ) NNP+ (call) (DT) (RB) (JJ) (NNPS|NNP)+', s)
  s, result = extract(statement)
  if result:
    #try:
      noun = search('(NN)+', s)[0].string
      table = pluralize(noun.replace(' ','_'))
      result = search('(JJ|NNPS|NNP)+', s) # this pulls in adjectives, but there's supposed to be a better fix coming
      ident = result[0].string
      name = result[1].string if len(result) > 1 else ident
      #raise Exception(table+"; "+ident+"; "+name)
      return newTable(table,ident,name,database_name)
    #except:
      #return regexMatch(statement,database_name)
  else:
    return regexMatch(statement,database_name)
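A minimal usage sketch of the process() helper above, assuming the project-level names it references (DATABASE_NAME, extract, newTable, regexMatch) and the pattern library's parse/split/search/pluralize are importable as in the original faq.py:

# Hypothetical calls mirroring the docstring above; actual return values
# depend on the project's newTable() and regexMatch() implementations.
process("There is a course CSCI4702 called Mobile Programming")  # expected to create a "courses" entry
process("CSCI4702 has a start date of Jan 31st 2013")            # expected to fall back to regexMatch()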
Example #2
def process(statement, database_name=DATABASE_NAME):
    ''' Allows us to create entities via statements like "There is a course CSCI4702 called Mobile Programming" 
      and modify entities with statements like "CSCI4702 has a start date of Jan 31st 2013"
      
      already encountering a statement like "There is a game engine Unity3d" gives us trouble
      seems like we need named entity recognition to be able to extract types like that ... or perhaps rely on capitalization
      which doesn't really work for things like CTO as a category of items, hmm
      
      >>> sent = "There is a game engine Unreal Engine".split()
      >>> print nltk.ne_chunk(nltk.pos_tag(sent))
      '''
    # this runs real fast, but it doesn't quite get the NN/NNP combination I hoped for from "There is a game engine Unity3D"
    # although it does now with light=True setting, but now it doesn't get the NNP in "There is a game engine Source"

    s = parse(statement, relations=True, lemmata=True, light=True)
    s = split(s)

    #result = search('There be DT NN+ (DT) (RB) (JJ) NNP+ (call) (DT) (RB) (JJ) (NNPS|NNP)+', s)
    s, result = extract(statement)
    if result:
        #try:
        noun = search('(NN)+', s)[0].string
        table = pluralize(noun.replace(' ', '_'))
        result = search(
            '(JJ|NNPS|NNP)+', s
        )  # this pulls in adjectives, but there's supposed to be a better fix coming
        ident = result[0].string
        name = result[1].string if len(result) > 1 else ident
        #raise Exception(table+"; "+ident+"; "+name)
        return newTable(table, ident, name, database_name)
    #except:
    #return regexMatch(statement,database_name)
    else:
        return regexMatch(statement, database_name)
Example #3
    def _test_dna(self, s1, s2, expected_variants):
        s1_swig = util.swig_str(s1)
        s2_swig = util.swig_str(s2)
        extracted = extractor.extract(s1_swig[0], s1_swig[1],
                                      s2_swig[0], s2_swig[1], extractor.TYPE_DNA)

        assert len(extracted.variants) == len(expected_variants)

        for variant, expected_variant in zip(extracted.variants, expected_variants):
            for attribute, expected_value in expected_variant.items():
                assert getattr(variant, attribute) == expected_value
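To make the attribute-by-attribute comparison in _test_dna concrete, here is a minimal, self-contained sketch of the same check; the variant class and attribute names below are stand-ins, not the extractor library's actual objects:

class _FakeVariant(object):
    """Stand-in for an extracted variant; real attribute names may differ."""
    def __init__(self, **attributes):
        self.__dict__.update(attributes)

variants = [_FakeVariant(type="substitution", reference_start=4)]
expected_variants = [{"type": "substitution", "reference_start": 4}]

assert len(variants) == len(expected_variants)
for variant, expected_variant in zip(variants, expected_variants):
    for attribute, expected_value in expected_variant.items():
        assert getattr(variant, attribute) == expected_value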
Example #4
def _create_document(url):

    # Read raw html
    try:
        json_object = extractor.extract(url)
    except urllib2.HTTPError as err:  # HTTPError is a subclass of URLError, so catch it first
        log.error('urllib2 HTTP error: %s', err)
        abort(err.code)
    except urllib2.URLError as err:
        log.error('urllib2 URL[%s] error: %s', url, err)
        abort(400)  # bad request

    return json_object
Example #5
def _create_document(url):

    # Read raw html
    try:
        json_object = extractor.extract(url)
    except urllib2.HTTPError as err:  # HTTPError is a subclass of URLError, so catch it first
        log.error('urllib2 HTTP error: %s', err)
        abort(err.code)
    except urllib2.URLError as err:
        log.error('urllib2 URL[%s] error: %s', url, err)
        abort(400)  # bad request

    return json_object
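Both _create_document examples depend on urllib2 error handling; because urllib2.HTTPError subclasses urllib2.URLError, the HTTP handler must come first or it is never reached. A self-contained sketch of that pattern (Python 2), independent of the extractor and the log/abort helpers the examples assume:

import urllib2  # Python 2 standard library

def fetch(url):
    try:
        return urllib2.urlopen(url).read()
    except urllib2.HTTPError as err:   # subclass of URLError, so catch it first
        print('HTTP error %d for %s' % (err.code, url))
    except urllib2.URLError as err:
        print('URL error for %s: %s' % (url, err.reason))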
Example #6
def main(inputDirectory, inputName, inputCategory, inputHash):

    status = int(1)  # 1 = failed | 0 = success
    root = int(0)
    video = int(0)
    video2 = int(0)
    foundFile = int(0)
    deleteOriginal = int(0)
    numCompressed = int(0)
    extractionSuccess = False

    Logger.debug("MAIN: Received Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory)

    inputDirectory, inputName, inputCategory, root = category_search(inputDirectory, inputName, inputCategory, root, categories)  # Confirm the category by parsing directory structure

    for category in categories:
        if category == inputCategory:
            outputDestination = os.path.normpath(os.path.join(outputDirectory, category, safeName(inputName)))
            Logger.info("MAIN: Output directory set to: %s", outputDestination)
            break
        else:
            continue

    Logger.debug("MAIN: Scanning files in directory: %s", inputDirectory)      

    now = datetime.datetime.now()
    for dirpath, dirnames, filenames in os.walk(inputDirectory):
        for file in filenames:

            filePath = os.path.join(dirpath, file)
            fileName, fileExtension = os.path.splitext(file)
            targetDirectory = os.path.join(outputDestination, file)

            if root == 1:
                if not foundFile: 
                    Logger.debug("MAIN: Looking for %s in: %s", inputName, file)
                if (safeName(inputName) in safeName(file)) or (safeName(os.path.splitext(file)[0]) in safeName(inputName)) and foundFile == 0:
                    pass  # This file does match the Torrent name
                    foundFile = 1
                    Logger.debug("MAIN: Found file %s that matches Torrent Name %s", file, inputName)
                else:
                    continue  # This file does not match the Torrent name, skip it

            if root == 2:
                Logger.debug("MAIN: Looking for files with modified/created dates less than 5 minutes old.")
                mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(dirpath, file)))
                ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(os.path.join(dirpath, file)))
                if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)) and foundFile == 0:
                    pass  # This file does match the date time criteria
                    foundFile = 1
                    Logger.debug("MAIN: Found file %s with date modifed/created less than 5 minutes ago.", file)
                else:
                    continue  # This file has not been recently moved or created, skip it

            if not (inputCategory == cpsCategory or inputCategory == sbCategory): #process all for non-video categories.
                Logger.info("MAIN: Found file %s for category %s", filepath, inputCategory)
                copy_link(filePath, targetDirectory, useLink, outputDestination)
            elif fileExtension in mediaContainer:  # If the file is a video file
                if is_sample(filePath, inputName, minSampleSize):  # Ignore samples
                    Logger.info("MAIN: Ignoring sample file: %s  ", filePath)
                    continue
                else:
                    video = video + 1
                    Logger.info("MAIN: Found video file %s in %s", fileExtension, filePath)
                    try:
                        copy_link(filePath, targetDirectory, useLink, outputDestination)
                    except Exception as e:
                        Logger.error("MAIN: Failed to link file: %s", file)
                        Logger.debug(e)
            elif fileExtension in metaContainer:
                Logger.info("MAIN: Found metadata file %s for file %s", fileExtension, filePath)
                try:
                    copy_link(filePath, targetDirectory, useLink, outputDestination)
                except Exception as e:
                    Logger.error("MAIN: Failed to link file: %s", file)
                    Logger.debug(e)
            elif fileExtension in compressedContainer:
                numCompressed = numCompressed + 1
                if re.search(r'\d+', os.path.splitext(fileName)[1]) and numCompressed > 1: # find part numbers in second "extension" from right, if we have more than 1 compressed file.
                    part = int(re.search(r'\d+', os.path.splitext(fileName)[1]).group())
                    if part == 1: # we only want to extract the primary part.
                        Logger.debug("MAIN: Found primary part of a multi-part archive %s. Extracting", file)                       
                    else:
                        Logger.debug("MAIN: Found part %s of a multi-part archive %s. Ignoring", part, file)
                        continue
                Logger.info("MAIN: Found compressed archive %s for file %s", fileExtension, filePath)
                try:
                    extractor.extract(filePath, outputDestination)
                    extractionSuccess = True # we use this variable to determine if we need to pause a torrent or not in uTorrent (don't need to pause archived content)
                except Exception as e:
                    Logger.warn("MAIN: Extraction failed for: %s", file)
                    Logger.debug(e)
            else:
                Logger.debug("MAIN: Ignoring unknown filetype %s for file %s", fileExtension, filePath)
                continue
    flatten(outputDestination)

    # Now check if movie files exist in destination:
    for dirpath, dirnames, filenames in os.walk(outputDestination):
        for file in filenames:
            filePath = os.path.join(dirpath, file)
            fileExtension = os.path.splitext(file)[1]
            if fileExtension in mediaContainer:  # If the file is a video file
                if is_sample(filePath, inputName, minSampleSize):
                    Logger.debug("MAIN: Removing sample file: %s", filePath)
                    os.unlink(filePath)  # remove samples
                else:
                    video2 = video2 + 1
    if video2 >= video and video2 > 0:  # Check that all video files were moved
        status = 0

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent == 'utorrent' and extractionSuccess == False and inputHash:
        try:
            Logger.debug("MAIN: Connecting to uTorrent: %s", uTorrentWEBui)
            utorrentClass = UTorrentClient(uTorrentWEBui, uTorrentUSR, uTorrentPWD)
        except Exception as e:
            Logger.error("MAIN: Failed to connect to uTorrent: %s", e)

        # if we are using links with uTorrent it means we need to pause it in order to access the files
        if useLink == 1:
            Logger.debug("MAIN: Stoping torrent %s in uTorrent while processing", inputName)
            utorrentClass.stop(inputHash)
            time.sleep(5)  # Give uTorrent some time to catch up with the change

        # Delete torrent and torrentdata from uTorrent
        if deleteOriginal == 1:
            Logger.debug("MAIN: Deleting torrent %s from uTorrent", inputName)
            utorrentClass.removedata(inputHash)
            utorrentClass.remove(inputHash)
            time.sleep(5)

    processCategories = {cpsCategory, sbCategory, hpCategory, mlCategory, gzCategory}

    if inputCategory and not (inputCategory in processCategories): # no extra processing to be done... yet.
        Logger.info("MAIN: No further processing to be done for category %s.", inputCategory)
        result = 1
    elif status == 0:
        Logger.debug("MAIN: Calling autoProcess script for successful download.")
    else:
        Logger.error("MAIN: Something failed! Please check logs. Exiting")
        sys.exit(-1)

    if inputCategory == cpsCategory:
        Logger.info("MAIN: Calling CouchPotatoServer to post-process: %s", inputName)
        result = autoProcessMovie.process(outputDestination, inputName, status)
    elif inputCategory == sbCategory:
        Logger.info("MAIN: Calling Sick-Beard to post-process: %s", inputName)
        result = autoProcessTV.processEpisode(outputDestination, inputName, status)
    elif inputCategory == hpCategory:
        Logger.info("MAIN: Calling HeadPhones to post-process: %s", inputName)
        result = autoProcessMusic.process(outputDestination, inputName, status)
    elif inputCategory == mlCategory:
        Logger.info("MAIN: Calling Mylar to post-process: %s", inputName)
        result = autoProcessComics.processEpisode(outputDestination, inputName, status)
    elif inputCategory == gzCategory:
        Logger.info("MAIN: Calling Gamez to post-process: %s", inputName)
        result = autoProcessGames.process(outputDestination, inputName, status)

    if result == 1:
        Logger.info("MAIN: A problem was reported in the autoProcess* script. If torrent was pasued we will resume seeding")

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent == 'utorrent' and extractionSuccess == False and inputHash and useLink == 1 and deleteOriginal == 0: # we always want to resume seeding, for now manually find out what is wrong when extraction fails
        Logger.debug("MAIN: Starting torrent %s in uTorrent", inputName)
        utorrentClass.start(inputHash)

    Logger.info("MAIN: All done.")
Example #7
def main(inputDirectory, inputName, inputCategory, inputHash, inputID):

    status = int(1)  # 1 = failed | 0 = success
    root = int(0)
    video = int(0)
    video2 = int(0)
    foundFile = int(0)
    extracted_folder = []
    extractionSuccess = False
    copy_list = []

    Logger.debug("MAIN: Received Directory: %s | Name: %s | Category: %s",
                 inputDirectory, inputName, inputCategory)
    if inputCategory in sbCategory and sbFork in SICKBEARD_TORRENT:
        Logger.info("MAIN: Calling SickBeard's %s branch to post-process: %s",
                    sbFork, inputName)
        result = autoProcessTV.processEpisode(inputDirectory, inputName,
                                              int(0))
        if result == 1:
            Logger.info(
                "MAIN: A problem was reported in the autoProcess* script. If torrent was pasued we will resume seeding"
            )
        Logger.info("MAIN: All done.")
        sys.exit()

    inputDirectory, inputName, inputCategory, root = category_search(
        inputDirectory, inputName, inputCategory, root,
        categories)  # Confirm the category by parsing directory structure

    outputDestination = ""
    for category in categories:
        if category == inputCategory:
            if os.path.basename(inputDirectory) == inputName:
                Logger.info("MAIN: Download is a directory")
                outputDestination = os.path.normpath(
                    os.path.join(outputDirectory, category,
                                 safeName(inputName)))
            else:
                Logger.info("MAIN: Download is not a directory")
                outputDestination = os.path.normpath(
                    os.path.join(outputDirectory, category,
                                 os.path.splitext(safeName(inputName))[0]))
            Logger.info("MAIN: Output directory set to: %s", outputDestination)
            break
        else:
            continue
    if outputDestination == "":
        if inputCategory == "":
            inputCategory = "UNCAT"
        if os.path.basename(inputDirectory) == inputName:
            Logger.info("MAIN: Download is a directory")
            outputDestination = os.path.normpath(
                os.path.join(outputDirectory, inputCategory,
                             safeName(inputName)))
        else:
            Logger.info("MAIN: Download is not a directory")
            outputDestination = os.path.normpath(
                os.path.join(outputDirectory, inputCategory,
                             os.path.splitext(safeName(inputName))[0]))
        Logger.info("MAIN: Output directory set to: %s", outputDestination)

    processOnly = cpsCategory + sbCategory + hpCategory + mlCategory + gzCategory
    if not "NONE" in user_script_categories:  # if None, we only process the 5 listed.
        if "ALL" in user_script_categories:  # All defined categories
            processOnly = categories
        processOnly.extend(
            user_script_categories
        )  # Adds all categories to be processed by userscript.

    if not inputCategory in processOnly:
        Logger.info("MAIN: No processing to be done for category: %s. Exiting",
                    inputCategory)
        Logger.info("MAIN: All done.")
        sys.exit()

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent in ['utorrent', 'transmission'] and inputHash:
        if clientAgent == 'utorrent':
            try:
                Logger.debug("MAIN: Connecting to %s: %s", clientAgent,
                             uTorrentWEBui)
                utorrentClass = UTorrentClient(uTorrentWEBui, uTorrentUSR,
                                               uTorrentPWD)
            except:
                Logger.exception("MAIN: Failed to connect to uTorrent")
                utorrentClass = ""
        if clientAgent == 'transmission':
            try:
                Logger.debug("MAIN: Connecting to %s: http://%s:%s",
                             clientAgent, TransmissionHost, TransmissionPort)
                TransmissionClass = TransmissionClient(TransmissionHost,
                                                       TransmissionPort,
                                                       TransmissionUSR,
                                                       TransmissionPWD)
            except:
                Logger.exception("MAIN: Failed to connect to Transmission")
                TransmissionClass = ""

        # if we are using links with uTorrent it means we need to pause it in order to access the files
        Logger.debug("MAIN: Stoping torrent %s in %s while processing",
                     inputName, clientAgent)
        if clientAgent == 'utorrent' and utorrentClass != "":
            utorrentClass.stop(inputHash)
        if clientAgent == 'transmission' and TransmissionClass != "":
            TransmissionClass.stop_torrent(inputID)
        time.sleep(
            5)  # Give Torrent client some time to catch up with the change

    Logger.debug("MAIN: Scanning files in directory: %s", inputDirectory)

    now = datetime.datetime.now()
    for dirpath, dirnames, filenames in os.walk(inputDirectory):
        for file in filenames:

            filePath = os.path.join(dirpath, file)
            fileName, fileExtension = os.path.splitext(file)
            targetDirectory = os.path.join(outputDestination, file)

            if root == 1:
                if foundFile == int(0):
                    Logger.debug("MAIN: Looking for %s in: %s", inputName,
                                 file)
                if (safeName(inputName)
                        in safeName(file)) or (safeName(fileName)
                                               in safeName(inputName)):
                    #pass  # This file does match the Torrent name
                    foundFile = 1
                    Logger.debug(
                        "MAIN: Found file %s that matches Torrent Name %s",
                        file, inputName)
                else:
                    continue  # This file does not match the Torrent name, skip it

            if root == 2:
                Logger.debug(
                    "MAIN: Looking for files with modified/created dates less than 5 minutes old."
                )
                mtime_lapse = now - datetime.datetime.fromtimestamp(
                    os.path.getmtime(os.path.join(dirpath, file)))
                ctime_lapse = now - datetime.datetime.fromtimestamp(
                    os.path.getctime(os.path.join(dirpath, file)))
                if (mtime_lapse < datetime.timedelta(minutes=5)) or (
                        ctime_lapse < datetime.timedelta(minutes=5)):
                    #pass  # This file does match the date time criteria
                    foundFile = 1
                    Logger.debug(
                        "MAIN: Found file %s with date modifed/created less than 5 minutes ago.",
                        file)
                else:
                    continue  # This file has not been recently moved or created, skip it

            if fileExtension in mediaContainer:  # If the file is a video file
                if is_sample(
                        filePath, inputName, minSampleSize
                ) and not inputCategory in hpCategory:  # Ignore samples
                    Logger.info("MAIN: Ignoring sample file: %s  ", filePath)
                    continue
                else:
                    video = video + 1
                    Logger.info("MAIN: Found video file %s in %s",
                                fileExtension, filePath)
                    try:
                        copy_link(filePath, targetDirectory, useLink,
                                  outputDestination)
                        copy_list.append(
                            [filePath,
                             os.path.join(outputDestination, file)])
                    except:
                        Logger.exception("MAIN: Failed to link file: %s", file)
            elif fileExtension in metaContainer:
                Logger.info("MAIN: Found metadata file %s for file %s",
                            fileExtension, filePath)
                try:
                    copy_link(filePath, targetDirectory, useLink,
                              outputDestination)
                    copy_list.append(
                        [filePath,
                         os.path.join(outputDestination, file)])
                except:
                    Logger.exception("MAIN: Failed to link file: %s", file)
                continue
            elif fileExtension in compressedContainer:
                if inputCategory in hpCategory:  # We need to link all files for HP in order to move these back to support seeding.
                    Logger.info(
                        "MAIN: Linking compressed archive file %s for file %s",
                        fileExtension, filePath)
                    try:
                        copy_link(filePath, targetDirectory, useLink,
                                  outputDestination)
                        copy_list.append(
                            [filePath,
                             os.path.join(outputDestination, file)])
                    except:
                        Logger.exception("MAIN: Failed to link file: %s", file)
                # find part numbers in second "extension" from right, if we have more than 1 compressed file in the same directory.
                if re.search(
                        r'\d+',
                        os.path.splitext(fileName)[1]
                ) and os.path.dirname(filePath) in extracted_folder and not (
                        os.path.splitext(fileName)[1] in ['.720p', '.1080p']):
                    part = int(
                        re.search(r'\d+',
                                  os.path.splitext(fileName)[1]).group())
                    if part == 1:  # we only want to extract the primary part.
                        Logger.debug(
                            "MAIN: Found primary part of a multi-part archive %s. Extracting",
                            file)
                    else:
                        Logger.debug(
                            "MAIN: Found part %s of a multi-part archive %s. Ignoring",
                            part, file)
                        continue
                Logger.info("MAIN: Found compressed archive %s for file %s",
                            fileExtension, filePath)
                try:
                    if inputCategory in hpCategory:  # HP needs to scan the same dir as passed to downloader.
                        extractor.extract(filePath, inputDirectory)
                    else:
                        extractor.extract(filePath, outputDestination)
                    extractionSuccess = True  # we use this variable to determine if we need to pause a torrent or not in uTorrent (don't need to pause archived content)
                    extracted_folder.append(os.path.dirname(filePath))
                except:
                    Logger.exception("MAIN: Extraction failed for: %s", file)
                continue
            elif not inputCategory in cpsCategory + sbCategory:  #process all for non-video categories.
                Logger.info("MAIN: Found file %s for category %s", filePath,
                            inputCategory)
                copy_link(filePath, targetDirectory, useLink,
                          outputDestination)
                copy_list.append(
                    [filePath, os.path.join(outputDestination, file)])
                continue
            else:
                Logger.debug("MAIN: Ignoring unknown filetype %s for file %s",
                             fileExtension, filePath)
                continue
    if not inputCategory in hpCategory:  #don't flatten hp in case multi cd albums, and we need to copy this back later.
        flatten(outputDestination)

    # Now check if movie files exist in destination:
    if inputCategory in cpsCategory + sbCategory:
        for dirpath, dirnames, filenames in os.walk(outputDestination):
            for file in filenames:
                filePath = os.path.join(dirpath, file)
                fileName, fileExtension = os.path.splitext(file)
                if fileExtension in mediaContainer:  # If the file is a video file
                    if is_sample(filePath, inputName, minSampleSize):
                        Logger.debug("MAIN: Removing sample file: %s",
                                     filePath)
                        os.unlink(filePath)  # remove samples
                    else:
                        Logger.debug("MAIN: Found media file: %s", filePath)
                        video2 = video2 + 1
                else:
                    Logger.debug("MAIN: File %s is not a media file", filePath)
        if video2 >= video and video2 > int(
                0):  # Check that all video files were moved
            Logger.debug("MAIN: Found %s media files", str(video2))
            status = int(0)
        else:
            Logger.debug(
                "MAIN: Found %s media files in output. %s were found in input",
                str(video2), str(video))

    processCategories = cpsCategory + sbCategory + hpCategory + mlCategory + gzCategory

    if (inputCategory in user_script_categories
            and not "NONE" in user_script_categories) or (
                "ALL" in user_script_categories
                and not inputCategory in processCategories):
        Logger.info("MAIN: Processing user script %s.", user_script)
        result = external_script(outputDestination)
    elif status == int(0) or (
            inputCategory in hpCategory + mlCategory +
            gzCategory):  # if movies linked/extracted or for other categories.
        Logger.debug(
            "MAIN: Calling autoProcess script for successful download.")
        status = int(0)  # hp, ml, gz don't support failed.
    else:
        Logger.error("MAIN: Something failed! Please check logs. Exiting")
        sys.exit(-1)

    if inputCategory in cpsCategory:
        Logger.info("MAIN: Calling CouchPotatoServer to post-process: %s",
                    inputName)
        download_id = inputHash
        result = autoProcessMovie.process(outputDestination, inputName, status,
                                          clientAgent, download_id,
                                          inputCategory)
    elif inputCategory in sbCategory:
        Logger.info("MAIN: Calling Sick-Beard to post-process: %s", inputName)
        result = autoProcessTV.processEpisode(outputDestination, inputName,
                                              status, inputCategory)
    elif inputCategory in hpCategory:
        Logger.info("MAIN: Calling HeadPhones to post-process: %s", inputName)
        result = autoProcessMusic.process(inputDirectory, inputName, status,
                                          inputCategory)
    elif inputCategory in mlCategory:
        Logger.info("MAIN: Calling Mylar to post-process: %s", inputName)
        result = autoProcessComics.processEpisode(outputDestination, inputName,
                                                  status, inputCategory)
    elif inputCategory in gzCategory:
        Logger.info("MAIN: Calling Gamez to post-process: %s", inputName)
        result = autoProcessGames.process(outputDestination, inputName, status,
                                          inputCategory)

    if result == 1:
        Logger.info(
            "MAIN: A problem was reported in the autoProcess* script. If torrent was paused we will resume seeding"
        )

    if inputCategory in hpCategory:
        # we need to move the output dir files back...
        Logger.debug(
            "MAIN: Moving temporary HeadPhones files back to allow seeding.")
        for item in copy_list:
            if os.path.isfile(os.path.normpath(
                    item[1])):  # check to ensure temp files still exist.
                if os.path.isfile(os.path.normpath(
                        item[0])):  # both exist, remove temp version
                    Logger.debug(
                        "MAIN: File %s still present. Removing tempoary file %s",
                        str(item[0]), str(item[1]))
                    os.unlink(os.path.normpath(item[1]))
                    continue
                else:  # move temp version back to allow seeding or Torrent removal.
                    Logger.debug("MAIN: Moving %s to %s", str(item[1]),
                                 str(item[0]))
                    shutil.move(os.path.normpath(item[1]),
                                os.path.normpath(item[0]))
                    continue

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent in ['utorrent', 'transmission'] and inputHash:
        # Delete torrent and torrentdata from Torrent client if processing was successful.
        if deleteOriginal == 1 and result != 1:
            Logger.debug("MAIN: Deleting torrent %s from %s", inputName,
                         clientAgent)
            if clientAgent == 'utorrent' and utorrentClass != "":
                utorrentClass.removedata(inputHash)
                if not inputCategory in hpCategory:
                    utorrentClass.remove(inputHash)
            if clientAgent == 'transmission' and TransmissionClass != "":
                if inputCategory in hpCategory:  #don't delete actual files for hp category, just remove torrent.
                    TransmissionClass.remove_torrent(inputID, False)
                else:
                    TransmissionClass.remove_torrent(inputID, True)
        # we always want to resume seeding, for now manually find out what is wrong when extraction fails
        else:
            Logger.debug("MAIN: Starting torrent %s in %s", inputName,
                         clientAgent)
            if clientAgent == 'utorrent' and utorrentClass != "":
                utorrentClass.start(inputHash)
            if clientAgent == 'transmission' and TransmissionClass != "":
                TransmissionClass.start_torrent(inputID)
        time.sleep(5)
    #cleanup
    if inputCategory in processCategories and result == 0 and os.path.isdir(
            outputDestination):
        num_files_new = int(0)
        file_list = []
        for dirpath, dirnames, filenames in os.walk(outputDestination):
            for file in filenames:
                filePath = os.path.join(dirpath, file)
                fileName, fileExtension = os.path.splitext(file)
                if fileExtension in mediaContainer or fileExtension in metaContainer:
                    num_files_new = num_files_new + 1
                    file_list.append(file)
        if num_files_new == int(0):
            Logger.info(
                "All files have been processed. Cleaning outputDirectory %s",
                outputDestination)
            shutil.rmtree(outputDestination)
        else:
            Logger.info(
                "outputDirectory %s still contains %s media and/or meta files. This directory will not be removed.",
                outputDestination, num_files_new)
            for item in file_list:
                Logger.debug("media/meta file found: %s", item)
    Logger.info("MAIN: All done.")
Example #8
def main(inputDirectory, inputName, inputCategory, inputHash, inputID):

    status = int(1)  # 1 = failed | 0 = success
    root = int(0)
    video = int(0)
    video2 = int(0)
    foundFile = int(0)
    numCompressed = int(0)
    extractionSuccess = False

    Logger.debug("MAIN: Received Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory)

    inputDirectory, inputName, inputCategory, root = category_search(inputDirectory, inputName, inputCategory, root, categories)  # Confirm the category by parsing directory structure

    for category in categories:
        if category == inputCategory:
            outputDestination = os.path.normpath(os.path.join(outputDirectory, category, safeName(inputName)))
            Logger.info("MAIN: Output directory set to: %s", outputDestination)
            break
        else:
            continue

    Logger.debug("MAIN: Scanning files in directory: %s", inputDirectory)      

    now = datetime.datetime.now()
    for dirpath, dirnames, filenames in os.walk(inputDirectory):
        for file in filenames:

            filePath = os.path.join(dirpath, file)
            fileName, fileExtension = os.path.splitext(file)
            targetDirectory = os.path.join(outputDestination, file)

            if root == 1:
                if not foundFile: 
                    Logger.debug("MAIN: Looking for %s in: %s", inputName, file)
                if (safeName(inputName) in safeName(file)) or (safeName(os.path.splitext(file)[0]) in safeName(inputName)) and foundFile == 0:
                    pass  # This file does match the Torrent name
                    foundFile = 1
                    Logger.debug("MAIN: Found file %s that matches Torrent Name %s", file, inputName)
                else:
                    continue  # This file does not match the Torrent name, skip it

            if root == 2:
                Logger.debug("MAIN: Looking for files with modified/created dates less than 5 minutes old.")
                mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(dirpath, file)))
                ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(os.path.join(dirpath, file)))
                if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)) and foundFile == 0:
                    pass  # This file does match the date time criteria
                    foundFile = 1
                    Logger.debug("MAIN: Found file %s with date modifed/created less than 5 minutes ago.", file)
                else:
                    continue  # This file has not been recently moved or created, skip it

            if not (inputCategory == cpsCategory or inputCategory == sbCategory): #process all for non-video categories.
                Logger.info("MAIN: Found file %s for category %s", filePath, inputCategory)
                copy_link(filePath, targetDirectory, useLink, outputDestination)
            elif fileExtension in mediaContainer:  # If the file is a video file
                if is_sample(filePath, inputName, minSampleSize):  # Ignore samples
                    Logger.info("MAIN: Ignoring sample file: %s  ", filePath)
                    continue
                else:
                    video = video + 1
                    Logger.info("MAIN: Found video file %s in %s", fileExtension, filePath)
                    try:
                        copy_link(filePath, targetDirectory, useLink, outputDestination)
                    except:
                        Logger.exception("MAIN: Failed to link file: %s", file)
            elif fileExtension in metaContainer:
                Logger.info("MAIN: Found metadata file %s for file %s", fileExtension, filePath)
                try:
                    copy_link(filePath, targetDirectory, useLink, outputDestination)
                except:
                    Logger.exception("MAIN: Failed to link file: %s", file)
            elif fileExtension in compressedContainer:
                numCompressed = numCompressed + 1
                if re.search(r'\d+', os.path.splitext(fileName)[1]) and numCompressed > 1: # find part numbers in second "extension" from right, if we have more than 1 compressed file.
                    part = int(re.search(r'\d+', os.path.splitext(fileName)[1]).group())
                    if part == 1: # we only want to extract the primary part.
                        Logger.debug("MAIN: Found primary part of a multi-part archive %s. Extracting", file)                       
                    else:
                        Logger.debug("MAIN: Found part %s of a multi-part archive %s. Ignoring", part, file)
                        continue
                Logger.info("MAIN: Found compressed archive %s for file %s", fileExtension, filePath)
                try:
                    extractor.extract(filePath, outputDestination)
                    extractionSuccess = True # we use this variable to determine if we need to pause a torrent or not in uTorrent (don't need to pause archived content)
                except:
                    Logger.exception("MAIN: Extraction failed for: %s", file)
            else:
                Logger.debug("MAIN: Ignoring unknown filetype %s for file %s", fileExtension, filePath)
                continue
    flatten(outputDestination)

    # Now check if movie files exist in destination:
    for dirpath, dirnames, filenames in os.walk(outputDestination):
        for file in filenames:
            filePath = os.path.join(dirpath, file)
            fileExtension = os.path.splitext(file)[1]
            if fileExtension in mediaContainer:  # If the file is a video file
                if is_sample(filePath, inputName, minSampleSize):
                    Logger.debug("MAIN: Removing sample file: %s", filePath)
                    os.unlink(filePath)  # remove samples
                else:
                    video2 = video2 + 1
    if video2 >= video and video2 > 0:  # Check that all video files were moved
        status = 0

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent == 'utorrent' and extractionSuccess == False and inputHash:
        try:
            Logger.debug("MAIN: Connecting to uTorrent: %s", uTorrentWEBui)
            utorrentClass = UTorrentClient(uTorrentWEBui, uTorrentUSR, uTorrentPWD)
        except:
            Logger.exception("MAIN: Failed to connect to uTorrent")

        # if we are using links with uTorrent it means we need to pause it in order to access the files
        if useLink != "no":
            Logger.debug("MAIN: Stoping torrent %s in uTorrent while processing", inputName)
            utorrentClass.stop(inputHash)
            time.sleep(5)  # Give uTorrent some time to catch up with the change

        # Delete torrent and torrentdata from uTorrent
        if deleteOriginal == 1:
            Logger.debug("MAIN: Deleting torrent %s from uTorrent", inputName)
            utorrentClass.removedata(inputHash)
            utorrentClass.remove(inputHash)
            time.sleep(5)

    processCategories = Set([cpsCategory, sbCategory, hpCategory, mlCategory, gzCategory])

    if inputCategory and not (inputCategory in processCategories): # no extra processing to be done... yet.
        Logger.info("MAIN: No further processing to be done for category %s.", inputCategory)
        result = 1
    elif status == 0:
        Logger.debug("MAIN: Calling autoProcess script for successful download.")
    else:
        Logger.error("MAIN: Something failed! Please check logs. Exiting")
        sys.exit(-1)

    if inputCategory == cpsCategory:
        Logger.info("MAIN: Calling CouchPotatoServer to post-process: %s", inputName)
        if clientAgent == 'utorrent' and inputHash != '':
            download_id = 'uTorrent_' + inputHash
        elif clientAgent == 'transmission' and inputHash != '':
            download_id = 'Transmission_' + inputHash
        else:
            download_id = inputHash
        result = autoProcessMovie.process(outputDestination, inputName, status, clientAgent, download_id)
    elif inputCategory == sbCategory:
        Logger.info("MAIN: Calling Sick-Beard to post-process: %s", inputName)
        result = autoProcessTV.processEpisode(outputDestination, inputName, status)
    elif inputCategory == hpCategory:
        Logger.info("MAIN: Calling HeadPhones to post-process: %s", inputName)
        result = autoProcessMusic.process(outputDestination, inputName, status)
    elif inputCategory == mlCategory:
        Logger.info("MAIN: Calling Mylar to post-process: %s", inputName)
        result = autoProcessComics.processEpisode(outputDestination, inputName, status)
    elif inputCategory == gzCategory:
        Logger.info("MAIN: Calling Gamez to post-process: %s", inputName)
        result = autoProcessGames.process(outputDestination, inputName, status)

    if result == 1:
        Logger.info("MAIN: A problem was reported in the autoProcess* script. If torrent was pasued we will resume seeding")

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent == 'utorrent' and extractionSuccess == False and inputHash and useLink != "no" and deleteOriginal == 0: # we always want to resume seeding, for now manually find out what is wrong when extraction fails
        Logger.debug("MAIN: Starting torrent %s in uTorrent", inputName)
        utorrentClass.start(inputHash)

    Logger.info("MAIN: All done.")
Example #9
def main(inputDirectory, inputName, inputCategory, inputHash, inputID):

    status = int(1)  # 1 = failed | 0 = success
    root = int(0)
    video = int(0)
    video2 = int(0)
    foundFile = int(0)
    extracted_folder = []
    extractionSuccess = False
    copy_list = []

    Logger.debug("MAIN: Received Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory)
    if inputCategory in sbCategory and sbFork in SICKBEARD_TORRENT:
        Logger.info("MAIN: Calling SickBeard's %s branch to post-process: %s", sbFork, inputName)
        result = autoProcessTV.processEpisode(inputDirectory, inputName, int(0))
        if result == 1:
            Logger.info("MAIN: A problem was reported in the autoProcess* script. If torrent was pasued we will resume seeding")
        Logger.info("MAIN: All done.")
        sys.exit()

    inputDirectory, inputName, inputCategory, root = category_search(inputDirectory, inputName, inputCategory, root, categories)  # Confirm the category by parsing directory structure

    outputDestination = ""
    for category in categories:
        if category == inputCategory:
            if os.path.basename(inputDirectory) == inputName:
                Logger.info("MAIN: Download is a directory")
                outputDestination = os.path.normpath(os.path.join(outputDirectory, category, safeName(inputName)))
            else:
                Logger.info("MAIN: Download is not a directory")
                outputDestination = os.path.normpath(os.path.join(outputDirectory, category, os.path.splitext(safeName(inputName))[0]))
            Logger.info("MAIN: Output directory set to: %s", outputDestination)
            break
        else:
            continue
    if outputDestination == "":
        if inputCategory == "":
            inputCategory = "UNCAT" 
        if os.path.basename(inputDirectory) == inputName:
            Logger.info("MAIN: Download is a directory")
            outputDestination = os.path.normpath(os.path.join(outputDirectory, inputCategory, safeName(inputName)))
        else:
            Logger.info("MAIN: Download is not a directory")
            outputDestination = os.path.normpath(os.path.join(outputDirectory, inputCategory, os.path.splitext(safeName(inputName))[0]))
        Logger.info("MAIN: Output directory set to: %s", outputDestination)

    processOnly = cpsCategory + sbCategory + hpCategory + mlCategory + gzCategory
    if not "NONE" in user_script_categories: # if None, we only process the 5 listed.
        if "ALL" in user_script_categories: # All defined categories
            processOnly = categories
        processOnly.extend(user_script_categories) # Adds all categories to be processed by userscript.

    if not inputCategory in processOnly:
        Logger.info("MAIN: No processing to be done for category: %s. Exiting", inputCategory) 
        Logger.info("MAIN: All done.")
        sys.exit()

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent in ['utorrent', 'transmission'] and inputHash:
        if clientAgent == 'utorrent':
            try:
                Logger.debug("MAIN: Connecting to %s: %s", clientAgent, uTorrentWEBui)
                utorrentClass = UTorrentClient(uTorrentWEBui, uTorrentUSR, uTorrentPWD)
            except:
                Logger.exception("MAIN: Failed to connect to uTorrent")
                utorrentClass = ""
        if clientAgent == 'transmission':
            try:
                Logger.debug("MAIN: Connecting to %s: http://%s:%s", clientAgent, TransmissionHost, TransmissionPort)
                TransmissionClass = TransmissionClient(TransmissionHost, TransmissionPort, TransmissionUSR, TransmissionPWD)
            except:
                Logger.exception("MAIN: Failed to connect to Transmission")
                TransmissionClass = ""

        # if we are using links with uTorrent it means we need to pause it in order to access the files
        Logger.debug("MAIN: Stoping torrent %s in %s while processing", inputName, clientAgent)
        if clientAgent == 'utorrent' and utorrentClass != "":            
            utorrentClass.stop(inputHash)
        if clientAgent == 'transmission' and TransmissionClass != "":
            TransmissionClass.stop_torrent(inputID)
        time.sleep(5)  # Give Torrent client some time to catch up with the change      

    Logger.debug("MAIN: Scanning files in directory: %s", inputDirectory)      

    now = datetime.datetime.now()
    for dirpath, dirnames, filenames in os.walk(inputDirectory):
        for file in filenames:

            filePath = os.path.join(dirpath, file)
            fileName, fileExtension = os.path.splitext(file)
            targetDirectory = os.path.join(outputDestination, file)

            if root == 1:
                if foundFile == int(0): 
                    Logger.debug("MAIN: Looking for %s in: %s", inputName, file)
                if (safeName(inputName) in safeName(file)) or (safeName(fileName) in safeName(inputName)):
                    #pass  # This file does match the Torrent name
                    foundFile = 1
                    Logger.debug("MAIN: Found file %s that matches Torrent Name %s", file, inputName)
                else:
                    continue  # This file does not match the Torrent name, skip it

            if root == 2:
                Logger.debug("MAIN: Looking for files with modified/created dates less than 5 minutes old.")
                mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(dirpath, file)))
                ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(os.path.join(dirpath, file)))
                if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)):
                    #pass  # This file does match the date time criteria
                    foundFile = 1
                    Logger.debug("MAIN: Found file %s with date modifed/created less than 5 minutes ago.", file)
                else:
                    continue  # This file has not been recently moved or created, skip it

            if fileExtension in mediaContainer:  # If the file is a video file
                if is_sample(filePath, inputName, minSampleSize) and not inputCategory in hpCategory:  # Ignore samples
                    Logger.info("MAIN: Ignoring sample file: %s  ", filePath)
                    continue
                else:
                    video = video + 1
                    Logger.info("MAIN: Found video file %s in %s", fileExtension, filePath)
                    try:
                        copy_link(filePath, targetDirectory, useLink, outputDestination)
                        copy_list.append([filePath, os.path.join(outputDestination, file)])
                    except:
                        Logger.exception("MAIN: Failed to link file: %s", file)
            elif fileExtension in metaContainer:
                Logger.info("MAIN: Found metadata file %s for file %s", fileExtension, filePath)
                try:
                    copy_link(filePath, targetDirectory, useLink, outputDestination)
                    copy_list.append([filePath, os.path.join(outputDestination, file)])
                except:
                    Logger.exception("MAIN: Failed to link file: %s", file)
                continue
            elif fileExtension in compressedContainer:
                if inputCategory in hpCategory: # We need to link all files for HP in order to move these back to support seeding.
                    Logger.info("MAIN: Linking compressed archive file %s for file %s", fileExtension, filePath)
                    try:
                        copy_link(filePath, targetDirectory, useLink, outputDestination)
                        copy_list.append([filePath, os.path.join(outputDestination, file)])
                    except:
                        Logger.exception("MAIN: Failed to link file: %s", file)
                # find part numbers in second "extension" from right, if we have more than 1 compressed file in the same directory.
                if re.search(r'\d+', os.path.splitext(fileName)[1]) and os.path.dirname(filePath) in extracted_folder and not (os.path.splitext(fileName)[1] in ['.720p','.1080p']):
                    part = int(re.search(r'\d+', os.path.splitext(fileName)[1]).group())
                    if part == 1: # we only want to extract the primary part.
                        Logger.debug("MAIN: Found primary part of a multi-part archive %s. Extracting", file)                       
                    else:
                        Logger.debug("MAIN: Found part %s of a multi-part archive %s. Ignoring", part, file)
                        continue
                Logger.info("MAIN: Found compressed archive %s for file %s", fileExtension, filePath)
                try:
                    if inputCategory in hpCategory: # HP needs to scan the same dir as passed to downloader. 
                        extractor.extract(filePath, inputDirectory)
                    else:
                        extractor.extract(filePath, outputDestination)
                    extractionSuccess = True # we use this variable to determine if we need to pause a torrent or not in uTorrent (don't need to pause archived content)
                    extracted_folder.append(os.path.dirname(filePath))
                except:
                    Logger.exception("MAIN: Extraction failed for: %s", file)
                continue
            elif not inputCategory in cpsCategory + sbCategory: #process all for non-video categories.
                Logger.info("MAIN: Found file %s for category %s", filePath, inputCategory)
                copy_link(filePath, targetDirectory, useLink, outputDestination)
                copy_list.append([filePath, os.path.join(outputDestination, file)])
                continue
            else:
                Logger.debug("MAIN: Ignoring unknown filetype %s for file %s", fileExtension, filePath)
                continue
    if not inputCategory in hpCategory: #don't flatten hp in case multi cd albums, and we need to copy this back later. 
        flatten(outputDestination)

    # Now check if movie files exist in destination:
    if inputCategory in cpsCategory + sbCategory: 
        for dirpath, dirnames, filenames in os.walk(outputDestination):
            for file in filenames:
                filePath = os.path.join(dirpath, file)
                fileName, fileExtension = os.path.splitext(file)
                if fileExtension in mediaContainer:  # If the file is a video file
                    if is_sample(filePath, inputName, minSampleSize):
                        Logger.debug("MAIN: Removing sample file: %s", filePath)
                        os.unlink(filePath)  # remove samples
                    else:
                        Logger.debug("MAIN: Found media file: %s", filePath)
                        video2 = video2 + 1
                else:
                    Logger.debug("MAIN: File %s is not a media file", filePath)
        if video2 >= video and video2 > int(0):  # Check that all video files were moved
            Logger.debug("MAIN: Found %s media files", str(video2))
            status = int(0)
        else:
            Logger.debug("MAIN: Found %s media files in output. %s were found in input", str(video2), str(video))

    processCategories = cpsCategory + sbCategory + hpCategory + mlCategory + gzCategory

    if (inputCategory in user_script_categories and not "NONE" in user_script_categories) or ("ALL" in user_script_categories and not inputCategory in processCategories):
        Logger.info("MAIN: Processing user script %s.", user_script)
        result = external_script(outputDestination)
    elif status == int(0) or (inputCategory in hpCategory + mlCategory + gzCategory): # if movies linked/extracted or for other categories.
        Logger.debug("MAIN: Calling autoProcess script for successful download.")
        status = int(0) # hp, ml, gz don't support failed.
    else:
        Logger.error("MAIN: Something failed! Please check logs. Exiting")
        sys.exit(-1)

    if inputCategory in cpsCategory:
        Logger.info("MAIN: Calling CouchPotatoServer to post-process: %s", inputName)
        download_id = inputHash
        result = autoProcessMovie.process(outputDestination, inputName, status, clientAgent, download_id, inputCategory)
    elif inputCategory in sbCategory:
        Logger.info("MAIN: Calling Sick-Beard to post-process: %s", inputName)
        result = autoProcessTV.processEpisode(outputDestination, inputName, status, inputCategory)
    elif inputCategory in hpCategory:
        Logger.info("MAIN: Calling HeadPhones to post-process: %s", inputName)
        result = autoProcessMusic.process(inputDirectory, inputName, status, inputCategory)
    elif inputCategory in mlCategory:
        Logger.info("MAIN: Calling Mylar to post-process: %s", inputName)
        result = autoProcessComics.processEpisode(outputDestination, inputName, status, inputCategory)
    elif inputCategory in gzCategory:
        Logger.info("MAIN: Calling Gamez to post-process: %s", inputName)
        result = autoProcessGames.process(outputDestination, inputName, status, inputCategory)

    if result == 1:
        Logger.info("MAIN: A problem was reported in the autoProcess* script. If torrent was paused we will resume seeding")

    if inputCategory in hpCategory:
        # we need to move the output dir files back...
        Logger.debug("MAIN: Moving temporary HeadPhones files back to allow seeding.")
        for item in copy_list:
            if os.path.isfile(os.path.normpath(item[1])): # check to ensure temp files still exist.
                if os.path.isfile(os.path.normpath(item[0])): # both exist, remove temp version
                    Logger.debug("MAIN: File %s still present. Removing tempoary file %s", str(item[0]), str(item[1]))
                    os.unlink(os.path.normpath(item[1]))
                    continue
                else: # move temp version back to allow seeding or Torrent removal.
                    Logger.debug("MAIN: Moving %s to %s", str(item[1]), str(item[0]))
                    shutil.move(os.path.normpath(item[1]), os.path.normpath(item[0]))
                    continue

    # Hardlink solution for uTorrent, need to implement support for deluge, transmission
    if clientAgent in ['utorrent', 'transmission'] and inputHash:
        # Delete torrent and torrentdata from Torrent client if processing was successful.
        if deleteOriginal == 1 and result != 1:
            Logger.debug("MAIN: Deleting torrent %s from %s", inputName, clientAgent)
            if clientAgent == 'utorrent' and utorrentClass != "":
                utorrentClass.removedata(inputHash)
                if not inputCategory in hpCategory:
                    utorrentClass.remove(inputHash)
            if clientAgent == 'transmission' and TransmissionClass != "":
                if inputCategory in hpCategory: #don't delete actual files for hp category, just remove torrent.
                    TransmissionClass.remove_torrent(inputID, False)
                else:
                    TransmissionClass.remove_torrent(inputID, True)
        # we always want to resume seeding, for now manually find out what is wrong when extraction fails
        else:
            Logger.debug("MAIN: Starting torrent %s in %s", inputName, clientAgent)
            if clientAgent == 'utorrent' and utorrentClass != "":
                utorrentClass.start(inputHash)
            if clientAgent == 'transmission' and TransmissionClass != "":
                TransmissionClass.start_torrent(inputID)
        time.sleep(5)        
    #cleanup
    if inputCategory in processCategories and result == 0 and os.path.isdir(outputDestination):
        num_files_new = int(0)
        file_list = []
        for dirpath, dirnames, filenames in os.walk(outputDestination):
            for file in filenames:
                filePath = os.path.join(dirpath, file)
                fileName, fileExtension = os.path.splitext(file)
                if fileExtension in mediaContainer or fileExtension in metaContainer:
                    num_files_new = num_files_new + 1
                    file_list.append(file)
        if num_files_new == int(0): 
            Logger.info("All files have been processed. Cleaning outputDirectory %s", outputDestination)
            shutil.rmtree(outputDestination)
        else:
            Logger.info("outputDirectory %s still contains %s media and/or meta files. This directory will not be removed.", outputDestination, num_files_new)
            for item in file_list:
                Logger.debug("media/meta file found: %s", item)
    Logger.info("MAIN: All done.")
Example #10
                    Logger.error("MAIN: Failed to link file %s", file)
                    failed_link = 1
        elif fileExtention in metaContainer:
            source = filePath
            target = os.path.join(outputDestination, file)
            Logger.info("MAIN: Found metadata file %s for file %s", fileExtention, filePath)
            state = copy_link(source, target, useLink, outputDestination)
            if state == False:
                Logger.error("MAIN: Failed to link file %s", file)
                failed_link = 1
        elif fileExtention in compressedContainer:
            Logger.info("MAIN: Found compressed archive %s for file %s", fileExtention, filePath)
            source = filePath
            target = os.path.join(outputDestination, file)
            try:
                extractor.extract(dirpath, file, outputDestination)
            except:
                Logger.warn("Extraction failed for %s", file)
        else:
            Logger.debug("MAIN: Ignoring unknown filetype %s for file %s", fileExtention, filePath)
            continue
flatten(outputDestination)

# Now check if movie files exist in destination:
for dirpath, dirnames, filenames in os.walk(outputDestination):
    for file in filenames:
        filePath = os.path.join(dirpath, file)
        fileExtention = os.path.splitext(file)[1]
        if fileExtention in mediaContainer:  # If the file is a video file
            if is_sample(filePath, inputName, minSampleSize):
                Logger.debug("Removing sample file: %s", filePath)