예제 #1
0
파일: mb.py 프로젝트: cdj/mylar
def pullsearch(comicapi,comicquery,offset,explicit,type):
    u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
    u_comicquery = u_comicquery.replace(" ", "%20")

    if explicit == 'all' or explicit == 'loose':
        PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=' + str(type) + '&query=' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&page=' + str(offset)

    else:
        # 02/22/2014 use the volume filter label to get the right results.
        # add the 's' to the end of type to pluralize the caption (it's needed)
        if type == 'story_arc':
            logger.info('redefining.')
            u_comicquery = re.sub("%20AND%20", "%20", u_comicquery)
        PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page

    #all these imports are standard on most modern python implementations
    #CV API Check here.
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        cvapi_check()
    #download the file:
    try:
        file = urllib2.urlopen(PULLURL)
    except urllib2.HTTPError, err:
        logger.error('err : ' + str(err))
        logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.")
        return        
예제 #2
0
파일: mb.py 프로젝트: pcg79/mylar
def pullsearch(comicapi,comicquery,offset,explicit,type):
    """Build a ComicVine query URL for the requested resource and open it.

    comicapi   -- ComicVine API key
    comicquery -- raw query string (already AND/OR-joined by the caller)
    offset     -- page number in 'all'/'loose' mode, result offset otherwise
    explicit   -- 'all'/'loose' use the free-form search endpoint; any other
                  value filters the typed resource list by name
    type       -- ComicVine resource type (e.g. 'volume', 'story_arc')

    Returns None on HTTP failure; the success path continues past this
    excerpt in the full source (the opened response is parsed there).
    """
    # Percent-encode the query for safe inclusion in the URL.
    u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
    u_comicquery = u_comicquery.replace(" ", "%20")

    if explicit == 'all' or explicit == 'loose':
        # Free-form search endpoint; note this one pages via &page=.
        PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=' + str(type) + '&query=' + u_comicquery + '&field_list=id,name,start_year,first_issue,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&page=' + str(offset)

    else:
        # 02/22/2014 use the volume filter label to get the right results.
        # add the 's' to the end of type to pluralize the caption (it's needed)
        if type == 'story_arc':
            # Arc names match better without the injected AND separators.
            u_comicquery = re.sub("%20AND%20", "%20", u_comicquery)
        PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
    #all these imports are standard on most modern python implementations
    #CV API Check here.
    #logger.info('PULLURL:' + PULLURL)
    # Respect the ComicVine API rate limit before issuing the request.
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        cvapi_check()
    #download the file:
    try:
        file = urllib2.urlopen(PULLURL)
    except urllib2.HTTPError, err:
        logger.error('err : ' + str(err))
        logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.")
        return        
예제 #3
0
def pulldetails(comicid, type, issueid=None, offset=1):
    """Fetch one XML payload from the ComicVine API and return the parsed DOM.

    comicid -- ComicVine volume id (a '4050-' prefix is added when missing)
    type    -- one of 'comic', 'issue', 'firstissue', 'storyarc'; selects the
               endpoint and field_list queried
    issueid -- id used by the 'firstissue'/'storyarc' filters
    offset  -- paging offset forwarded on 'issue' queries

    Raises ValueError for an unrecognized 'type' (previously this fell
    through to urlopen with PULLURL unbound and crashed with an
    UnboundLocalError).
    """
    import urllib2

    #import easy to use xml parser called minidom:
    from xml.dom.minidom import parseString

    # Fall back to the shared default key when no personal key is configured.
    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn(
            'You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.'
        )
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    if type == 'comic':
        if not comicid.startswith('4050-'): comicid = '4050-' + comicid
        PULLURL = mylar.CVURL + 'volume/' + str(comicid) + '/?api_key=' + str(
            comicapi
        ) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases'
    elif type == 'issue':
        if mylar.CV_ONLY:
            cv_type = 'issues'
            searchset = 'filter=volume:' + str(
                comicid
            ) + '&field_list=cover_date,description,id,image,issue_number,name,date_last_updated,store_date'
        else:
            cv_type = 'volume/' + str(comicid)
            searchset = 'name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,store_date'
        PULLURL = mylar.CVURL + str(cv_type) + '/?api_key=' + str(
            comicapi) + '&format=xml&' + str(searchset) + '&offset=' + str(
                offset)
    elif type == 'firstissue':
        #this is used ONLY for CV_ONLY
        PULLURL = mylar.CVURL + 'issues/?api_key=' + str(
            comicapi) + '&format=xml&filter=id:' + str(
                issueid) + '&field_list=cover_date'
    elif type == 'storyarc':
        # NOTE(review): this hits the singular 'story_arc' endpoint with a
        # cover_date-only field_list, unlike the 'story_arcs'/filter=name form
        # seen elsewhere in this file - looks copy-pasted from 'firstissue';
        # confirm against the ComicVine API before relying on it.
        PULLURL = mylar.CVURL + 'story_arc/?api_key=' + str(
            comicapi) + '&format=xml&filter=id:' + str(
                issueid) + '&field_list=cover_date'
    else:
        # Fail fast with a clear message instead of an unbound PULLURL below.
        raise ValueError('pulldetails: unknown type ' + repr(type))

    #CV API Check here.
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        cvapi_check()
    #download the file:
    resp = urllib2.urlopen(PULLURL)
    #increment CV API counter.
    mylar.CVAPI_COUNT += 1
    try:
        #convert to string:
        data = resp.read()
    finally:
        # Close the handle even when the read fails (was leaked on error).
        resp.close()
    #parse the xml you downloaded
    dom = parseString(data)

    return dom
예제 #4
0
파일: cv.py 프로젝트: adrianmoisey/mylar
def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None):
    """Fetch one XML payload from the ComicVine API and return the parsed DOM.

    comicid     -- ComicVine volume id (a '4050-' prefix is added when missing)
    type        -- 'comic', 'issue', 'firstissue', 'storyarc' or 'comicyears'
    issueid     -- id (for 'firstissue') or arc name (for 'storyarc') filter
    offset      -- paging offset forwarded to the API
    arclist     -- presumably a comma-separated issue-id list for arc lookups
                   -- TODO confirm against callers
    comicidlist -- id list used by the 'comicyears' query

    NOTE(review): an unrecognized 'type' leaves PULLURL unbound, so the
    urlopen below would raise - callers must pass a valid type.
    """
    #import easy to use xml parser called minidom:
    from xml.dom.minidom import parseString

    # Fall back to the shared default key when no personal key is configured.
    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    if type == 'comic':
        if not comicid.startswith('4050-'): comicid = '4050-' + comicid
        PULLURL= mylar.CVURL + 'volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases'
    elif type == 'issue':
        if mylar.CV_ONLY:
            cv_type = 'issues'
            if arclist is None:
                # All issues belonging to one volume.
                searchset = 'filter=volume:' + str(comicid) + '&field_list=cover_date,description,id,image,issue_number,name,date_last_updated,store_date'
            else:
                # Specific issues by id (story-arc membership lookup).
                searchset = 'filter=id:' + (arclist) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume'
        else:
            cv_type = 'volume/' + str(comicid)
            searchset = 'name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,store_date'
        PULLURL = mylar.CVURL + str(cv_type) + '/?api_key=' + str(comicapi) + '&format=xml&' + str(searchset) + '&offset=' + str(offset)
    elif type == 'firstissue':
        #this is used ONLY for CV_ONLY
        PULLURL = mylar.CVURL + 'issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(issueid) + '&field_list=cover_date'
    elif type == 'storyarc':
        # Note: filters arcs by *name* (issueid carries the name here).
        PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(comicapi) + '&format=xml&filter=name:' + str(issueid) + '&field_list=cover_date'
    elif type == 'comicyears':
        PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher&offset=' + str(offset)

    #logger.info('PULLURL: ' + PULLURL)
    #CV API Check here.
    # Respect the ComicVine API rate limit before issuing the request.
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        cvapi_check()
    #download the file:
    file = urllib2.urlopen(PULLURL)
    #increment CV API counter.
    mylar.CVAPI_COUNT +=1
    #convert to string:
    data = file.read()
    #close file because we dont need it anymore:
    file.close()
    #parse the xml you downloaded
    dom = parseString(data)

    return dom
예제 #5
0
파일: mb.py 프로젝트: ruinit/mylar
def storyarcinfo(xmlid):

    comicLibrary = listLibrary()

    arcinfo = {}

    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        chkit = cvapi_check()
        if chkit == False:
            return 'apireached'
    try:
        file = urllib2.urlopen(ARCPULL_URL)
    except urllib2.HTTPError, err:
        logger.error('err : ' + str(err))
        logger.error('There was a major problem retrieving data from ComicVine - on their end.')
        return
예제 #6
0
def storyarcinfo(xmlid):
    """Look up a ComicVine story arc by id and fetch its detail XML.

    xmlid -- ComicVine story-arc id (prefixed with '4045-' in the URL).

    Returns 'apireached' when the API rate-limit check fails, None on an
    HTTP error; the success path continues past this excerpt in the full
    source.
    """

    comicLibrary = listLibrary()

    arcinfo = {}

    # Fall back to the shared default key when no personal key is configured.
    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
    # Respect the API rate limit before issuing the request.
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        chkit = cvapi_check()
        if chkit == False:
            return 'apireached'
    try:
        file = urllib2.urlopen(ARCPULL_URL)
    except urllib2.HTTPError, err:
        logger.error('err : ' + str(err))
        logger.error('There was a major problem retrieving data from ComicVine - on their end.')
        return
예제 #7
0
def run(dirName,
        nzbName=None,
        issueid=None,
        manual=None,
        filename=None,
        module=None):
    if module is None:
        module = ''
    module += '[META-TAGGER]'

    logger.fdebug(module + ' dirName:' + dirName)

    ## Set the directory in which comictagger and other external commands are located - IMPORTANT - ##
    # ( User may have to modify, depending on their setup, but these are some guesses for now )

    if platform.system() == "Windows":
        #if it's a source install.
        if os.path.isdir(os.path.join(mylar.CMTAGGER_PATH, '.git')):
            comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH,
                                           'comictagger.py')

        else:
            #regardless of 32/64 bit install
            if 'comictagger.exe' in mylar.CMTAGGER_PATH:
                comictagger_cmd = mylar.CMTAGGER_PATH
            else:
                comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH,
                                               'comictagger.exe')

        unrar_cmd = "C:\Program Files\WinRAR\UnRAR.exe"

        # test for UnRAR
        if not os.path.isfile(unrar_cmd):
            unrar_cmd = "C:\Program Files (x86)\WinRAR\UnRAR.exe"
            if not os.path.isfile(unrar_cmd):
                logger.fdebug(
                    module +
                    ' Unable to locate UnRAR.exe - make sure it is installed.')
                logger.fdebug(module + ' Aborting meta-tagging.')
                return "fail"

    elif platform.system() == "Darwin":  #Mac OS X
        comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')
        unrar_cmd = "/usr/local/bin/unrar"

    else:
        #for the 'nix
        if 'freebsd' in platform.linux_distribution()[0].lower():
            unrar_cmd = "/usr/local/bin/unrar"
        else:
            unrar_cmd = "/usr/bin/unrar"
        #check for dependencies here - configparser
        try:
            import configparser
        except ImportError:
            logger.fdebug(
                module +
                ' configparser not found on system. Please install manually in order to write metadata'
            )
            logger.fdebug(
                module +
                ' continuing with PostProcessing, but I am not using metadata.'
            )
            return "fail"

        #set this to the lib path (ie. '<root of mylar>/lib')
        comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')

#    if not os.path.exists( comictagger_cmd ):
#        print "ERROR:  can't find the ComicTagger program: {0}".format( comictagger_cmd )
#        print "        You probably need to edit this script!"
#        sys.exit( 1 )

    file_conversion = True
    file_extension_fixing = True
    if not os.path.exists(unrar_cmd):
        logger.fdebug(module + ' WARNING:  cannot find the unrar command.')
        logger.fdebug(module +
                      ' File conversion and extension fixing not available')
        logger.fdebug(
            module +
            ' You probably need to edit this script, or install the missing tool, or both!'
        )
        file_conversion = False
        file_extension_fixing = False

    ## Sets up other directories ##
    scriptname = os.path.basename(sys.argv[0])
    downloadpath = os.path.abspath(dirName)
    sabnzbdscriptpath = os.path.dirname(sys.argv[0])
    if manual is None:
        comicpath = os.path.join(downloadpath, "temp")
    else:
        chkpath, chkfile = os.path.split(filename)
        logger.fdebug(module + ' chkpath: ' + chkpath)
        logger.fdebug(module + ' chkfile: ' + chkfile)
        extensions = ('.cbr', '.cbz')
        if os.path.isdir(chkpath) and chkpath != downloadpath:
            logger.fdebug(module + ' Changing ' + downloadpath +
                          ' location to ' + chkpath + ' as it is a directory.')
            downloadpath = chkpath
        comicpath = os.path.join(downloadpath, issueid)
    unrar_folder = os.path.join(comicpath, "unrard")

    logger.fdebug(module + ' Paths / Locations:')
    logger.fdebug(module + ' scriptname : ' + scriptname)
    logger.fdebug(module + ' downloadpath : ' + downloadpath)
    logger.fdebug(module + ' sabnzbdscriptpath : ' + sabnzbdscriptpath)
    logger.fdebug(module + ' comicpath : ' + comicpath)
    logger.fdebug(module + ' unrar_folder : ' + unrar_folder)
    logger.fdebug(module + ' Running the ComicTagger Add-on for Mylar')

    if os.path.exists(comicpath):
        shutil.rmtree(comicpath)

    logger.fdebug(module + ' Attempting to create directory @: ' +
                  str(comicpath))
    try:
        os.makedirs(comicpath)
    except OSError:
        raise

    logger.fdebug(module + ' Created directory @ : ' + str(comicpath))
    logger.fdebug(module + ' Filename is : ' + str(filename))
    if filename is None:
        filename_list = glob.glob(os.path.join(downloadpath, "*.cbz"))
        filename_list.extend(glob.glob(os.path.join(downloadpath, "*.cbr")))
        fcount = 1
        for f in filename_list:
            if fcount > 1:
                logger.fdebug(
                    module +
                    ' More than one cbr/cbz within path, performing Post-Process on first file detected: '
                    + f)
                break
            shutil.move(f, comicpath)
            filename = f  #just the filename itself
            fcount += 1
    else:
        # if the filename is identical to the parent folder, the entire subfolder gets copied since it's the first match, instead of just the file
        shutil.move(filename, comicpath)

    filename = os.path.split(filename)[1]  # just the filename itself
    #print comicpath
    #print os.path.join( comicpath, filename )
    if filename.endswith('.cbr'):
        f = os.path.join(comicpath, filename)
        if zipfile.is_zipfile(f):
            logger.fdebug(module + ' zipfile detected')
            base = os.path.splitext(f)[0]
            shutil.move(f, base + ".cbz")
            logger.fdebug(module + ' {0}: renaming {1} to be a cbz'.format(
                scriptname, os.path.basename(f)))

    if file_extension_fixing:
        if filename.endswith('.cbz'):
            logger.info(module + ' Filename detected as a .cbz file.')
            f = os.path.join(comicpath, filename)
            logger.fdebug(module + ' filename : ' + f)

            if os.path.isfile(f):
                try:
                    rar_test_cmd_output = "is not RAR archive"  #default, in case of error
                    rar_test_cmd_output = subprocess.check_output(
                        [unrar_cmd, "t", f])
                except:
                    logger.fdebug(module +
                                  ' This is a zipfile. Unable to test rar.')

                if not "is not RAR archive" in rar_test_cmd_output:
                    base = os.path.splitext(f)[0]
                    shutil.move(f, base + ".cbr")
                    logger.fdebug(module +
                                  ' {0}: renaming {1} to be a cbr'.format(
                                      scriptname, os.path.basename(f)))
                else:
                    try:
                        with open(f):
                            pass
                    except:
                        logger.warn(module + ' No zip file present')
                        return "fail"

                    base = os.path.join(re.sub(issueid, '', comicpath),
                                        filename)  #extension is already .cbz
                    logger.fdebug(module + ' Base set to : ' + base)
                    logger.fdebug(module + ' Moving : ' + f + ' - to - ' +
                                  base)
                    shutil.move(f, base)
                    try:
                        with open(base):
                            logger.fdebug(
                                module +
                                ' Verified file exists in location: ' + base)
                        removetemp = True
                    except:
                        logger.fdebug(
                            module +
                            ' Cannot verify file exist in location: ' + base)
                        removetemp = False

                    if removetemp == True:
                        if comicpath != downloadpath:
                            #shutil.rmtree( comicpath )
                            logger.fdebug(
                                module +
                                ' Successfully removed temporary directory: ' +
                                comicpath)
                        else:
                            loggger.fdebug(
                                module +
                                ' Unable to remove temporary directory since it is identical to the download location : '
                                + comicpath)
                    logger.fdebug(module + ' new filename : ' + base)
                    nfilename = base

    # Now rename all CBR files to RAR
    if filename.endswith('.cbr'):
        #logger.fdebug('renaming .cbr to .rar')
        f = os.path.join(comicpath, filename)
        base = os.path.splitext(f)[0]
        baserar = base + ".rar"
        shutil.move(f, baserar)

        ## Changes any cbr files to cbz files for insertion of metadata ##
        if file_conversion:
            f = os.path.join(comicpath, filename)
            logger.fdebug(module +
                          ' {0}: converting {1} to be zip format'.format(
                              scriptname, os.path.basename(f)))
            basename = os.path.splitext(f)[0]
            zipname = basename + ".cbz"

            # Move into the folder where we will be unrar-ing things
            os.makedirs(unrar_folder)
            os.chdir(unrar_folder)

            # Extract and zip up
            logger.fdebug(module + ' {0}: Comicpath is ' +
                          baserar)  #os.path.join(comicpath,basename))
            logger.fdebug(module + ' {0}: Unrar is ' + unrar_folder)
            try:
                #subprocess.Popen( [ unrar_cmd, "x", os.path.join(comicpath,basename) ] ).communicate()
                output = subprocess.check_output([unrar_cmd, 'x', baserar])
            except CalledProcessError as e:
                if e.returncode == 3:
                    logger.warn(module + ' [Unrar Error 3] - Broken Archive.')
                elif e.returncode == 1:
                    logger.warn(module +
                                ' [Unrar Error 1] - No files to extract.')
                logger.warn(module +
                            ' Marking this as an incomplete download.')
                return "unrar error"

            shutil.make_archive(basename, "zip", unrar_folder)

            # get out of unrar folder and clean up
            os.chdir(comicpath)
            shutil.rmtree(unrar_folder)

            ## Changes zip to cbz

            f = os.path.join(comicpath, os.path.splitext(filename)[0] + ".zip")
            #print "zipfile" + f
            try:
                with open(f):
                    pass
            except:
                logger.warn(module + ' No zip file present:' + f)
                return "fail"
            base = os.path.splitext(f)[0]
            shutil.move(f, base + ".cbz")
            nfilename = base + ".cbz"
    #else:
    #    logger.fdebug(module + ' Filename:' + filename)
    #    nfilename = filename

    #if os.path.isfile( nfilename ):
    #    logger.fdebug(module + ' File exists in given location already : ' + nfilename)
    #    file_dir, file_n = os.path.split(nfilename)
    #else:
    #    #remove the IssueID from the path
    #    file_dir = re.sub(issueid, '', comicpath)
    #    file_n = os.path.split(nfilename)[1]
    if manual is None:
        file_dir = downloadpath
    else:
        file_dir = re.sub(issueid, '', comicpath)

    file_n = os.path.split(nfilename)[1]
    logger.fdebug(module + ' Converted directory: ' + str(file_dir))
    logger.fdebug(module + ' Converted filename: ' + str(file_n))
    logger.fdebug(module + ' Destination path: ' +
                  os.path.join(file_dir, file_n))  #dirName,file_n))
    logger.fdebug(module + ' dirName: ' + dirName)
    logger.fdebug(module + ' absDirName: ' + os.path.abspath(dirName))

    ##set up default comictagger options here.
    tagoptions = ["-s", "--verbose"]

    ## check comictagger version - less than 1.15.beta - take your chances.
    ctversion = subprocess.check_output([comictagger_cmd, "--version"])
    ctend = ctversion.find(':')
    ctcheck = re.sub("[^0-9]", "", ctversion[:ctend])
    ctcheck = re.sub('\.', '', ctcheck).strip()
    if int(ctcheck) >= int('1115'):  #(v1.1.15)
        if mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
            logger.fdebug(
                module + ' ' + ctversion[:ctend] +
                ' being used - no personal ComicVine API Key supplied. Take your chances.'
            )
            use_cvapi = "False"
        else:
            logger.fdebug(
                module + ' ' + ctversion[:ctend] +
                ' being used - using personal ComicVine API key supplied via mylar.'
            )
            use_cvapi = "True"
            tagoptions.extend(["--cv-api-key", mylar.COMICVINE_API])
    else:
        logger.fdebug(
            module + ' ' + ctversion[:ctend] +
            ' being used - personal ComicVine API key not supported in this version. Good luck.'
        )
        use_cvapi = "False"

    i = 1
    tagcnt = 0

    if mylar.CT_TAG_CR:
        tagcnt = 1
        logger.fdebug(module + ' CR Tagging enabled.')

    if mylar.CT_TAG_CBL:
        if not mylar.CT_TAG_CR:
            i = 2  #set the tag to start at cbl and end without doing another tagging.
        tagcnt = 2
        logger.fdebug(module + ' CBL Tagging enabled.')

    if tagcnt == 0:
        logger.warn(
            module +
            ' You have metatagging enabled, but you have not selected the type(s) of metadata to write. Please fix and re-run manually'
        )
        return "fail"

    #if it's a cbz file - check if no-overwrite existing tags is enabled / disabled in config.
    if nfilename.endswith('.cbz'):
        if mylar.CT_CBZ_OVERWRITE:
            logger.fdebug(
                module + ' Will modify existing tag blocks even if it exists.')
        else:
            logger.fdebug(
                module +
                ' Will NOT modify existing tag blocks even if they exist already.'
            )
            tagoptions.extend(["--nooverwrite"])

    if issueid is None:
        tagoptions.extend(["-f", "-o"])
    else:
        tagoptions.extend(["-o", "--id", issueid])

    original_tagoptions = tagoptions
    og_tagtype = None

    while (i <= tagcnt):
        if i == 1:
            tagtype = 'cr'  # CR meta-tagging cycle.
            tagdisp = 'ComicRack tagging'
        elif i == 2:
            tagtype = 'cbl'  #Cbl meta-tagging cycle
            tagdisp = 'Comicbooklover tagging'

        f_tagoptions = original_tagoptions

        if og_tagtype is not None:
            for index, item in enumerate(f_tagoptions):
                if item == og_tagtype:
                    f_tagoptions[index] = tagtype
        else:
            f_tagoptions.extend(["--type", tagtype, nfilename])

        og_tagtype = tagtype

        logger.info(module + ' ' + tagdisp +
                    ' meta-tagging processing started.')

        #CV API Check here.
        if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= 200:
            cvapi_check()

        currentScriptName = str(comictagger_cmd).decode("string_escape")
        logger.fdebug(module + ' Enabling ComicTagger script: ' +
                      str(currentScriptName) + ' with options: ' +
                      str(f_tagoptions))
        # generate a safe command line string to execute the script and provide all the parameters
        script_cmd = shlex.split(currentScriptName, posix=False) + f_tagoptions

        # use subprocess to run the command and capture output
        logger.fdebug(module + ' Executing command: ' + str(script_cmd))
        logger.fdebug(module + ' Absolute path to script: ' + script_cmd[0])
        try:
            p = subprocess.Popen(script_cmd)
            out, err = p.communicate()  #@UnusedVariable
            logger.fdebug(module + '[COMIC-TAGGER] : ' + str(out))
            logger.info(module + '[COMIC-TAGGER] Successfully wrote ' +
                        tagdisp)
        except OSError, e:
            logger.warn(
                module +
                '[COMIC-TAGGER] Unable to run comictagger with the options provided: '
                + str(script_cmd))

        #increment CV API counter.
        mylar.CVAPI_COUNT += 1

        ## Tag each CBZ, and move it back to original directory ##
        #if use_cvapi == "True":
        #    if issueid is None:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "--cv-api-key", mylar.COMICVINE_API, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate()
        #    else:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "--cv-api-key", mylar.COMICVINE_API, "-o", "--id", issueid, "--verbose", nfilename ] ).communicate()
        #        logger.info(module + ' ' + tagdisp + ' meta-tagging complete')
        #    #increment CV API counter.
        #    mylar.CVAPI_COUNT +=1
        #else:
        #    if issueid is None:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate()
        #    else:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate()
        #    #increment CV API counter.
        #    mylar.CVAPI_COUNT +=1
        i += 1
예제 #8
0
def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
    """Search ComicVine for series volumes or story arcs matching *name*.

    Pages through pullsearch() results 100 at a time and builds a list of
    dicts (name / comicyear / comicid / url / issues / comicimage /
    publisher / description / deck / haveit, plus arclist for story arcs).

    Parameters
    ----------
    name : search string; quoted if it contains !?* so CV treats it literally.
    mode : unused here (kept for caller compatibility).
    issue : optional issue number used to sanity-filter volume results.
    limityear : optional container of acceptable start years, or None for any.
    explicit : 'all' / 'loose' / 'explicit' search-word joining mode.
    type : CV resource type, 'volume' (default) or 'story_arc'.

    Returns the comiclist described above, or False when the initial
    search fails or yields no results.  NOTE(review): on later-page XML
    errors it returns None via bare `return` — callers should tolerate both.
    """

    #with mb_lock:
    comiclist = []
    comicResults = None
    comicLibrary = listLibrary()

    # Wrap the query in quotes if it contains CV wildcard/special chars so
    # they are searched literally rather than interpreted.
    chars = set('!?*')
    if any((c in chars) for c in name):
        name = '"' + name + '"'

    #print ("limityear: " + str(limityear))
    # The sentinel is the *string* 'None' — checked below with `== 'None'`.
    if limityear is None: limityear = 'None'

    comicquery = name
    #comicquery=name.replace(" ", "%20")

    if explicit is None:
        #logger.fdebug('explicit is None. Setting to Default mode of ALL search words.')
        #comicquery=name.replace(" ", " AND ")
        explicit = 'all'

    #OR
    # Join the search words per the requested mode (OR / AND); unknown
    # modes fall back to 'all' (AND).
    if explicit == 'loose':
        logger.fdebug(
            'Changing to loose mode - this will match ANY of the search words')
        comicquery = name.replace(" ", " OR ")
    elif explicit == 'explicit':
        logger.fdebug(
            'Changing to explicit mode - this will match explicitly on the EXACT words'
        )
        comicquery = name.replace(" ", " AND ")
    else:
        logger.fdebug(
            'Default search mode - this will match on ALL search words')
        comicquery = name.replace(" ", " AND ")
        explicit = 'all'

    # Fall back to the shared default API key when the user has not set one.
    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn(
            'You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.'
        )
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    if type is None:
        type = 'volume'

    #let's find out how many results we get from the query...
    searched = pullsearch(comicapi, comicquery, 0, explicit, type)
    if searched is None: return False
    totalResults = searched.getElementsByTagName(
        'number_of_total_results')[0].firstChild.wholeText
    logger.fdebug("there are " + str(totalResults) + " search results...")
    if not totalResults:
        return False
    countResults = 0
    # Results are paginated in batches of 100; loop until we have seen them all.
    while (countResults < int(totalResults)):
        #logger.fdebug("querying " + str(countResults))
        if countResults > 0:
            #2012/22/02 - CV API flipped back to offset usage instead of page
            if explicit == 'all' or explicit == 'loose':
                #all / loose uses page for offset
                offsetcount = (countResults / 100) + 1
            else:
                #explicit uses offset
                offsetcount = countResults

            searched = pullsearch(comicapi, comicquery, offsetcount, explicit,
                                  type)
        comicResults = searched.getElementsByTagName(type)  #('volume')
        body = ''
        n = 0
        if not comicResults:
            break
        for result in comicResults:
            #retrieve the first xml tag (<tag>data</tag>)
            #that the parser finds with name tagName:
            arclist = []
            if type == 'story_arc':
                #call cv.py here to find out issue count in story arc
                # Story-arc results nest several <name> elements; walk them
                # all and pick values by their parent node's tag name.
                try:
                    logger.fdebug('story_arc ascension')
                    names = len(result.getElementsByTagName('name'))
                    n = 0
                    logger.fdebug('length: ' + str(names))
                    xmlpub = None  #set this incase the publisher field isn't populated in the xml
                    while (n < names):
                        logger.fdebug(
                            result.getElementsByTagName('name')
                            [n].parentNode.nodeName)
                        if result.getElementsByTagName(
                                'name')[n].parentNode.nodeName == 'story_arc':
                            logger.fdebug('yes')
                            try:
                                xmlTag = result.getElementsByTagName(
                                    'name')[n].firstChild.wholeText
                                xmlTag = xmlTag.rstrip()
                                logger.fdebug('name: ' + str(xmlTag))
                            except:
                                logger.error(
                                    'There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.'
                                )
                                return

                        elif result.getElementsByTagName(
                                'name')[n].parentNode.nodeName == 'publisher':
                            logger.fdebug('publisher check.')
                            xmlpub = result.getElementsByTagName(
                                'name')[n].firstChild.wholeText

                        n += 1
                except:
                    logger.warn('error retrieving story arc search results.')
                    return

                # Same parent-node walk for the arc's site_detail_url.
                siteurl = len(result.getElementsByTagName('site_detail_url'))
                s = 0
                logger.fdebug('length: ' + str(names))
                xmlurl = None
                while (s < siteurl):
                    logger.fdebug(
                        result.getElementsByTagName('site_detail_url')
                        [s].parentNode.nodeName)
                    if result.getElementsByTagName('site_detail_url')[
                            s].parentNode.nodeName == 'story_arc':
                        try:
                            xmlurl = result.getElementsByTagName(
                                'site_detail_url')[s].firstChild.wholeText
                        except:
                            logger.error(
                                'There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.'
                            )
                            return
                    s += 1

                xmlid = result.getElementsByTagName(
                    'id')[0].firstChild.wholeText

                if xmlid is not None:
                    #respawn to the exact id for the story arc and count the # of issues present.
                    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(
                        xmlid
                    ) + '/?api_key=' + str(
                        comicapi
                    ) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
                    logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
                    # Throttle against the CV API rate limit before fetching.
                    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
                        cvapi_check()
                    try:
                        file = urllib2.urlopen(ARCPULL_URL)
                    except urllib2.HTTPError, err:
                        logger.error('err : ' + str(err))
                        logger.error(
                            'There was a major problem retrieving data from ComicVine - on their end.'
                        )
                        return

                    mylar.CVAPI_COUNT += 1
                    arcdata = file.read()
                    file.close()
                    arcdom = parseString(arcdata)

                    # Collect every issue id in the arc into a '|'-separated
                    # string (arclist) and count them.
                    try:
                        logger.fdebug('story_arc ascension')
                        issuecount = len(arcdom.getElementsByTagName('issue'))
                        issuedom = arcdom.getElementsByTagName('issue')
                        isc = 0
                        arclist = ''
                        for isd in issuedom:
                            zeline = isd.getElementsByTagName('id')
                            isdlen = len(zeline)
                            isb = 0
                            while (isb < isdlen):
                                if isc == 0:
                                    arclist = str(zeline[isb].firstChild.
                                                  wholeText).strip()
                                else:
                                    arclist += '|' + str(zeline[isb].firstChild
                                                         .wholeText).strip()
                                isb += 1

                            isc += 1

                    except:
                        logger.fdebug(
                            'unable to retrive issue count - nullifying value.'
                        )
                        issuecount = 0

                    # Find the arc's first appearance issue id, then resolve
                    # its year via cv.pulldetails/GetFirstIssue.
                    try:
                        firstid = None
                        arcyear = None
                        fid = len(arcdom.getElementsByTagName('id'))
                        fi = 0
                        while (fi < fid):
                            if arcdom.getElementsByTagName(
                                    'id'
                            )[fi].parentNode.nodeName == 'first_appeared_in_issue':
                                if not arcdom.getElementsByTagName('id')[
                                        fi].firstChild.wholeText == xmlid:
                                    logger.fdebug('hit it.')
                                    firstid = arcdom.getElementsByTagName(
                                        'id')[fi].firstChild.wholeText
                                    break  # - dont' break out here as we want to gather ALL the issue ID's since it's here
                            fi += 1
                        logger.fdebug('firstid: ' + str(firstid))
                        if firstid is not None:
                            firstdom = cv.pulldetails(comicid=None,
                                                      type='firstissue',
                                                      issueid=firstid)
                            logger.fdebug('success')
                            arcyear = cv.GetFirstIssue(firstid, firstdom)
                    except:
                        logger.fdebug(
                            'Unable to retrieve first issue details. Not caclulating at this time.'
                        )

                    # minidom quirk: when <image> holds child *elements*,
                    # childNodes[0].nodeValue is None -> a real cover exists
                    # under <super_url>; otherwise fall back to the blank cover.
                    if (arcdom.getElementsByTagName('image')
                        [0].childNodes[0].nodeValue) is None:
                        xmlimage = arcdom.getElementsByTagName(
                            'super_url')[0].firstChild.wholeText
                    else:
                        xmlimage = "cache/blankcover.jpg"

                    try:
                        xmldesc = arcdom.getElementsByTagName(
                            'desc')[0].firstChild.wholeText
                    except:
                        xmldesc = "None"

                    try:
                        xmldeck = arcdom.getElementsByTagName(
                            'deck')[0].firstChild.wholeText
                    except:
                        xmldeck = "None"

                    # Flag whether this id is already in the local library.
                    if xmlid in comicLibrary:
                        haveit = comicLibrary[xmlid]
                    else:
                        haveit = "No"

                comiclist.append({
                    'name': xmlTag,
                    'comicyear': arcyear,
                    'comicid': xmlid,
                    'url': xmlurl,
                    'issues': issuecount,
                    'comicimage': xmlimage,
                    'publisher': xmlpub,
                    'description': xmldesc,
                    'deck': xmldeck,
                    'arclist': arclist,
                    'haveit': haveit
                })

                logger.info(arclist)

            else:
                # --- volume results ---
                xmlcnt = result.getElementsByTagName(
                    'count_of_issues')[0].firstChild.wholeText
                #here we can determine what called us, and either start gathering all issues or just limited ones.
                if issue is not None and str(issue).isdigit():
                    #this gets buggered up with NEW/ONGOING series because the db hasn't been updated
                    #to reflect the proper count. Drop it by 1 to make sure.
                    limiter = int(issue) - 1
                else:
                    limiter = 0
                #get the first issue # (for auto-magick calcs)
                try:
                    xmlfirst = result.getElementsByTagName(
                        'issue_number')[0].firstChild.wholeText
                    if '\xbd' in xmlfirst:
                        xmlfirst = "1"  #if the first issue is 1/2, just assume 1 for logistics
                except:
                    xmlfirst = '1'

                #logger.info('There are : ' + str(xmlcnt) + ' issues in this series.')
                #logger.info('The first issue started at # ' + str(xmlfirst))

                cnt_numerical = int(xmlcnt) + int(
                    xmlfirst
                )  # (of issues + start of first issue = numerical range)
                #logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical))
                #logger.info('The limiter (issue max that we know of) is # ' + str(limiter))
                if cnt_numerical >= limiter:
                    # Pull name/cover by checking each <name>'s parent tag.
                    cnl = len(result.getElementsByTagName('name'))
                    cl = 0
                    xmlTag = 'None'
                    xmlimage = "cache/blankcover.jpg"
                    while (cl < cnl):
                        if result.getElementsByTagName(
                                'name')[cl].parentNode.nodeName == 'volume':
                            xmlTag = result.getElementsByTagName(
                                'name')[cl].firstChild.wholeText
                            #break

                        if result.getElementsByTagName(
                                'name')[cl].parentNode.nodeName == 'image':
                            xmlimage = result.getElementsByTagName(
                                'super_url')[0].firstChild.wholeText

                        cl += 1

                    if (result.getElementsByTagName('start_year')[0].firstChild
                        ) is not None:
                        xmlYr = result.getElementsByTagName(
                            'start_year')[0].firstChild.wholeText
                    else:
                        xmlYr = "0000"
                    #logger.info('name:' + str(xmlTag) + ' -- ' + str(xmlYr))
                    # 'None' sentinel means no year constraint was supplied.
                    if xmlYr in limityear or limityear == 'None':
                        xmlurl = result.getElementsByTagName(
                            'site_detail_url')[0].firstChild.wholeText
                        idl = len(result.getElementsByTagName('id'))
                        idt = 0
                        xmlid = None
                        while (idt < idl):
                            if result.getElementsByTagName(
                                    'id')[idt].parentNode.nodeName == 'volume':
                                xmlid = result.getElementsByTagName(
                                    'id')[idt].firstChild.wholeText
                                break
                            idt += 1

                        if xmlid is None:
                            logger.error(
                                'Unable to figure out the comicid - skipping this : '
                                + str(xmlurl))
                            continue
                        #logger.info('xmlid: ' + str(xmlid))
                        publishers = result.getElementsByTagName('publisher')
                        if len(publishers) > 0:
                            pubnames = publishers[0].getElementsByTagName(
                                'name')
                            if len(pubnames) > 0:
                                xmlpub = pubnames[0].firstChild.wholeText
                            else:
                                xmlpub = "Unknown"
                        else:
                            xmlpub = "Unknown"

                        try:
                            xmldesc = result.getElementsByTagName(
                                'description')[0].firstChild.wholeText
                        except:
                            xmldesc = "None"

                        #this is needed to display brief synopsis for each series on search results page.
                        try:
                            xmldeck = result.getElementsByTagName(
                                'deck')[0].firstChild.wholeText
                        except:
                            xmldeck = "None"

                        if xmlid in comicLibrary:
                            haveit = comicLibrary[xmlid]
                        else:
                            haveit = "No"
                        comiclist.append({
                            'name': xmlTag,
                            'comicyear': xmlYr,
                            'comicid': xmlid,
                            'url': xmlurl,
                            'issues': xmlcnt,
                            'comicimage': xmlimage,
                            'publisher': xmlpub,
                            'description': xmldesc,
                            'deck': xmldeck,
                            'haveit': haveit
                        })
                        #logger.fdebug('year: ' + str(xmlYr) + ' - constraint met: ' + str(xmlTag) + '[' + str(xmlYr) + '] --- 4050-' + str(xmlid))
                    else:
                        logger.fdebug(
                            'year: ' + str(xmlYr) +
                            ' -  contraint not met. Has to be within ' +
                            str(limityear))
            n += 1
        #search results are limited to 100 and by pagination now...let's account for this.
        countResults = countResults + 100
예제 #9
0
def run (dirName, nzbName=None, issueid=None, manual=None, filename=None, module=None):
    """Meta-tag a downloaded comic archive with ComicTagger.

    Moves the cbr/cbz found in (or given as *filename* within) *dirName*
    into a working directory, normalizes its extension (renaming rars to
    .cbr / zips to .cbz, converting cbr->cbz via unrar when available),
    then invokes the external comictagger script once per enabled tag
    format (ComicRack and/or ComicBookLover).

    Parameters
    ----------
    dirName : download directory to process.
    nzbName : unused here (kept for caller compatibility).
    issueid : ComicVine issue id to tag with; None lets comictagger search.
    manual : non-None indicates a manual (post-processing) run, which
             changes the working-directory layout.
    filename : explicit archive path; None scans dirName for cbz/cbr.
    module : log-prefix string; '[META-TAGGER]' is appended.

    Returns "fail" / "unrar error" on error paths; otherwise falls through
    to the tagging loop (side effects only).
    """
    if module is None:
        module = ''
    module += '[META-TAGGER]'

    logger.fdebug(module + ' dirName:' + dirName)

    ## Set the directory in which comictagger and other external commands are located - IMPORTANT - ##
    # ( User may have to modify, depending on their setup, but these are some guesses for now )

    if platform.system() == "Windows":
        #if it's a source install.
        if os.path.isdir(os.path.join(mylar.CMTAGGER_PATH, '.git')):
            comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')

        else:
            #regardless of 32/64 bit install
            if 'comictagger.exe' in mylar.CMTAGGER_PATH:
                comictagger_cmd = mylar.CMTAGGER_PATH
            else:
                comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.exe')

        unrar_cmd = "C:\Program Files\WinRAR\UnRAR.exe"

      # test for UnRAR - fall back to the 32-bit install location.
        if not os.path.isfile(unrar_cmd):
            unrar_cmd = "C:\Program Files (x86)\WinRAR\UnRAR.exe"
            if not os.path.isfile(unrar_cmd):
                logger.fdebug(module + ' Unable to locate UnRAR.exe - make sure it is installed.')
                logger.fdebug(module + ' Aborting meta-tagging.')
                return "fail"

    
    elif platform.system() == "Darwin":  #Mac OS X
        comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')
        unrar_cmd = "/usr/local/bin/unrar"
    
    else:
        #for the 'nix
        #check for dependencies here - configparser
        try:
            import configparser
        except ImportError:
            logger.fdebug(module + ' configparser not found on system. Please install manually in order to write metadata')
            logger.fdebug(module + ' continuing with PostProcessing, but I am not using metadata.')
            return "fail"

        #set this to the lib path (ie. '<root of mylar>/lib')
        comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')
        unrar_cmd = "/usr/bin/unrar"

#    if not os.path.exists( comictagger_cmd ):
#        print "ERROR:  can't find the ComicTagger program: {0}".format( comictagger_cmd )
#        print "        You probably need to edit this script!"
#        sys.exit( 1 )

    # Without unrar we can still tag, but cannot convert cbr archives or
    # correct mislabelled extensions.
    file_conversion = True
    file_extension_fixing = True
    if not os.path.exists( unrar_cmd ):
        logger.fdebug(module + ' WARNING:  cannot find the unrar command.')
        logger.fdebug(module + ' File conversion and extension fixing not available')
        logger.fdebug(module + ' You probably need to edit this script, or install the missing tool, or both!')
        file_conversion = False
        file_extension_fixing = False


    ## Sets up other directories ##
    scriptname = os.path.basename( sys.argv[0] )
    downloadpath = os.path.abspath( dirName ) 
    sabnzbdscriptpath = os.path.dirname( sys.argv[0] )
    if manual is None:
        comicpath = os.path.join( downloadpath , "temp" )
    else:
        chkpath, chkfile = os.path.split(filename)
        logger.fdebug(module + ' chkpath: ' + chkpath)
        logger.fdebug(module + ' chkfile: ' + chkfile)
        extensions = ('.cbr', '.cbz')
        if os.path.isdir(chkpath) and chkpath != downloadpath:
            logger.fdebug(module + ' Changing ' + downloadpath + ' location to ' + chkpath + ' as it is a directory.')
            downloadpath = chkpath
        comicpath = os.path.join( downloadpath, issueid )
    unrar_folder = os.path.join( comicpath , "unrard" )

    logger.fdebug(module + ' Paths / Locations:')
    logger.fdebug(module + ' scriptname : ' + scriptname)
    logger.fdebug(module + ' downloadpath : ' + downloadpath)
    logger.fdebug(module + ' sabnzbdscriptpath : ' + sabnzbdscriptpath)
    logger.fdebug(module + ' comicpath : ' + comicpath)
    logger.fdebug(module + ' unrar_folder : ' + unrar_folder)
    logger.fdebug(module + ' Running the ComicTagger Add-on for Mylar')

    # Start from a clean working directory every run.
    if os.path.exists( comicpath ):
        shutil.rmtree( comicpath )

    logger.fdebug(module + ' Attempting to create directory @: ' + str(comicpath))
    try:
        os.makedirs(comicpath)
    except OSError:
        raise

    logger.fdebug(module + ' Created directory @ : ' + str(comicpath))
    logger.fdebug(module + ' Filename is : ' + str(filename))
    if filename is None:
        # No explicit file given - grab the first cbz/cbr in the download path.
        filename_list = glob.glob( os.path.join( downloadpath, "*.cbz" ) )
        filename_list.extend( glob.glob( os.path.join( downloadpath, "*.cbr" ) ) )
        fcount = 1
        for f in filename_list:
            if fcount > 1: 
                logger.fdebug(module + ' More than one cbr/cbz within path, performing Post-Process on first file detected: ' + f)
                break
            shutil.move( f, comicpath )
            filename = f  #just the filename itself
            fcount+=1
    else:
        # if the filename is identical to the parent folder, the entire subfolder gets copied since it's the first match, instead of just the file
        shutil.move( filename, comicpath )

    filename = os.path.split(filename)[1]   # just the filename itself
    # Default the working-file path now so nfilename is always bound even
    # when no rename/conversion branch below runs (e.g. a plain .cbz with
    # extension-fixing disabled); previously that raised a NameError.
    nfilename = os.path.join( comicpath, filename )
    #print comicpath
    #print os.path.join( comicpath, filename )
    if filename.endswith('.cbr'):
        # Some 'cbr' files are actually zips - rename them to cbz.
        f = os.path.join( comicpath, filename )
        if zipfile.is_zipfile( f ):
            logger.fdebug(module + ' zipfile detected')
            base = os.path.splitext( f )[0]
            shutil.move( f, base + ".cbz" )
            logger.fdebug(module + ' {0}: renaming {1} to be a cbz'.format( scriptname, os.path.basename( f ) ))

    if file_extension_fixing:
        if filename.endswith('.cbz'):
            logger.info(module + ' Filename detected as a .cbz file.')
            f = os.path.join( comicpath, filename )
            logger.fdebug(module + ' filename : ' + f)

            if os.path.isfile( f ):
                # Test with unrar: a 'cbz' that is really a rar gets renamed
                # to cbr so the conversion step below can handle it.
                try:
                    rar_test_cmd_output = "is not RAR archive" #default, in case of error
                    rar_test_cmd_output = subprocess.check_output( [ unrar_cmd, "t", f ] )
                except:
                    logger.fdebug(module + ' This is a zipfile. Unable to test rar.')

                if not "is not RAR archive" in rar_test_cmd_output:
                    base = os.path.splitext( f )[0]
                    shutil.move( f, base + ".cbr" )
                    logger.fdebug(module + ' {0}: renaming {1} to be a cbr'.format( scriptname, os.path.basename( f ) ))
                else:
                    try:
                        with open(f): pass
                    except:
                        logger.warn(module + ' No zip file present')
                        return "fail"


                    # Genuine cbz - move it out of the temp dir to its final spot.
                    base = os.path.join(re.sub(issueid, '', comicpath), filename) #extension is already .cbz
                    logger.fdebug(module + ' Base set to : ' + base)
                    logger.fdebug(module + ' Moving : ' + f + ' - to - ' + base)
                    shutil.move( f, base)
                    try:
                        with open(base):
                            logger.fdebug(module + ' Verified file exists in location: ' + base)
                        removetemp = True
                    except:
                        logger.fdebug(module + ' Cannot verify file exist in location: ' + base)
                        removetemp = False

                    if removetemp == True:
                        if comicpath != downloadpath:
                            #shutil.rmtree( comicpath )
                            logger.fdebug(module + ' Successfully removed temporary directory: ' + comicpath)
                        else:
                            # FIX: was 'loggger.fdebug' (NameError when reached).
                            logger.fdebug(module + ' Unable to remove temporary directory since it is identical to the download location : ' + comicpath)
                    logger.fdebug(module + ' new filename : ' + base)
                    nfilename = base

    # Now rename all CBR files to RAR
    if filename.endswith('.cbr'):
        #logger.fdebug('renaming .cbr to .rar')
        f = os.path.join( comicpath, filename)
        base = os.path.splitext( f )[0]
        baserar = base + ".rar"
        shutil.move( f, baserar )

        ## Changes any cbr files to cbz files for insertion of metadata ##
        if file_conversion:
            f = os.path.join( comicpath, filename )
            logger.fdebug(module + ' {0}: converting {1} to be zip format'.format( scriptname, os.path.basename( f ) ))
            basename = os.path.splitext( f )[0]
            zipname = basename + ".cbz"

            # Move into the folder where we will be unrar-ing things
            os.makedirs( unrar_folder )
            os.chdir( unrar_folder )

            # Extract and zip up
            logger.fdebug(module + ' {0}: Comicpath is ' + baserar) #os.path.join(comicpath,basename))
            logger.fdebug(module + ' {0}: Unrar is ' + unrar_folder )
            try:
                #subprocess.Popen( [ unrar_cmd, "x", os.path.join(comicpath,basename) ] ).communicate()
                output = subprocess.check_output( [ unrar_cmd, 'x', baserar ] ) #os.path.join(comicpath,basename) ] )
            # FIX: qualify the exception - bare CalledProcessError is not a
            # builtin and raised a NameError here unless separately imported.
            except subprocess.CalledProcessError as e:
                if e.returncode == 3:
                    logger.warn(module + ' [Unrar Error 3] - Broken Archive.')
                elif e.returncode == 1:
                    logger.warn(module + ' [Unrar Error 1] - No files to extract.')
                logger.warn(module + ' Marking this as an incomplete download.')
                return "unrar error"

            shutil.make_archive( basename, "zip", unrar_folder )

            # get out of unrar folder and clean up
            os.chdir( comicpath )
            shutil.rmtree( unrar_folder )

            ## Changes zip to cbz
   
            f = os.path.join( comicpath, os.path.splitext(filename)[0] + ".zip" )
            #print "zipfile" + f
            try:
                with open(f): pass
            except:
                logger.warn(module + ' No zip file present:' + f)
                return "fail"         
            base = os.path.splitext( f )[0]
            shutil.move( f, base + ".cbz" )
            nfilename = base + ".cbz"
    #else:
    #    logger.fdebug(module + ' Filename:' + filename)       
    #    nfilename = filename

    #if os.path.isfile( nfilename ):
    #    logger.fdebug(module + ' File exists in given location already : ' + nfilename)
    #    file_dir, file_n = os.path.split(nfilename)
    #else:
    #    #remove the IssueID from the path
    #    file_dir = re.sub(issueid, '', comicpath)
    #    file_n = os.path.split(nfilename)[1]
    if manual is None:
        file_dir = downloadpath
    else:
        file_dir = re.sub(issueid, '', comicpath)

    file_n = os.path.split(nfilename)[1]
    logger.fdebug(module + ' Converted directory: ' + str(file_dir))
    logger.fdebug(module + ' Converted filename: ' + str(file_n))
    logger.fdebug(module + ' Destination path: ' + os.path.join(file_dir,file_n))  #dirName,file_n))
    logger.fdebug(module + ' dirName: ' + dirName)
    logger.fdebug(module + ' absDirName: ' + os.path.abspath(dirName))

    ##set up default comictagger options here.
    tagoptions = [ "-s", "--verbose" ]


    ## check comictagger version - less than 1.15.beta - take your chances.
    ctversion = subprocess.check_output( [ comictagger_cmd, "--version" ] )
    ctend = ctversion.find(':')
    ctcheck = re.sub("[^0-9]", "", ctversion[:ctend])
    ctcheck = re.sub('\.', '', ctcheck).strip()
    if int(ctcheck) >= int('1115'): #(v1.1.15)
        if mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
            logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - no personal ComicVine API Key supplied. Take your chances.')
            use_cvapi = "False"
        else:
            logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - using personal ComicVine API key supplied via mylar.')
            use_cvapi = "True"
            tagoptions.extend( [ "--cv-api-key", mylar.COMICVINE_API ] )
    else:
        logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - personal ComicVine API key not supported in this version. Good luck.')
        use_cvapi = "False"

    # i / tagcnt drive the tagging loop: 1 = ComicRack, 2 = ComicBookLover.
    i = 1
    tagcnt = 0

    if mylar.CT_TAG_CR:
        tagcnt = 1
        logger.fdebug(module + ' CR Tagging enabled.')

    if mylar.CT_TAG_CBL:
        if not mylar.CT_TAG_CR: i = 2  #set the tag to start at cbl and end without doing another tagging.
        tagcnt = 2
        logger.fdebug(module + ' CBL Tagging enabled.')

    if tagcnt == 0:
        logger.warn(module + ' You have metatagging enabled, but you have not selected the type(s) of metadata to write. Please fix and re-run manually')
        return "fail"
    
    #if it's a cbz file - check if no-overwrite existing tags is enabled / disabled in config.
    if nfilename.endswith('.cbz'):
        if mylar.CT_CBZ_OVERWRITE:
            logger.fdebug(module + ' Will modify existing tag blocks even if it exists.')
        else:
            logger.fdebug(module + ' Will NOT modify existing tag blocks even if they exist already.')
            tagoptions.extend( [ "--nooverwrite" ] )

    if issueid is None:
        tagoptions.extend( [ "-f", "-o" ] )
    else:
        tagoptions.extend( [ "-o", "--id", issueid ] )

    # NOTE: original_tagoptions aliases tagoptions (no copy); the in-place
    # --type replacement on the second pass below depends on this aliasing.
    original_tagoptions = tagoptions
    og_tagtype = None

    while ( i <= tagcnt ):
        if i == 1: 
            tagtype = 'cr'  # CR meta-tagging cycle.
            tagdisp = 'ComicRack tagging'
        elif i == 2: 
            tagtype = 'cbl'  #Cbl meta-tagging cycle
            tagdisp = 'Comicbooklover tagging'


        f_tagoptions = original_tagoptions

        if og_tagtype is not None: 
            # Second pass: swap the previous --type value in place.
            for index, item in enumerate(f_tagoptions):
                if item == og_tagtype:
                    f_tagoptions[index] = tagtype
        else:
            f_tagoptions.extend( [ "--type", tagtype, nfilename ] )

        og_tagtype = tagtype

        logger.info(module + ' ' + tagdisp + ' meta-tagging processing started.')
 
        #CV API Check here.
        # FIX: use the configured ceiling instead of a hard-coded 200, for
        # consistency with the other CVAPI_MAX checks in this file.
        if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
            cvapi_check()

        currentScriptName = str(comictagger_cmd).decode("string_escape")
        logger.fdebug(module + ' Enabling ComicTagger script: ' + str(currentScriptName) + ' with options: ' + str(f_tagoptions))
            # generate a safe command line string to execute the script and provide all the parameters
        script_cmd = shlex.split(currentScriptName, posix=False) + f_tagoptions

            # use subprocess to run the command and capture output
        logger.fdebug(module + ' Executing command: '+str(script_cmd))
        logger.fdebug(module + ' Absolute path to script: '+script_cmd[0])
        try:
            p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            out, err = p.communicate() #@UnusedVariable
            logger.fdebug(module + '[COMIC-TAGGER] : '+str(out))
            logger.info(module + '[COMIC-TAGGER] Successfully wrote ' + tagdisp)
        except OSError, e:
            logger.warn(module + '[COMIC-TAGGER] Unable to run comictagger with the options provided: ' + str(script_cmd))

        #increment CV API counter.
        mylar.CVAPI_COUNT +=1


        ## Tag each CBZ, and move it back to original directory ##
        #if use_cvapi == "True":
        #    if issueid is None:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "--cv-api-key", mylar.COMICVINE_API, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate()
        #    else:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "--cv-api-key", mylar.COMICVINE_API, "-o", "--id", issueid, "--verbose", nfilename ] ).communicate()
        #        logger.info(module + ' ' + tagdisp + ' meta-tagging complete')
        #    #increment CV API counter.
        #    mylar.CVAPI_COUNT +=1
        #else:
        #    if issueid is None:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate()
        #    else:
        #        subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate()
        #    #increment CV API counter.
        #    mylar.CVAPI_COUNT +=1
        i+=1
예제 #10
0
파일: mb.py 프로젝트: adrianmoisey/mylar
def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
    """Search ComicVine for series (volumes) or story arcs matching *name*.

    Builds a search query according to *explicit* mode ('all'/'loose'/'explicit'),
    pages through ComicVine results via pullsearch(), and parses the returned XML
    into a list of dicts (name, comicyear, comicid, url, issues, comicimage,
    publisher, description, deck, haveit, plus arclist for story arcs).

    name      -- series/arc title to search for (quoted if it contains !?*)
    mode      -- caller's search mode (not used directly in the visible portion)
    issue     -- known issue number; used as a lower bound on a series'
                 plausible issue range when filtering volume results
    limityear -- optional collection of acceptable start years ('None' = any)
    explicit  -- 'all' (default), 'loose' (OR words) or 'explicit' (AND words)
    type      -- ComicVine resource type; defaults to 'volume', may be
                 'story_arc'

    Returns False when the search yields nothing / fails up front; otherwise
    (past this visible chunk) returns the accumulated result list.
    """
    #with mb_lock:       
    comiclist = []
    comicResults = None
    # map of comicid -> have-status for everything already in the local library,
    # so each hit can be flagged as already-owned
    comicLibrary = listLibrary()
    
    # ComicVine treats !?* as operators, so quote the whole name if present
    chars = set('!?*')
    if any((c in chars) for c in name):
        name = '"'+name+'"'

    #print ("limityear: " + str(limityear))            
    # normalize to the string 'None' because the year filter below compares
    # against it textually
    if limityear is None: limityear = 'None'
    
    comicquery = name
    #comicquery=name.replace(" ", "%20")

    if explicit is None:
        #logger.fdebug('explicit is None. Setting to Default mode of ALL search words.')
        #comicquery=name.replace(" ", " AND ")
        explicit = 'all'

    # translate the search mode into a boolean-operator-joined query string
    if explicit == 'loose':
        logger.fdebug('Changing to loose mode - this will match ANY of the search words')
        comicquery = name.replace(" ", " OR ")
    elif explicit == 'explicit':
        logger.fdebug('Changing to explicit mode - this will match explicitly on the EXACT words')
        comicquery=name.replace(" ", " AND ")
    else:
        logger.fdebug('Default search mode - this will match on ALL search words')
        comicquery = name.replace(" ", " AND ")
        explicit = 'all'


    # fall back to the shared default API key when the user hasn't set their own
    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    if type is None:
        type = 'volume'

    # initial query (offset 0) just to learn the total result count
    searched = pullsearch(comicapi,comicquery,0,explicit,type)
    if searched is None: return False
    totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
    logger.fdebug("there are " + str(totalResults) + " search results...")
    if not totalResults:
        return False
    countResults = 0
    # page through results 100 at a time (CV's page size)
    while (countResults < int(totalResults)):
        #logger.fdebug("querying " + str(countResults))
        if countResults > 0:
            #2012/22/02 - CV API flipped back to offset usage instead of page 
            if explicit == 'all' or explicit == 'loose':
                #all / loose uses page for offset
                offsetcount = (countResults/100) + 1
            else:
                #explicit uses offset
                offsetcount = countResults
            
            searched = pullsearch(comicapi,comicquery,offsetcount,explicit,type)
        comicResults = searched.getElementsByTagName(type) #('volume')
        body = ''
        n = 0        
        if not comicResults:
           break        
        for result in comicResults:
                #retrieve the first xml tag (<tag>data</tag>)
                #that the parser finds with name tagName:
                arclist = []
                if type == 'story_arc':
                    # --- story-arc branch: walk the <name> nodes to pick out the
                    # arc's own name and its publisher (both live in <name> tags
                    # distinguished only by parent node) ---
                    try:
                        logger.fdebug('story_arc ascension')
                        names = len( result.getElementsByTagName('name') )
                        n = 0
                        logger.fdebug('length: ' + str(names))
                        xmlpub = None #set this incase the publisher field isn't populated in the xml
                        while ( n < names ):
                            logger.fdebug(result.getElementsByTagName('name')[n].parentNode.nodeName)
                            if result.getElementsByTagName('name')[n].parentNode.nodeName == 'story_arc':
                                logger.fdebug('yes')
                                try:
                                    xmlTag = result.getElementsByTagName('name')[n].firstChild.wholeText
                                    xmlTag = xmlTag.rstrip()
                                    logger.fdebug('name: ' + str(xmlTag))
                                except:
                                    logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
                                    return

                            elif result.getElementsByTagName('name')[n].parentNode.nodeName == 'publisher':
                                logger.fdebug('publisher check.')
                                xmlpub = result.getElementsByTagName('name')[n].firstChild.wholeText

                            n+=1
                    except:
                        logger.warn('error retrieving story arc search results.')
                        return

                    # same parent-node dance to find the arc's own detail URL
                    siteurl = len( result.getElementsByTagName('site_detail_url') )
                    s = 0
                    logger.fdebug('length: ' + str(names))
                    xmlurl = None
                    while ( s < siteurl ):
                        logger.fdebug(result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName)
                        if result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName == 'story_arc':
                            try:
                                xmlurl = result.getElementsByTagName('site_detail_url')[s].firstChild.wholeText
                            except:
                                logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
                                return
                        s+=1

                    xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText

                    if xmlid is not None:
                        #respawn to the exact id for the story arc and count the # of issues present.
                        ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
                        logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
                        # honor the CV API rate limit before issuing another request
                        if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
                            cvapi_check()
                        try:
                            file = urllib2.urlopen(ARCPULL_URL)
                        except urllib2.HTTPError, err:
                            logger.error('err : ' + str(err))
                            logger.error('There was a major problem retrieving data from ComicVine - on their end.')
                            return

                        mylar.CVAPI_COUNT +=1
                        arcdata = file.read()
                        file.close()
                        arcdom = parseString(arcdata)

                        # collect every issue id in the arc into a '|'-delimited
                        # string (arclist) and count them
                        try:
                            logger.fdebug('story_arc ascension')
                            issuecount = len( arcdom.getElementsByTagName('issue') )
                            issuedom = arcdom.getElementsByTagName('issue')
                            isc = 0 
                            arclist = ''
                            for isd in issuedom:
                                zeline = isd.getElementsByTagName('id')
                                isdlen = len( zeline )
                                isb = 0
                                while ( isb < isdlen):
                                    if isc == 0:
                                        arclist = str(zeline[isb].firstChild.wholeText).strip()
                                    else:
                                        arclist += '|' + str(zeline[isb].firstChild.wholeText).strip()
                                    isb+=1

                                isc+=1

                        except:
                            logger.fdebug('unable to retrive issue count - nullifying value.')
                            issuecount = 0

                        # derive the arc's year from its first-appearance issue
                        # (requires a second lookup through cv.pulldetails)
                        try:
                            firstid = None
                            arcyear = None
                            fid = len ( arcdom.getElementsByTagName('id') )
                            fi = 0
                            while (fi < fid):
                                if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
                                    if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
                                        logger.fdebug('hit it.')
                                        firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
                                        break # - dont' break out here as we want to gather ALL the issue ID's since it's here
                                fi+=1
                            logger.fdebug('firstid: ' + str(firstid))
                            if firstid is not None:
                                firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
                                logger.fdebug('success')
                                arcyear = cv.GetFirstIssue(firstid,firstdom)
                        except:
                            logger.fdebug('Unable to retrieve first issue details. Not caclulating at this time.')

                        # NOTE(review): this condition looks inverted - the real
                        # cover url is used when the image node's value IS None,
                        # and the blank cover otherwise. Left as-is (doc-only pass).
                        if (arcdom.getElementsByTagName('image')[0].childNodes[0].nodeValue) is None:
                            xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText
                        else:
                            xmlimage = "cache/blankcover.jpg"

                        try:
                            xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText
                        except:
                            xmldesc = "None"

                        try:
                            xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
                        except:
                            xmldeck = "None"
                            
                        if xmlid in comicLibrary:
                            haveit = comicLibrary[xmlid]
                        else:
                            haveit = "No"

                    comiclist.append({
                            'name':                 xmlTag,
                            'comicyear':            arcyear,
                            'comicid':              xmlid,
                            'url':                  xmlurl,
                            'issues':               issuecount,
                            'comicimage':           xmlimage,
                            'publisher':            xmlpub,
                            'description':          xmldesc,
                            'deck':                 xmldeck,
                            'arclist':              arclist,
                            'haveit':               haveit
                            })

                    logger.fdebug('IssueID\'s that are a part of ' + xmlTag + ' : ' + str(arclist))
    
                else:
                    # --- volume branch: filter series results by plausible issue
                    # range and (optionally) start year ---
                    xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
                    #here we can determine what called us, and either start gathering all issues or just limited ones.
                    if issue is not None and str(issue).isdigit():
                        #this gets buggered up with NEW/ONGOING series because the db hasn't been updated
                        #to reflect the proper count. Drop it by 1 to make sure.
                        limiter = int(issue) - 1
                    else: limiter = 0
                    #get the first issue # (for auto-magick calcs)
                    try:
                        xmlfirst = result.getElementsByTagName('issue_number')[0].firstChild.wholeText
                        if '\xbd' in xmlfirst:
                            xmlfirst = "1"  #if the first issue is 1/2, just assume 1 for logistics
                    except:
                        xmlfirst = '1'

                    #logger.info('There are : ' + str(xmlcnt) + ' issues in this series.')
                    #logger.info('The first issue started at # ' + str(xmlfirst))
                    
                    cnt_numerical = int(xmlcnt) + int(xmlfirst) # (of issues + start of first issue = numerical range)
                    #logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical))
                    #logger.info('The limiter (issue max that we know of) is # ' + str(limiter))
                    if cnt_numerical >= limiter:
                        # pull series name and cover image; both live under <name>
                        # nodes distinguished only by their parent node
                        cnl = len ( result.getElementsByTagName('name') )
                        cl = 0
                        xmlTag = 'None'
                        xmlimage = "cache/blankcover.jpg"
                        while (cl < cnl):
                            if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'volume':
                                xmlTag = result.getElementsByTagName('name')[cl].firstChild.wholeText
                                #break

                            if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'image':
                                xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText

                            cl+=1

                        if (result.getElementsByTagName('start_year')[0].firstChild) is not None:
                            xmlYr = result.getElementsByTagName('start_year')[0].firstChild.wholeText
                        else: xmlYr = "0000"
                        #logger.info('name:' + str(xmlTag) + ' -- ' + str(xmlYr))
                        if xmlYr in limityear or limityear == 'None':
                            xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
                            # the volume's own <id> must be found by parent node,
                            # since publisher/image also carry <id> children
                            idl = len ( result.getElementsByTagName('id') )
                            idt = 0
                            xmlid = None
                            while (idt < idl):
                                if result.getElementsByTagName('id')[idt].parentNode.nodeName == 'volume':
                                    xmlid = result.getElementsByTagName('id')[idt].firstChild.wholeText
                                    break
                                idt+=1

                            if xmlid is None:
                                logger.error('Unable to figure out the comicid - skipping this : ' + str(xmlurl))
                                continue    
                            #logger.info('xmlid: ' + str(xmlid))
                            publishers = result.getElementsByTagName('publisher')
                            if len(publishers) > 0:
                                pubnames = publishers[0].getElementsByTagName('name')
                                if len(pubnames) >0:
                                    xmlpub = pubnames[0].firstChild.wholeText
                                else:
                                    xmlpub = "Unknown"
                            else:
                                xmlpub = "Unknown"

                            try:
                                xmldesc = result.getElementsByTagName('description')[0].firstChild.wholeText
                            except:
                                xmldesc = "None"

                            #this is needed to display brief synopsis for each series on search results page.
                            try:
                                xmldeck = result.getElementsByTagName('deck')[0].firstChild.wholeText
                            except:
                                xmldeck = "None"


                            if xmlid in comicLibrary:
                                haveit = comicLibrary[xmlid]
                            else:
                                haveit = "No"
                            comiclist.append({
                                    'name':                 xmlTag,
                                    'comicyear':            xmlYr,
                                    'comicid':              xmlid,
                                    'url':                  xmlurl,
                                    'issues':               xmlcnt,
                                    'comicimage':           xmlimage,
                                    'publisher':            xmlpub,
                                    'description':          xmldesc,
                                    'deck':                 xmldeck,
                                    'haveit':               haveit
                                    })
                            #logger.fdebug('year: ' + str(xmlYr) + ' - constraint met: ' + str(xmlTag) + '[' + str(xmlYr) + '] --- 4050-' + str(xmlid))
                        else:
                            pass
                            #logger.fdebug('year: ' + str(xmlYr) + ' -  contraint not met. Has to be within ' + str(limityear)) 
                n+=1    
        #search results are limited to 100 and by pagination now...let's account for this.
        countResults = countResults + 100