Example #1
def storyarcinfo(xmlid):

    comicLibrary = listStoryArcs()

    arcinfo = {}

    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - a lot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,publisher,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    #logger.fdebug('arcpull_url:' + str(ARCPULL_URL))

    #new CV API restriction - one api request / second.
    if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CVAPI_RATE)

    #download the file:
    payload = None

    try:
        r = requests.get(ARCPULL_URL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return
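These snippets are excerpted from the middle of the module, so the imports they depend on are not shown. Below is a minimal sketch of the import block the code above appears to assume, inferred from how the names are used (the actual mb.py may organize these differently, and listStoryArcs is provided elsewhere in mylar):

import time

import requests
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError

import mylar
from mylar import logger   # mylar's logging wrapper (warn/error/fdebug are used above)
from mylar import cv       # ComicVine helper assumed to provide pulldetails()/Getissue()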
Example #2
def storyarcinfo(xmlid):

    comicLibrary = listStoryArcs()

    arcinfo = {}

    if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
        logger.warn(
            'You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.'
        )
        return
    else:
        comicapi = mylar.CONFIG.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,publisher,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    #logger.fdebug('arcpull_url:' + str(ARCPULL_URL))

    #new CV API restriction - one api request / second.
    if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CONFIG.CVAPI_RATE)

    #download the file:
    payload = None

    try:
        r = requests.get(ARCPULL_URL,
                         params=payload,
                         verify=mylar.CONFIG.CV_VERIFY,
                         headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('While parsing data from ComicVine, got exception: %s' % e)
        return

    try:
        arcdom = parseString(r.content)
    except ExpatError as e:
        if '<title>Abnormal Traffic Detected' in r.content:
            logger.error(
                'ComicVine has banned this server\'s IP address because it exceeded the API rate limit.'
            )
        else:
            logger.warn(
                'While parsing data from ComicVine, got exception: %s for data: %s'
                % (e, r.content))
        return
    except Exception as e:
        logger.warn(
            'While parsing data from ComicVine, got exception: %s for data: %s'
            % (e, r.content))
        return

    try:
        logger.fdebug('story_arc ascension')
        issuedom = arcdom.getElementsByTagName('issue')
        issuecount = len(issuedom)  #arcdom.getElementsByTagName('issue') )
        isc = 0
        arclist = ''
        ordernum = 1
        for isd in issuedom:
            zeline = isd.getElementsByTagName('id')
            isdlen = len(zeline)
            isb = 0
            while (isb < isdlen):
                if isc == 0:
                    arclist = str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
                else:
                    arclist += '|' + str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
                ordernum += 1
                isb += 1

            isc += 1

    except:
        logger.fdebug('unable to retrieve issue count - nullifying value.')
        issuecount = 0
        arclist = ''  # ensure arclist is defined even if the loop above failed partway

    try:
        firstid = None
        arcyear = None
        fid = len(arcdom.getElementsByTagName('id'))
        fi = 0
        while (fi < fid):
            if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
                if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
                    logger.fdebug('hit it.')
                    firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
                    break  # don't break out here as we want to gather ALL the issue ID's since it's here
            fi += 1
        logger.fdebug('firstid: ' + str(firstid))
        if firstid is not None:
            firstdom = cv.pulldetails(comicid=None,
                                      type='firstissue',
                                      issueid=firstid)
            logger.fdebug('success')
            arcyear = cv.Getissue(firstid, firstdom, 'firstissue')
    except:
        logger.fdebug('Unable to retrieve first issue details. Not calculating at this time.')

    try:
        xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText
    except:
        xmlimage = "cache/blankcover.jpg"

    try:
        xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText
    except:
        xmldesc = "None"

    try:
        xmlpub = arcdom.getElementsByTagName('publisher')[0].firstChild.wholeText
    except:
        xmlpub = "None"

    try:
        xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
    except:
        xmldeck = "None"

    if xmlid in comicLibrary:
        haveit = comicLibrary[xmlid]
    else:
        haveit = "No"

    arcinfo = {
        #'name':                 xmlTag,    #these four are passed into it only when it's a new add
        #'url':                  xmlurl,    #needs to be modified for refreshing to work completely.
        #'publisher':            xmlpub,
        'comicyear': arcyear,
        'comicid': xmlid,
        'issues': issuecount,
        'comicimage': xmlimage,
        'description': xmldesc,
        'deck': xmldeck,
        'arclist': arclist,
        'haveit': haveit,
        'publisher': xmlpub
    }

    return arcinfo
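For reference, the arclist value assembled above is a packed string of the form issueid,ordernum|issueid,ordernum|... The helper below is hypothetical (not part of mylar) and is shown only to illustrate that format:

def split_arclist(arclist):
    # Unpack 'issueid,ordernum|issueid,ordernum|...' into (issueid, ordernum) tuples.
    pairs = []
    for entry in arclist.split('|'):
        if not entry:
            continue
        issueid, ordernum = entry.split(',')
        pairs.append((issueid.strip(), int(ordernum)))
    return pairs

# e.g. split_arclist('12345,1|67890,2') -> [('12345', 1), ('67890', 2)]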
Example #3
File: mb.py Project: evilhero/mylar
def storyarcinfo(xmlid):

    comicLibrary = listStoryArcs()

    arcinfo = {}

    if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
        logger.warn('You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.')
        return
    else:
        comicapi = mylar.CONFIG.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,publisher,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    #logger.fdebug('arcpull_url:' + str(ARCPULL_URL))

    #new CV API restriction - one api request / second.
    if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CONFIG.CVAPI_RATE)

    #download the file:
    payload = None

    try:
        r = requests.get(ARCPULL_URL, params=payload, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('While parsing data from ComicVine, got exception: %s' % e)
        return

    try:
        arcdom = parseString(r.content)
    except ExpatError as e:
        if u'<title>Abnormal Traffic Detected' in r.content:
            logger.error('ComicVine has banned this server\'s IP address because it exceeded the API rate limit.')
        else:
            logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content))
        return
    except Exception as e:
        logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content))
        return

    try:
        logger.fdebug('story_arc ascension')
        issuedom = arcdom.getElementsByTagName('issue')
        issuecount = len( issuedom ) #arcdom.getElementsByTagName('issue') )
        isc = 0
        arclist = ''
        ordernum = 1
        for isd in issuedom:
            zeline = isd.getElementsByTagName('id')
            isdlen = len( zeline )
            isb = 0
            while ( isb < isdlen):
                if isc == 0:
                    arclist = str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
                else:
                    arclist += '|' + str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
                ordernum+=1 
                isb+=1

            isc+=1

    except:
        logger.fdebug('unable to retrieve issue count - nullifying value.')
        issuecount = 0
        arclist = ''  # ensure arclist is defined even if the loop above failed partway

    try:
        firstid = None
        arcyear = None
        fid = len ( arcdom.getElementsByTagName('id') )
        fi = 0
        while (fi < fid):
            if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
                if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
                    logger.fdebug('hit it.')
                    firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
                    break # don't break out here as we want to gather ALL the issue ID's since it's here
            fi+=1
        logger.fdebug('firstid: ' + str(firstid))
        if firstid is not None:
            firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
            logger.fdebug('success')
            arcyear = cv.Getissue(firstid,firstdom,'firstissue')
    except:
        logger.fdebug('Unable to retrieve first issue details. Not calculating at this time.')

    try:
        xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText
    except:
        xmlimage = "cache/blankcover.jpg"

    try:
        xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText
    except:
        xmldesc = "None"

    try:
        xmlpub = arcdom.getElementsByTagName('publisher')[0].firstChild.wholeText
    except:
        xmlpub = "None"

    try:
        xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
    except:
        xmldeck = "None"

    if xmlid in comicLibrary:
        haveit = comicLibrary[xmlid]
    else:
        haveit = "No"

    arcinfo = {
            #'name':                 xmlTag,    #these four are passed into it only when it's a new add
            #'url':                  xmlurl,    #needs to be modified for refreshing to work completely.
            #'publisher':            xmlpub,
            'comicyear':            arcyear,
            'comicid':              xmlid,
            'issues':               issuecount,
            'comicimage':           xmlimage,
            'description':          xmldesc,
            'deck':                 xmldeck,
            'arclist':              arclist,
            'haveit':               haveit,
            'publisher':            xmlpub
            }

    return arcinfo
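A hedged usage sketch: in Examples #2 and #3, storyarcinfo returns None when no personal ComicVine API key is configured or when the request/parse step fails, and otherwise returns the arcinfo dict built above, so a caller would normally check the result before reading fields. The arc id below is made up for illustration:

arc = storyarcinfo('56789')   # hypothetical ComicVine story-arc id
if arc is None:
    logger.warn('Story arc lookup failed - see the messages logged above for the reason.')
else:
    logger.fdebug('arc %s: %s issues, publisher: %s' % (arc['comicid'], arc['issues'], arc['publisher']))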