Code example #1
File: mitele.py  Project: Medisan/TVWeb
def load_json(data):
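    # note: assumes a module-level "logger" configured elsewhere in this file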
    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items():
            if isinstance(v, (str, unicode)):
                rdct[k] = v.encode('utf8', 'ignore')
            else:
                rdct[k] = v
        return rdct

    try:
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        return json_data
    except:
        try:
            import simplejson
            json_data = simplejson.loads(data, object_hook=to_utf8)
            return json_data
        except:
            import sys
            import traceback
            exc_type, exc_value, exc_tb = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_tb)
            for line in lines:
                line_splits = line.split("\n")
                for line_split in line_splits:
                    logger.error(line_split)
            
            try:
                import json
                json_data = json.loads(data, object_hook=to_utf8)
                return json_data
            except:
                import sys
                import traceback
                exc_type, exc_value, exc_tb = sys.exc_info()
                lines = traceback.format_exception(exc_type, exc_value, exc_tb)
                for line in lines:
                    line_splits = line.split("\n")
                    for line_split in line_splits:
                        logger.error(line_split)

                try:
                    json_data = JSON.ObjectFromString(data, encoding="utf-8")
                    return json_data
                except:
                    import sys
                    import traceback
                    exc_type, exc_value, exc_tb = sys.exc_info()
                    lines = traceback.format_exception(exc_type, exc_value, exc_tb)
                    for line in lines:
                        line_splits = line.split("\n")
                        for line_split in line_splits:
                            logger.error(line_split)
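
All three fallback branches above repeat the same traceback-logging block. A condensed sketch of the same idea with the parser probing and error logging factored out (Python 2, to match these projects; the Plex-only JSON.ObjectFromString fallback is omitted since that name exists only inside Plex plugins, and None is returned on total failure, as in example #6):

import logging
import traceback

logger = logging.getLogger(__name__)

def load_json(data):
    # callback to transform json string values to utf8, as in the example above
    def to_utf8(dct):
        return dict((k, v.encode('utf8', 'ignore') if isinstance(v, (str, unicode)) else v)
                    for k, v in dct.items())

    # probe the same parsers in the same order: bundled simplejson,
    # system simplejson, then the stdlib json module
    for name in ('lib.simplejson', 'simplejson', 'json'):
        try:
            module = __import__(name, fromlist=['loads'])
        except ImportError:
            continue
        try:
            return module.loads(data, object_hook=to_utf8)
        except Exception:
            for line in traceback.format_exc().splitlines():
                logger.error(line)
    return None
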
Code example #2
def load_json(data):
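    # note: assumes module-level "traceback" and "logger" imports not shown in this excerpt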
    logger.info("core.jsontools.load_json Probando simplejson en directorio lib")

    def to_utf8(dct):
        if isinstance(dct, dict):
            return dict((to_utf8(key), to_utf8(value)) for key, value in dct.iteritems())
        elif isinstance(dct, list):
            return [to_utf8(element) for element in dct]
        elif isinstance(dct, unicode):
            return dct.encode("utf-8")
        else:
            return dct

    try:
        logger.info("core.jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson

        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("core.jsontools.load_json -> " + repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

        try:
            logger.info("core.jsontools.load_json Probando simplejson incluido en el interprete")
            import simplejson

            json_data = simplejson.loads(data, object_hook=to_utf8)
            logger.info("core.jsontools.load_json -> " + repr(json_data))
            return json_data
        except:
            logger.info(traceback.format_exc())

            try:
                logger.info("core.jsontools.load_json Probando json incluido en el interprete")
                import json

                json_data = json.loads(data, object_hook=to_utf8)
                logger.info("core.jsontools.load_json -> " + repr(json_data))
                return json_data
            except:
                logger.info(traceback.format_exc())

                try:
                    logger.info("core.jsontools.load_json Probando JSON de Plex")
                    json_data = JSON.ObjectFromString(data, encoding="utf-8")
                    logger.info("core.jsontools.load_json -> " + repr(json_data))
                    return json_data
                except:
                    logger.info(traceback.format_exc())

    logger.info("core.jsontools.load_json No se ha encontrado un parser de JSON valido")
    logger.info("core.jsontools.load_json -> (nada)")
    return ""
Code example #3
def load_json(data):
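    # note: "to_utf8", "traceback" and "logger" are defined elsewhere in this file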
    #logger.info("core.jsontools.load_json Probando simplejson en directorio lib")

    try:
        #logger.info("streamondemand.core.jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("streamondemand.core.jsontools.load_json -> " +
                    repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

        try:
            logger.info(
                "streamondemand.core.jsontools.load_json Probando simplejson incluido en el interprete"
            )
            import simplejson
            json_data = simplejson.loads(data, object_hook=to_utf8)
            logger.info("streamondemand.core.jsontools.load_json -> " +
                        repr(json_data))
            return json_data
        except:
            logger.info(traceback.format_exc())

            try:
                logger.info(
                    "streamondemand.core.jsontools.load_json Probando json incluido en el interprete"
                )
                import json
                json_data = json.loads(data, object_hook=to_utf8)
                logger.info("streamondemand.core.jsontools.load_json -> " +
                            repr(json_data))
                return json_data
            except:
                logger.info(traceback.format_exc())

                try:
                    logger.info(
                        "streamondemand.core.jsontools.load_json Probando JSON de Plex"
                    )
                    json_data = JSON.ObjectFromString(data, encoding="utf-8")
                    logger.info("streamondemand.core.jsontools.load_json -> " +
                                repr(json_data))
                    return json_data
                except:
                    logger.info(traceback.format_exc())

    logger.info(
        "streamondemand.core.jsontools.load_json No se ha encontrado un parser de JSON valido"
    )
    logger.info("streamondemand.core.jsontools.load_json -> (nada)")
    return ""
Code example #4
File: jsontools.py  Project: bialagary/mw
def load_json(data):
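    # note: assumes module-level "plugintools" and "traceback" imports not shown here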
    plugintools.log("jsontools.load_json Probando simplejson en directorio lib")

    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items():
            if isinstance(v, (str, unicode)):
                rdct[k] = v.encode('utf8', 'ignore')
            else:
                rdct[k] = v
        return rdct

    try:
        plugintools.log("jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        plugintools.log("jsontools.load_json -> "+repr(json_data))
        return json_data
    except:
        plugintools.log(traceback.format_exc())

        try:
            plugintools.log("jsontools.load_json Probando simplejson incluido en el interprete")
            import simplejson
            json_data = simplejson.loads(data, object_hook=to_utf8)
            plugintools.log("jsontools.load_json -> "+repr(json_data))
            return json_data
        except:
            plugintools.log(traceback.format_exc())
            
            try:
                plugintools.log("jsontools.load_json Probando json incluido en el interprete")
                import json
                json_data = json.loads(data, object_hook=to_utf8)
                plugintools.log("jsontools.load_json -> "+repr(json_data))
                return json_data
            except:
                plugintools.log(traceback.format_exc())

                try:
                    plugintools.log("jsontools.load_json Probando JSON de Plex")
                    json_data = JSON.ObjectFromString(data, encoding="utf-8")
                    plugintools.log("jsontools.load_json -> "+repr(json_data))
                    return json_data
                except:
                    plugintools.log(traceback.format_exc())

    plugintools.log("jsontools.load_json No se ha encontrado un parser de JSON valido")
    plugintools.log("jsontools.load_json -> (nada)")
    return ""
Code example #5
def load_json(data):
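    # note: assumes module-level "logger" and "traceback" imports not shown here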
    logger.info("core.jsontools.load_json Probando simplejson en directorio lib")

    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items():
            if isinstance(v, (str, unicode)):
                rdct[k] = v.encode('utf8', 'ignore')
            else:
                rdct[k] = v
        return rdct

    try:
        logger.info("core.jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("core.jsontools.load_json -> "+repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

        try:
            logger.info("core.jsontools.load_json Probando simplejson incluido en el interprete")
            import simplejson
            json_data = simplejson.loads(data, object_hook=to_utf8)
            logger.info("core.jsontools.load_json -> "+repr(json_data))
            return json_data
        except:
            logger.info(traceback.format_exc())
            
            try:
                logger.info("core.jsontools.load_json Probando json incluido en el interprete")
                import json
                json_data = json.loads(data, object_hook=to_utf8)
                logger.info("core.jsontools.load_json -> "+repr(json_data))
                return json_data
            except:
                logger.info(traceback.format_exc())

                try:
                    logger.info("core.jsontools.load_json Probando JSON de Plex")
                    json_data = JSON.ObjectFromString(data, encoding="utf-8")
                    logger.info("core.jsontools.load_json -> "+repr(json_data))
                    return json_data
                except:
                    logger.info(traceback.format_exc())

    logger.info("core.jsontools.load_json No se ha encontrado un parser de JSON valido")
    logger.info("core.jsontools.load_json -> (nada)")
    return ""
Code example #6
def load_json(data):
    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items():
            if isinstance(v, (str, unicode)):
                rdct[k] = v.encode('utf8', 'ignore')
            else:
                rdct[k] = v
        return rdct

    try:
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        return json_data
    except:
        try:
            import json
            json_data = json.loads(data, object_hook=to_utf8)
            return json_data
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
    return None
Code example #7
 def searchSerie(self, url_serie=""):
     import re
     settings.log(message=url_serie)
     settings.notification(message="Buscando en linea... %s" % url_serie[url_serie.rfind("/") + 1:])
     response = browser.open(url_serie)  # open the serie
     if response.code == 200:
         html = response.read()
         seasons = re.findall("/temporada-(.*?)'", html)
         seasons = list(set(seasons))
         sid = re.findall("var sid = '(.*?)'", html)[0]
         for season in seasons:
             url_search = "%s/a/episodes" % settings.url_address
             response = browser.open(url_search, urlencode(
                 {"action": "season", "start": "0", "limit": "0", "show": sid, "season": season}))
             if response.code == 200:
                 data = json.loads(response.read())
                 for item in data:
                     self.url_list.append(
                         settings.url_address + '/serie/' + item['permalink'] + '/temporada-' + item[
                             'season'] + '/episodio-' +
                         item['episode'])
                     self.titles.append(item['show']['title']['en'] + " S%sE%s" % (
                         item['season'], item['episode']))  # get the title
             else:
                 settings.log(">>>>>>>HTTP %s<<<<<<<" % response.code)
                 settings.notification(message="HTTP %s" % response.code, force=True)
     else:
         settings.log(">>>>>>>HTTP %s<<<<<<<" % response.code)
         settings.notification(message="HTTP %s" % response.code, force=True)
Code example #8
File: backend.py  Project: theguardian/JIRA-APPy
def validate_oauth(oauth_token):
    consumer, client = request_oauth(oauth_token)

    access_token_url = os.path.join(jiraappy.JIRA_BASE_URL, 'plugins/servlet/oauth/access-token')
    data_url = os.path.join(jiraappy.JIRA_BASE_URL, 'rest/api/2/myself')

    resp, content = client.request(access_token_url, "POST")
    access_token = dict(urlparse.parse_qsl(content))

    consumer, client = verified_oauth(access_token['oauth_token'], access_token['oauth_token_secret'])

    resp, content = client.request(data_url, "GET")
    if resp['status'] != '200':
        jiraappy.JIRA_OAUTH_TOKEN = None
        jiraappy.JIRA_OAUTH_SECRET = None
        cherrystrap.config_write()
        jiraappy.JIRA_LOGIN_STATUS = None
        jiraappy.JIRA_LOGIN_USER = None
        status, status_msg = ajaxMSG('failure', 'Could not handshake with JIRA Server. Tokens reset')
    else:
        resp_dict = json.loads(content)
        jiraappy.JIRA_OAUTH_TOKEN = access_token['oauth_token']
        jiraappy.JIRA_OAUTH_SECRET = access_token['oauth_token_secret']
        cherrystrap.config_write()
        jiraappy.JIRA_LOGIN_STATUS = True
        jiraappy.JIRA_LOGIN_USER = resp_dict['name']
        status, status_msg = ajaxMSG('success', 'JIRA OAuth Tokens successfully saved to configuration file')

    return status, status_msg
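
Note that os.path.join happens to assemble these URLs correctly on POSIX, but it would insert backslashes on Windows; urlparse.urljoin is the portable choice. A one-line sketch (the base URL is illustrative):

import urlparse

# the trailing slash on the base matters: without it urljoin drops the last path segment
data_url = urlparse.urljoin('https://jira.example.com/', 'rest/api/2/myself')
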
Code example #9
    def _discover_server(self):
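        # note: assumes "from socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, SO_BROADCAST, SHUT_RDWR" at module level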

        cs = socket(AF_INET, SOCK_DGRAM)
        mb_listen_port = 7359

        cs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        cs.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        cs.settimeout(10)
        result, sock_issue = '', None
        for server in ('EmbyServer', 'MediaBrowserServer'):
            bufr = 'who is %s?' % server
            try:
                assert len(bufr) == cs.sendto(bufr, ('255.255.255.255', mb_listen_port)), \
                    'Not all data sent through the socket'
                message, host = cs.recvfrom(1024)
                if message:
                    self._log('%s found at %s: udp query response (%s)' % (server, host[0], message))
                    result = ('{"Address":' not in message and message.split('|')[1] or
                              json.loads(message).get('Address', ''))
                    if result:
                        break
            except AssertionError:
                sock_issue = True
            except (StandardError, Exception):
                pass
        if not sock_issue:
            try:
                cs.shutdown(SHUT_RDWR)
            except (StandardError, Exception):
                pass
        return result
Code example #10
File: jiraInt.py  Project: theguardian/JIRA-APPy
def reindex(action=None):
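    # note: "backend", "jiraappy", "os" and "json" are module-level imports not shown in this excerpt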
    status, msg = '', ''

    consumer, client = backend.stored_oauth()

    if action=='POST':
        data_url = os.path.join(jiraappy.JIRA_BASE_URL, 'rest/api/2/reindex?type=BACKGROUND&indexComments=true&indexChangeHistory=true')
        try:
            resp, content = client.request(data_url, "POST")
            if resp['status'] != '202':
                status, msg = backend.ajaxMSG('failure', 'Call for JIRA Reindex failed with status code '+resp['status'])
            else:
                status, msg = backend.ajaxMSG('success', 'JIRA is now reindexing')
        except:
            status, msg = backend.ajaxMSG('failure', 'Could not connect to '+data_url)

    elif action=='GET':
        data_url = os.path.join(jiraappy.JIRA_BASE_URL, 'rest/api/2/reindex')
        try:
            resp, content = client.request(data_url, "GET")
            if resp['status'] != '200':
                status, msg = backend.ajaxMSG('failure', 'Call for JIRA Reindex status failed with status code '+resp['status'])
            else:
                resp_dict = json.loads(content)
                currentProgress = resp_dict['currentProgress']
                status, msg = backend.ajaxMSG('success', 'JIRA reindex is '+str(currentProgress)+'% complete')
        except:
            status, msg = backend.ajaxMSG('failure', 'Could not connect to '+data_url)

    else:
        pass

    return status, msg
Code example #11
File: chimpy.py  Project: raddevon/pixed
    def _rpc(self, method, **params):
        """make an rpc call to the server"""

        params = urllib.urlencode(params, doseq=True)

        if _debug > 1:
            print __name__, "making request with parameters"
            pprint.pprint(params)
            print __name__, "encoded parameters:", params


        response = urllib2.urlopen("%s?method=%s" %(self.url, method), params)
        data = response.read()
        response.close()

        if _debug > 1:
            print __name__, "rpc call received", data

        result = simplejson.loads(data)

        try:
            if 'error' in result:
                raise ChimpyException(result['error'])
        except TypeError:
            # thrown when results is not iterable (eg bool)
            pass

        return result
Code example #12
def calidades(item):
    logger.info("[" + CHANNELNAME + ".py] calidades")

    # Download the clip page.
    data = scrapertools.cache_page(item.url)
    if (DEBUG): logger.info("data=" + data)

    ititle = scrapertools.get_match(data, '<h1 id="title">(.*?)</h1>')
    sRtmp = scrapertools.get_match(data, 'streamer: "(rtmp://.*?)/.*?",')
    sApp  = scrapertools.get_match(data, 'streamer: "rtmp://.*?/(.*?)",')
    sSwfUrl = MAIN_URL + scrapertools.get_match(data, 'flashplayer: "(.*?)",')
    
    # Request the clip's available qualities.
    clip_id = scrapertools.get_match(item.url, '/(\d+)/')
    if (DEBUG): logger.info('ID:' + clip_id)
    data = scrapertools.cachePage(MAIN_URL + '/clip/ajax/' + clip_id)
    if (DEBUG): logger.info('Json:' + data)

    objects = simplejson.loads(data)

    itemlist = []
    for object in objects['versions']:
        sPlaypath = 'mp4:' + object['src']
        sStreamUrl = sRtmp + ' app=' + sApp + ' swfurl=' + sSwfUrl + ' playpath=' + sPlaypath
        if (DEBUG): logger.info("stream=" + sStreamUrl)

        # Add the quality item to the list.
        itemlist.append( Item(channel=CHANNELNAME, title=object['name'].title()+' ('+object['bitrate']+'kbps)', action="play", url=sStreamUrl, thumbnail=item.thumbnail, extra=ititle, folder=False ) )

    return itemlist
Code example #13
def capitulos(item):
    logger.info("[" + CHANNELNAME + ".py] capitulos")

    try:
        # Extract the programme id.
        programa_id = scrapertools.get_match(item.url, '/(\d+)/')
        if (DEBUG): logger.info('ID:' + programa_id)

        # Request the programme's chapters.
        data = scrapertools.cachePage(MAIN_URL + '/chapters/ajax/' + programa_id)
        if (DEBUG): logger.info('Json:' + data)
    
        objects = simplejson.loads(data, object_hook=to_utf8)
    
        itemlist = []
        for object in objects['chapters']:
            try:
                # If the chapter name includes the programme name, keep only the chapter name.
                ititle = scrapertools.get_match(object['title'], '.*?: (.*)')
            except:
                ititle = object['title']
    
            # Add the chapter item to the list.
            itemlist.append( Item(channel=CHANNELNAME, title=ititle, action="calidades", url=MAIN_URL+'/clip/'+object['id']+'/', thumbnail=item.thumbnail, folder=True ) )
    
        return itemlist
    except:
        # If this programme has no chapters, it is a single clip.
        return calidades(item)
Code example #14
File: boxeeboxclient.py  Project: cold12/Scrobbee
 def handleResponse(self, data):
     self.log.debug("Received response.")
     json = simplejson.loads(data)
     if "error" in data:
         raise BoxeeClientException("Method %s failed: %s" % (json['data']['method'], json['data']['message']), "APIError")
     else:
         self.log.debug("No error found in response.")
         self.response = data
Code example #15
File: twitter.py  Project: mfkasim1/gae-boilerplate
 def get_association_data(self):
     name = self.AUTH_BACKEND_NAME + 'association_data'
     if name in self.request.session:
         association_data = simplejson.loads(self.request.session[name])
         del self.request.session[name]
     else:
         association_data = None
     return association_data
Code example #16
File: twitter.py  Project: mfkasim1/gae-boilerplate
 def user_data(self, access_token):
     """Return user data provided"""
     request = self.oauth_request(access_token, TWITTER_CHECK_AUTH)
     json = self.fetch_response(request)
     try:
         return simplejson.loads(json)
     except ValueError:
         return None
Code example #17
File: default.py  Project: Xoz/xbmc-sickbeard
	def __init__(self, tvdbid = None):
		if tvdbid:
			return self.__show(tvdbid)
		
		f = urllib.urlopen("http://localhost:8081/api/shows/")
		shows = json.loads(f.read())
		for show in shows:
			xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", str(show['tvdbid'])), xbmcgui.ListItem(show['name'], thumbnailImage="http://cache.thetvdb.com/banners/_cache/posters/" + str(show['tvdbid']) + "-1.jpg"), isFolder = True)
		xbmcplugin.endOfDirectory(int(sys.argv[1]))
Code example #18
File: prov_nzbx.py  Project: wraslor/mylar
def searchit(cm):
    entries = []
    mres = {}

    if mylar.NZBX:
        provider = "nzbx"
        #stringsearch = str(cm) + "%20" + str(issue) + "%20" + str(year)
        searchURL = 'https://nzbx.co/api/search?cat=7030&q=' + str(cm)

        logger.fdebug(u'Parsing results from <a href="%s">nzbx.co</a>' %
                      searchURL)
        request = urllib2.Request(searchURL)
        request.add_header('User-Agent', str(mylar.USER_AGENT))
        opener = urllib2.build_opener()

        try:
            data = opener.open(request).read()
        except Exception, e:
            logger.warn('Error fetching data from nzbx.co : %s' % str(e))
            data = False
            return "no results"

        if data:

            d = json.loads(data)

            if not len(d):
                logger.info(u"No results found from nzbx.co")
                return "no results"

            else:
                for item in d:
                    try:
                        url = item['nzb']
                        title = item['name']
                        size = item['size']
                        nzbdate = datetime.datetime.fromtimestamp(
                            item['postdate'])
                        nzbage = abs((datetime.datetime.now() - nzbdate).days)
                        if nzbage <= int(mylar.USENET_RETENTION):
                            entries.append({
                                'title': str(title),
                                'link': str(url)
                            })
                            #logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
                        else:
                            logger.fdebug(
                                '%s outside usenet retention: %s days.' %
                                (title, nzbage))

                        #resultlist.append((title, size, url, provider))
                        #logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))

                    except Exception, e:
                        logger.error(
                            u"An unknown error occurred trying to parse the feed: %s"
                            % e)
Code example #19
File: default.py  Project: Xoz/xbmc-sickbeard
	def __show(self, tvdbid):
		# List show information
		
		# List episodes
		f = urllib.urlopen("http://localhost:8081/api/shows/" + tvdbid + "/")
		show = json.loads(f.read())
		# TODO: deal with error
		for episode in show['episodes']:
			xbmcplugin.addDirectoryItem(int(sys.argv[1]), "", xbmcgui.ListItem(str(episode['season']) + "x" + str(episode['episode']) + " " + episode['name'], thumbnailImage="http://thetvdb.com/banners/_cache/episodes/" + str(tvdbid) + "/" + str(episode["tvdbid"]) + ".jpg"))
		xbmcplugin.endOfDirectory(int(sys.argv[1]))
Code example #20
File: springnote.py  Project: jangxyz/me2chatnote
    def build_from_response(self, data): 
        self.raw = data
        # build proper object
        object_name = self.__class__.__name__.lower()
        self.resource = json.loads(data)[object_name]

        #
        self.process_resource(self.resource)

        return self.resource
Code example #21
File: me2day.py  Project: jangxyz/me2chatnote
 def fetch_resource(url, params={}):
     url += '.json'
     if params:
         query = urllib.urlencode(params)
         url += "?" + urllib.unquote(query)
     # fetch from me2day
     data = urllib.urlopen(url).read()
     #data = Json.parse(data)
     data = json.loads(data)
     return OpenStruct.parse(data)
Code example #22
def load_json(data):
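    # note: "to_utf8", "traceback" and "logger" are defined elsewhere in this file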
    #logger.info("core.jsontools.load_json Probando simplejson en directorio lib")

    try:
        #logger.info("pelisalacarta.core.jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("pelisalacarta.core.jsontools.load_json -> "+repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

        try:
            logger.info("pelisalacarta.core.jsontools.load_json Probando simplejson incluido en el interprete")
            import simplejson
            json_data = simplejson.loads(data, object_hook=to_utf8)
            logger.info("pelisalacarta.core.jsontools.load_json -> "+repr(json_data))
            return json_data
        except:
            logger.info(traceback.format_exc())
            
            try:
                logger.info("pelisalacarta.core.jsontools.load_json Probando json incluido en el interprete")
                import json
                json_data = json.loads(data, object_hook=to_utf8)
                logger.info("pelisalacarta.core.jsontools.load_json -> "+repr(json_data))
                return json_data
            except:
                logger.info(traceback.format_exc())

                try:
                    logger.info("pelisalacarta.core.jsontools.load_json Probando JSON de Plex")
                    json_data = JSON.ObjectFromString(data, encoding="utf-8")
                    logger.info("pelisalacarta.core.jsontools.load_json -> "+repr(json_data))
                    return json_data
                except:
                    logger.info(traceback.format_exc())

    logger.info("pelisalacarta.core.jsontools.load_json No se ha encontrado un parser de JSON valido")
    logger.info("pelisalacarta.core.jsontools.load_json -> (nada)")
    return ""
Code example #23
File: springnote.py  Project: jangxyz/me2chatnote
 def find_error(response):
     body = response.read()
     try:
         errors = json.loads(body)
     except ValueError:
         errors = body
     if response.status == 401:
         return SpringnoteError.Unauthorized(errors)
     elif response.status == 404:
         return SpringnoteError.NotFound(errors)
     else:
         return SpringnoteError.Base(errors)
Code example #24
File: default.py  Project: Xoz/xbmc-sickbeard
	def __init__(self, tvdbid = None, root_directory = None, initial_status = None, season_folders = None, quality = None):
		if tvdbid == None:
			# Find the tvdbid for the show
			kb = xbmc.Keyboard('', 'Enter the show name', False)
			kb.doModal()
			if (kb.isConfirmed()):
				name = urllib.quote(kb.getText())
				f = urllib.urlopen("http://cache.thetvdb.com/api/GetSeries.php?seriesname="+name)
				data = f.read()
		
				dom = minidom.parseString(data)
				data = dom.getElementsByTagName("Data")[0]
				series = data.getElementsByTagName("Series")
				for curSeries in series:
					if len(curSeries.getElementsByTagName('FirstAired')) > 0:
						xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows","add", getText(curSeries.getElementsByTagName('seriesid')[0].childNodes)), xbmcgui.ListItem(getText(curSeries.getElementsByTagName('SeriesName')[0].childNodes) + " (" + getText(curSeries.getElementsByTagName('FirstAired')[0].childNodes) + ")", thumbnailImage="http://thetvdb.com/banners/_cache/posters/" + getText(curSeries.getElementsByTagName('seriesid')[0].childNodes) + "-1.jpg"), isFolder = True)
					else:
						xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows","add", getText(curSeries.getElementsByTagName('seriesid')[0].childNodes)), xbmcgui.ListItem(getText(curSeries.getElementsByTagName('SeriesName')[0].childNodes), thumbnailImage="http://thetvdb.com/banners/_cache/posters/" + getText(curSeries.getElementsByTagName('seriesid')[0].childNodes) + "-1.jpg"), isFolder = True)
				xbmcplugin.endOfDirectory(int(sys.argv[1]))
		elif root_directory == None:
			# Show is selected, got tvdbid - get root directory
			f = urllib.urlopen("http://localhost:8081/api/root_directories/")
			directories = json.loads(f.read())
	
			# Move default to the top of the list
			d = directories['directories'].pop(directories['default'])
			directories['directories'].insert(0, d)
		
			for directory in directories['directories']:
				xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, directory), xbmcgui.ListItem(directory), isFolder = True)
			xbmcplugin.endOfDirectory(int(sys.argv[1]))
		elif initial_status == None:
		    # Select the initial status of passed episodes
		    
		    # TODO: Being lazy here... make this work the same as on the web client
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, 5), xbmcgui.ListItem("Skipped"), isFolder = True)
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, 3), xbmcgui.ListItem("Wanted"), isFolder = True)
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, 6), xbmcgui.ListItem("Archived"), isFolder = True)
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, 7), xbmcgui.ListItem("Ignored"), isFolder = True)
		    xbmcplugin.endOfDirectory(int(sys.argv[1]))
		elif season_folders == None:
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, initial_status, False), xbmcgui.ListItem("Don't use season folders"), isFolder = True)
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, initial_status, True), xbmcgui.ListItem("Use season folders"), isFolder = True)
		    xbmcplugin.endOfDirectory(int(sys.argv[1]))
		elif quality == None:
		    # TODO: Again... lazy.... get these from the server
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, initial_status, initial_status, 3), xbmcgui.ListItem("Standard Definition"), isFolder = True)
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, initial_status, initial_status, 28), xbmcgui.ListItem("High Definition"), isFolder = True)
		    xbmcplugin.addDirectoryItem(int(sys.argv[1]), state("shows", "add", tvdbid, root_directory, initial_status, initial_status, 32799), xbmcgui.ListItem("Any"), isFolder = True)
		    xbmcplugin.endOfDirectory(int(sys.argv[1]))
		else:
		    # Done
		    pass
Code example #25
File: jiraInt.py  Project: theguardian/JIRA-APPy
def bulkDeleteWorklogs(issue_list=None):
    status, msg = '', ''

    consumer, client = backend.stored_oauth()

    num_issues = 0
    num_worklogs = 0
    for issue in issue_list:
        num_issues += 1
        data_url = os.path.join(jiraappy.JIRA_BASE_URL,
                                'rest/api/2/issue/' + issue + '/worklog')
        try:
            resp, content = client.request(data_url, "GET")
            if resp['status'] != '200':
                logger.warn("Request for %s failed with status code %s - %s" (
                    data_url, resp['status'], content))
            else:
                resp_dict = json.loads(content)
                num_results = resp_dict['total']
                if num_results != 0:
                    for result in range(0, num_results):
                        worklog_id = resp_dict['worklogs'][result]['id']
                        data_url = os.path.join(
                            jiraappy.JIRA_BASE_URL,
                            'rest/api/2/issue/' + issue + '/worklog/' +
                            worklog_id + '?adjustEstimate=leave')
                        try:
                            resp, content = client.request(data_url, "DELETE")
                            if resp['status'] != '204':
                                logger.warn(
                                    "Request for %s failed with status code %s - %s" (
                                        data_url, resp['status'], content))
                            else:
                                num_worklogs += 1
                                logger.info(
                                    "Worklog ID %s for issue %s has been deleted"
                                    % (worklog_id, issue))
                        except:
                            logger.warn("Could not connect to %s" % data_url)
                    status, msg = backend.ajaxMSG(
                        'success',
                        '%d worklog(s) have been deleted for %d issues' %
                        (num_worklogs, num_issues))
                else:
                    logger.info("No worklogs found for issue %s" % issue)
        except:
            logger.warn("Could not connect to %s" % data_url)

    return status, msg
Code example #26
 def searchEpisode(self, url_episode="", action=""):
     settings.log(message=url_episode)
     settings.notification(message="Buscando en linea... %s" % url_episode[url_episode.rfind("/") + 1:])
     response = browser.open(url_episode, urlencode({"action": action, "start": "0", "limit": "24", "elang": "ALL"}))
     if response.code == 200:
         data = json.loads(response.read())
         for item in data:
             self.url_list.append(
                 settings.url_address + '/serie/' + item['permalink'] + '/temporada-' + item[
                     'season'] + '/episodio-' + item['episode'])
             self.titles.append(
                 item['show']['title']['en'] + " S%sE%s" % (item['season'], item['episode']))  # get the title
     else:
         settings.log(">>>>>>>HTTP %s<<<<<<<" % response.code)
         settings.notification(message="HTTP %s" % response.code, force=True)
Code example #27
File: prov_nzbx.py  Project: nncrypted/mylar
def searchit(cm):
    entries = []
    mres = {}

    if mylar.NZBX:
        provider = "nzbx"
        # stringsearch = str(cm) + "%20" + str(issue) + "%20" + str(year)
        searchURL = "https://nzbx.co/api/search?cat=7030&q=" + str(cm)

        logger.fdebug(u'Parsing results from <a href="%s">nzbx.co</a>' % searchURL)
        request = urllib2.Request(searchURL)
        request.add_header("User-Agent", str(mylar.USER_AGENT))
        opener = urllib2.build_opener()

        try:
            data = opener.open(request).read()
        except Exception, e:
            logger.warn("Error fetching data from nzbx.co : %s" % str(e))
            data = False
            return "no results"

        if data:

            d = json.loads(data)

            if not len(d):
                logger.info(u"No results found from nzbx.co")
                return "no results"

            else:
                for item in d:
                    try:
                        url = item["nzb"]
                        title = item["name"]
                        size = item["size"]
                        nzbdate = datetime.datetime.fromtimestamp(item["postdate"])
                        nzbage = abs((datetime.datetime.now() - nzbdate).days)
                        if nzbage <= int(mylar.USENET_RETENTION):
                            entries.append({"title": str(title), "link": str(url)})
                            # logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
                        else:
                            logger.fdebug("%s outside usenet retention: %s days." % (title, nzbage))

                        # resultlist.append((title, size, url, provider))
                        # logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))

                    except Exception, e:
                        logger.error(u"An unknown error occurred trying to parse the feed: %s" % e)
Code example #28
File: jiraInt.py  Project: theguardian/JIRA-APPy
def reindex(action=None):
    status, msg = '', ''

    consumer, client = backend.stored_oauth()

    if action == 'POST':
        data_url = os.path.join(
            jiraappy.JIRA_BASE_URL,
            'rest/api/2/reindex?type=BACKGROUND&indexComments=true&indexChangeHistory=true'
        )
        try:
            resp, content = client.request(data_url, "POST")
            if resp['status'] != '202':
                status, msg = backend.ajaxMSG(
                    'failure',
                    'Call for JIRA Reindex failed with status code ' +
                    resp['status'])
            else:
                status, msg = backend.ajaxMSG('success',
                                              'JIRA is now reindexing')
        except:
            status, msg = backend.ajaxMSG('failure',
                                          'Could not connect to ' + data_url)

    elif action == 'GET':
        data_url = os.path.join(jiraappy.JIRA_BASE_URL, 'rest/api/2/reindex')
        try:
            resp, content = client.request(data_url, "GET")
            if resp['status'] != '200':
                status, msg = backend.ajaxMSG(
                    'failure',
                    'Call for JIRA Reindex status failed with status code ' +
                    resp['status'])
            else:
                resp_dict = json.loads(content)
                currentProgress = resp_dict['currentProgress']
                status, msg = backend.ajaxMSG(
                    'success',
                    'JIRA reindex is ' + str(currentProgress) + '% complete')
        except:
            status, msg = backend.ajaxMSG('failure',
                                          'Could not connect to ' + data_url)

    else:
        pass

    return status, msg
Code example #29
File: dailytvtorrents.py  Project: gabon/Sick-Beard
    def _api_call(self, fnName, params=dict()):
        """
        Wrapper for simple json api call.
            
        @param fnName: string, something like '1.0/torrent.getInfo'
        @param params: dict of params, if any
        @return: mixed - returns json result as an object, or None on failure.
        """
        try:
            paramsEnc = urllib.urlencode(params)

            opener = urllib2.build_opener()
            opener.addheaders = [('User-Agent', USER_AGENT),
                                 ('Accept-Encoding', 'gzip,deflate')]

            usock = opener.open('http://api.dailytvtorrents.org/%s?%s' %
                                (fnName, paramsEnc))
            url = usock.geturl()
            encoding = usock.info().get("Content-Encoding")

            if encoding in ('gzip', 'x-gzip', 'deflate'):
                content = usock.read()
                if encoding == 'deflate':
                    data = StringIO.StringIO(zlib.decompress(content))
                else:
                    data = gzip.GzipFile(fileobj=StringIO.StringIO(content))
                result = data.read()

            else:
                result = usock.read()

            usock.close()

            if result:
                return json.loads(result)
            else:
                return None

        except urllib2.HTTPError, e:
            if e.code == 404:
                # for a 404, we fake an empty result
                return None
            logger.log(
                u"HTTP error " + str(e.code) +
                " while calling DailyTvTorrents api " + fnName, logger.ERROR)
            return None
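
For reference, the decompression branch above can be isolated into a small helper; a sketch under the same assumptions (Python 2; content and encoding are the raw body and Content-Encoding header read from the response):

import StringIO
import gzip
import zlib

def decode_body(content, encoding):
    # mirror the branch above: gunzip or inflate the raw bytes, else pass through
    if encoding == 'deflate':
        # note: some servers send raw deflate, which needs zlib.decompress(content, -zlib.MAX_WBITS)
        return zlib.decompress(content)
    if encoding in ('gzip', 'x-gzip'):
        return gzip.GzipFile(fileobj=StringIO.StringIO(content)).read()
    return content
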
Code example #30
File: prov_nzbx.py  Project: Tension113/mylar
def searchit(cm):
        entries = []
        mres = {}

        if mylar.NZBX:
            provider = "nzbx"
            #stringsearch = str(cm) + "%20" + str(issue) + "%20" + str(year)
            searchURL = 'https://nzbx.co/api/search?cat=7030&q=' + str(cm)
                
            logger.fdebug(u'Parsing results from <a href="%s">nzbx.co</a>' % searchURL)
            request = urllib2.Request(searchURL)
            request.add_header('User-Agent', str(mylar.USER_AGENT))
            opener = urllib2.build_opener()

            try:
                data = opener.open(request).read()
            except Exception, e:
                logger.warn('Error fetching data from nzbx.co : %s' % str(e))
                data = False
                return "no results"
           
            if data:
                
                d = json.loads(data)
                
                if not len(d):
                    logger.info(u"No results found from nzbx.co")
                    return "no results"
                
                else:
                    for item in d:
                        try:
                            url = item['nzb']
                            title = item['name']
                            size = item['size']
                            
                            entries.append({
                                    'title':   str(title),
                                    'link':    str(url)
                                    })
                            #resultlist.append((title, size, url, provider))
                            logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
                            
                        except Exception, e:
                            logger.error(u"An unknown error occurred trying to parse the feed: %s" % e)
Code example #31
File: backend.py  Project: theguardian/JIRA-APPy
def check_oauth():
    consumer = oauth.Consumer(jiraappy.CONSUMER_KEY, jiraappy.CONSUMER_SECRET)
    accessToken = oauth.Token(jiraappy.JIRA_OAUTH_TOKEN, jiraappy.JIRA_OAUTH_SECRET)
    client = oauth.Client(consumer, accessToken)
    client.set_signature_method(SignatureMethod_RSA_SHA1())

    data_url = os.path.join(jiraappy.JIRA_BASE_URL, 'rest/api/2/myself')

    resp, content = client.request(data_url, "GET")
    if resp['status'] != '200':
        jiraappy.JIRA_LOGIN_STATUS = None
        jiraappy.JIRA_LOGIN_USER = None
        logger.warn("OAuth credentials missing or invalid")
    else:
        resp_dict = json.loads(content)
        jiraappy.JIRA_LOGIN_STATUS = True
        jiraappy.JIRA_LOGIN_USER = resp_dict['name']
        logger.info("JIRA user %s verified login" % resp_dict['name'])
Code example #32
    def _notifyTrakt(self, method, api, username, password, data={}):
        logger.log("trakt_notifier: Call method " + method, logger.DEBUG)

        json._toggle_speedups(False)

        if not api:
            api = self._api()
        if not username:
            username = self._username()
        if not password:
            password = self._password()
        password = sha1(password).hexdigest()

        method = method.replace("%API%", api)

        data["username"] = username
        data["password"] = password

        encoded_data = json.dumps(data)

        try:
            logger.log(
                "trakt_notifier: Calling method http://api.trakt.tv/" +
                method + ", with data" + encoded_data, logger.DEBUG)
            stream = urllib2.urlopen("http://api.trakt.tv/" + method,
                                     encoded_data)
            resp = stream.read()

            resp = json.loads(resp)

            if ("error" in resp):
                raise Exception(resp["error"])
        except (IOError, json.JSONDecodeError):
            logger.log("trakt_notifier: Failed calling method", logger.ERROR)
            return False

        if (resp["status"] == "success"):
            logger.log(
                "trakt_notifier: Succeeded calling method. Result: " +
                resp["message"], logger.DEBUG)
            return True

        logger.log("trakt_notifier: Failed calling method", logger.ERROR)
        return False
Code example #33
File: boxeeboxclient.py  Project: cold12/Scrobbee
 def readResponse(self):
     self.log.debug("Reading response...")
     data = ""
     while True:
         chunk = self.socket.recv(1024)
         if not chunk:
             break
         data += chunk
         if data.find("\n") >= 0:
             break            
     self.log.debug("Decoding response...")
     self.log.debug(str(data))
     json = simplejson.loads(data)
     if json.has_key("error"):
         raise BoxeeClientException("Found error code %i in response: %s" % (json['error']['code'], json['error']['message']), "APIError")
     else:
         self.log.debug("No error found in response.")
         self.log.debug("Response: %s" % str(json))
         return json
Code example #34
File: dailytvtorrents.py  Project: gabon/Sick-Beard
 def _api_call(self, fnName, params = dict()):
     """
     Wrapper for simple json api call.
         
     @param fnName: string, something like '1.0/torrent.getInfo'
     @param params: dict of params, if any
     @return: mixed - returns json result as an object, or None on failure.
     """
     try:
         paramsEnc = urllib.urlencode(params)
         
         opener = urllib2.build_opener()
         opener.addheaders = [('User-Agent', USER_AGENT), ('Accept-Encoding', 'gzip,deflate')]
         
         usock = opener.open('http://api.dailytvtorrents.org/%s?%s' % (fnName, paramsEnc))
         url = usock.geturl()
         encoding = usock.info().get("Content-Encoding")
 
         if encoding in ('gzip', 'x-gzip', 'deflate'):
             content = usock.read()
             if encoding == 'deflate':
                 data = StringIO.StringIO(zlib.decompress(content))
             else:
                 data = gzip.GzipFile(fileobj=StringIO.StringIO(content))
             result = data.read()
 
         else:
             result = usock.read()
 
         usock.close()
         
         if result:
             return json.loads(result)
         else:
             return None   
 
     except urllib2.HTTPError, e:
         if e.code == 404:
             # for a 404, we fake an empty result
             return None
         logger.log(u"HTTP error " + str(e.code) + " while calling DailyTvTorrents api " + fnName, logger.ERROR)
         return None
Code example #35
File: addon.py  Project: frankc916/anonymous-repo
def load_json(data):
	def to_utf8(dct):
		rdct = {}
		for k, v in dct.items():
			if isinstance(v, (str, unicode)): rdct[k] = v.encode('utf8', 'ignore')
			else: rdct[k] = v
		return rdct
	try:
		from lib import simplejson
		json_data = simplejson.loads(data, object_hook=to_utf8)
		return json_data
	except:
		try:
			import json
			json_data = json.loads(data, object_hook=to_utf8)
			return json_data
		except:
			import sys
			for line in sys.exc_info(): print "%s" % line
	return None
Code example #36
File: addon.py  Project: georgdimitrov/anonymous-repo
def load_json(data):
	def to_utf8(dct):
		rdct = {}
		for k, v in dct.items():
			if isinstance(v, (str, unicode)): rdct[k] = v.encode('utf8', 'ignore')
			else: rdct[k] = v
		return rdct
	try:
		from lib import simplejson
		json_data = simplejson.loads(data, object_hook=to_utf8)
		return json_data
	except:
		try:
			import json
			json_data = json.loads(data, object_hook=to_utf8)
			return json_data
		except:
			import sys
			for line in sys.exc_info(): print "%s" % line
	return None
Code example #37
File: trakt.py  Project: BigCabbage/Sick-Beard
    def _notifyTrakt(self, method, api, username, password, data = {}):
        logger.log("trakt_notifier: Call method " + method, logger.DEBUG)

        json._toggle_speedups(False)

        if not api:
            api = self._api()
        if not username:
            username = self._username()
        if not password:
            password = self._password()
        password = sha1(password).hexdigest()

        method = method.replace("%API%", api)

        data["username"] = username
        data["password"] = password

        encoded_data = json.dumps(data)

        try:
            logger.log("trakt_notifier: Calling method http://api.trakt.tv/" + method + ", with data" + encoded_data, logger.DEBUG)
            stream = urllib2.urlopen("http://api.trakt.tv/" + method, encoded_data)
            resp = stream.read()

            resp = json.loads(resp)
            
            if ("error" in resp):
                raise Exception(resp["error"])
        except (IOError, json.JSONDecodeError):
            logger.log("trakt_notifier: Failed calling method", logger.ERROR)
            return False

        if (resp["status"] == "success"):
            logger.log("trakt_notifier: Succeeded calling method. Result: " + resp["message"], logger.DEBUG)
            return True

        logger.log("trakt_notifier: Failed calling method", logger.ERROR)
        return False
Code example #38
def searchit(cm):
    entries = []
    mres = {}

    if mylar.NZBX:
        provider = "nzbx"
        # stringsearch = str(cm) + "%20" + str(issue) + "%20" + str(year)
        searchURL = "https://nzbx.co/api/search?cat=7030&q=" + str(cm)

        logger.fdebug(u'Parsing results from <a href="%s">nzbx.co</a>' % searchURL)

        try:
            data = urllib2.urlopen(searchURL, timeout=20).read()
        except urllib2.URLError, e:
            logger.fdebug("Error fetching data from nzbx.co: %s" % str(e))
            data = False
            return "no results"

        if data:

            d = json.loads(data)

            if not len(d):
                logger.info(u"No results found from nzbx.co")
                return "no results"

            else:
                for item in d:
                    try:
                        url = item["nzb"]
                        title = item["name"]
                        size = item["size"]

                        entries.append({"title": str(title), "link": str(url)})
                        # resultlist.append((title, size, url, provider))
                        logger.fdebug("Found %s. Size: %s" % (title, helpers.bytes_to_mb(size)))

                    except Exception, e:
                        logger.error(u"An unknown error occurred trying to parse the feed: %s" % e)
Code example #39
File: a3media.py  Project: Medisan/TVWeb
def load_json(data):
    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items():
            if isinstance(v, (str, unicode)):
                rdct[k] = v.encode('utf8', 'ignore')
            else:
                rdct[k] = v
        return rdct
    try:
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        return json_data
    except:
        try:
            import json
            json_data = json.loads(data, object_hook=to_utf8)
            return json_data
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
Code example #40
File: utorrent.py  Project: achmetinternet/Sick-Beard
        logger.log(u"Unable to connect to uTorrent, Please check your config", logger.ERROR)
        return False    
    
    try:
        open_request = urllib2.urlopen(host+"token.html")
        token = re.findall("<div.*?>(.*?)</", open_request.read())[0]
    except Exception, e:
        logger.log(u"Unable to get uTorrent Token "+ex(e), logger.ERROR)
        return False            
    # obtained the token
    
    try:
        #get the list of current torrents
        list_url = "%s?list=1&token=%s" % (host, token)
        open_request = urllib2.urlopen(list_url)
        torrent_list = json.loads(open_request.read())["torrents"]

        add_url = "%s?action=add-url&token=%s&s=%s" % (host, token, urllib.quote_plus(result.url))
        if result.provider.token:
            add_url = add_url + ":COOKIE:" + result.provider.token
        
        logger.log(u"Calling uTorrent with url: "+add_url,logger.DEBUG)

        open_request = urllib2.urlopen(add_url)
        
        #add label and/or pause torrent
        if sickbeard.TORRENT_PATH or sickbeard.TORRENT_PAUSED:
            start_time = time.time()
            new_torrent_hash = ""
            while (True):
                time.sleep(1)
Code example #41
File: transmission.py  Project: lindhor/headphones
    opener = urllib2.build_opener()
    try:
        data = opener.open(request).read()
    except urllib2.HTTPError, e:
        if e.code == 409:
            sessionid = e.hdrs['x-transmission-session-id']
        else:
            logger.error('Could not connect to Transmission. Error: ' + str(e))
    except Exception, e:
        logger.error('Could not connect to Transmission. Error: ' + str(e))

    if not sessionid:
        logger.error("Error getting Session ID from Transmission")
        return

    request.add_header('x-transmission-session-id', sessionid)

    postdata = json.dumps({'method': method, 'arguments': arguments})

    request.add_data(postdata)

    try:
        #logger.debug(u"Req: %s" % postdata)
        response = json.loads(opener.open(request).read())
        #logger.debug(u"Rsp: %s" % response)
    except Exception, e:
        logger.error("Error sending torrent to Transmission: " + str(e))
        return

    return response
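
Transmission answers the first RPC call with HTTP 409 and an X-Transmission-Session-Id header, which the excerpt above retries with. A self-contained sketch of the full cycle (Python 2; the URL is illustrative):

import json
import urllib2

def transmission_rpc(method, arguments, url='http://localhost:9091/transmission/rpc'):
    request = urllib2.Request(url, json.dumps({'method': method, 'arguments': arguments}))
    try:
        return json.loads(urllib2.urlopen(request).read())
    except urllib2.HTTPError, e:
        if e.code != 409:
            raise
        # the first call is rejected; retry with the session id Transmission handed back
        request.add_header('x-transmission-session-id', e.hdrs['x-transmission-session-id'])
        return json.loads(urllib2.urlopen(request).read())
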
Code example #42
File: blutopia.py  Project: jihoon720/SickGear
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': 'torrents',
            'get': '(.*?download)(?:_check)?(.*)'
        }.items())
        log = ''
        if self.filter:
            non_marked = 'f0' in self.filter
            # if search_any, use unselected to exclude, else use selected to keep
            filters = ([f for f in self.may_filter if f in self.filter],
                       [f for f in self.may_filter
                        if f not in self.filter])[non_marked]
            filters += (
                ((all([x in filters for x in 'free', 'double', 'feat'])
                  and ['freedoublefeat'] or []) +
                 (all([x in filters
                       for x in 'free', 'double']) and ['freedouble'] or []) +
                 (all([x in filters
                       for x in 'feat', 'double']) and ['featdouble'] or [])),
                ((not all([x not in filters for x in 'free', 'double', 'feat'])
                  and ['freedoublefeat'] or []) +
                 (not all([x not in filters
                           for x in 'free', 'double']) and ['freedouble']
                  or []) +
                 (not all([x not in filters
                           for x in 'feat', 'double']) and ['featdouble']
                  or [])))[non_marked]
            rc['filter'] = re.compile('(?i)^(%s)$' % '|'.join([
                '%s' % f for f in filters
                if (f in self.may_filter and self.may_filter[f][1]) or f
            ]))
            log = '%sing (%s) ' % (('keep', 'skipp')[non_marked], ', '.join([
                f in self.may_filter and self.may_filter[f][0] or f
                for f in filters
            ]))
        for mode in search_params.keys():
            if mode in ['Season', 'Episode']:
                show_type = self.show.air_by_date and 'Air By Date' \
                    or self.show.is_sports and 'Sports' or None
                if show_type:
                    logger.log(
                        u'Provider does not carry shows of type: [%s], skipping'
                        % show_type, logger.DEBUG)
                    return results

            for search_string in search_params[mode]:
                search_string = isinstance(
                    search_string,
                    unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % (self.token, '+'.join(
                    search_string.split()), self._categories_string(
                        mode, ''), '', '', '')

                resp = self.get_url(search_url)
                if self.should_skip():
                    return results

                resp_json = None
                if None is not self.resp:
                    try:
                        from lib import simplejson as json
                        resp_json = json.loads(resp)
                    except (StandardError, Exception):
                        pass

                cnt = len(items[mode])
                try:
                    if not resp or (resp_json and not resp_json.get('rows')):
                        raise generic.HaltParseException

                    html = '<html><body>%s</body></html>' % \
                           (resp if None is self.resp else
                            self.resp.replace('</tbody>', '%s</tbody>' % ''.join(resp_json.get('result', []))))
                    with BS4Parser(html, features=['html5lib',
                                                   'permissive']) as soup:
                        torrent_table = soup.find('table', class_='table')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all(
                            'tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            if any(self.filter):
                                marked = ','.join([
                                    x.attrs.get('data-original-title',
                                                '').lower()
                                    for x in tr.find_all(
                                        'i',
                                        attrs={
                                            'class': [
                                                'text-gold', 'fa-diamond',
                                                'fa-certificate'
                                            ]
                                        })
                                ])
                                # noinspection PyTypeChecker
                                munged = ''.join(
                                    filter(marked.__contains__,
                                           ['free', 'double', 'feat']))
                                if ((non_marked
                                     and rc['filter'].search(munged)) or
                                    (not non_marked
                                     and not rc['filter'].search(munged))):
                                    continue
                            try:
                                head = head if None is not head else self._header_row(
                                    tr, {
                                        'seed': r'circle-up',
                                        'leech': r'circle-down',
                                        'size': r'fa-file'
                                    })
                                seeders, leechers, size = [
                                    tryInt(n, n) for n in [
                                        cells[head[x]].get_text().strip()
                                        for x in 'seed', 'leech', 'size'
                                    ]
                                ]
                                if self._reject_item(seeders, leechers):
                                    continue

                                title = tr.find('a', href=rc['info'])
                                title = title.get_text().strip(
                                ) if None is self.resp else title[
                                    'data-original-title']
                                download_url = self._link(''.join(
                                    rc['get'].findall(
                                        tr.find('a',
                                                href=rc['get'])['href'])[0]))
                            except (AttributeError, TypeError, ValueError,
                                    IndexError):
                                continue

                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, seeders,
                                     self._bytesizer(size)))
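The densest part of this provider is the flag filtering: rows are decorated with gold/diamond/certificate icons whose data-original-title values name the discounts ('free', 'double', 'feat'), and rc['filter'] decides whether a row is kept or skipped. A hypothetical restatement of that per-row test (Python 2), with the same semantics but the branches spelled out:

def row_is_filtered(marked_flags, wanted, non_marked):
    # marked_flags: flags found on the row, e.g. set(['free', 'double'])
    # wanted: the compiled rc['filter'] regex of user-selected flags
    # non_marked: True when the user chose to skip marked rows ('f0')
    munged = ''.join(f for f in ('free', 'double', 'feat') if f in marked_flags)
    hit = bool(wanted.search(munged))
    # skip mode drops rows that match; keep mode drops rows that do not
    return hit if non_marked else not hit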
コード例 #45
def play(item):
    logger.info("[rtve.py] play")

    # Extract the video id from URLs such as:
    #http://www.rtve.es/mediateca/videos/20100410/telediario-edicion/741525.shtml
    #http://www.rtve.es/alacarta/videos/espana-entre-el-cielo-y-la-tierra/espana-entre-el-cielo-y-la-tierra-la-mancha-por-los-siglos-de-los-siglos/232969/
    logger.info("url=" + item.url)
    patron = 'http://.*?/([0-9]+)/'
    data = item.url.replace(".shtml", "/")
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    codigo = matches[0]
    logger.info("assetid=" + codigo)

    thumbnail = item.thumbnail

    # New method (October 2012): download a PNG whose metadata embeds the video URL
    # e.g. http://www.rtve.es/ztnr/movil/thumbnail/mandulis/videos/1538906.png
    # manager endpoint: http://www.rtve.es/odin/loki/TW96aWxsYS81LjAgKExpbnV4OyBVOyBBbmRyb2lkIDQuMC4zOyBlcy1lczsgTlZTQkwgVk9SVEVYIEJ1aWxkL0lNTDc0SykgQXBwbGVXZWJLaXQvNTM0LjMwIChLSFRNTCwgbGlrZSBHZWNrbykgVmVyc2lvbi80LjAgTW9iaWxlIFNhZmFyaS81MzQuMzA=/
    #urlimg = 'http://www.rtve.es/ztnr/movil/thumbnail/mandulis/videos/'+codigo+'.png'

    try:
        from lib import simplejson
        data = scrapertools.cachePage(
            "http://www.rtve.es/odin/loki/TW96aWxsYS81LjAgKExpbnV4OyBVOyBBbmRyb2lkIDQuMC4zOyBlcy1lczsgTlZTQkwgVk9SVEVYIEJ1aWxkL0lNTDc0SykgQXBwbGVXZWJLaXQvNTM0LjMwIChLSFRNTCwgbGlrZSBHZWNrbykgVmVyc2lvbi80LjAgTW9iaWxlIFNhZmFyaS81MzQuMzA=/"
        )
        json_data = simplejson.loads(data)
        manager = json_data["manager"]
    except:
        manager = "mandulis"

    urlimg = 'http://www.rtve.es/ztnr/movil/thumbnail/' + manager + '/videos/' + codigo + '.png'
    data = scrapertools.cachePage(urlimg)  # download the PNG with embedded metadata
    data = data.decode("base-64")  # base64-decode it
    patron = 'tEXt([^#]+)#'
    matches = re.compile(patron,
                         re.DOTALL).findall(data)  # extract the obfuscated text
    try:
        cyphertext = matches[0]
    except:
        cyphertext = ""
    try:
        key = data.split('#')[1]
        key = key[1:270]  # extract the key
        clave = ""
        for x in key:
            if x.isdigit():  # the key is the digits only
                clave = clave + x
    except:
        clave = ""

    try:
        intermediate_cyphertext = first_pass(
            cyphertext)  # first pass: recover the intermediate text
        url = second_pass(
            clave, intermediate_cyphertext)  # second pass: decode the URL
    except:
        url = ""
    #################################################################################

    if url == "":
        try:
            # Build the URL
            #http://www.rtve.es/api/videos/1311573/config/alacarta_videos.xml
            url = 'http://www.rtve.es/api/videos/' + codigo + '/config/alacarta_videos.xml'
            logger.info("[rtve.py] url=" + url)
            # Download the XML and look for the DataId
            #<cdnAssetDataId>828164</cdnAssetDataId>
            data = scrapertools.cachePage(url)
            patron = '<cdnAssetDataId>([^<]+)</cdnAssetDataId>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            url = ""
            if len(matches) > 0:
                codigo = matches[0]
            else:
                codigo = ""
            logger.info("assetDataId=" + codigo)
            if codigo != "":
                #url = http://www.rtve.es/ztnr/preset.jsp?idpreset=828164&lenguaje=es&tipo=video
                url = 'http://www.rtve.es/ztnr/preset.jsp?idpreset=' + codigo + '&lenguaje=es&tipo=video'
                data = scrapertools.cachePage(url)
                # Look for the video URL
                # <li><em>File Name</em>&nbsp;<span class="titulo">mp4/4/8/1328228115384.mp4</span></li>
                patron = '<li><em>File Name</em>.*?"titulo">([^<]+)</span></li>'
                matches = re.compile(patron, re.DOTALL).findall(data)
                scrapertools.printMatches(matches)
                if len(matches) > 0:
                    # match = mp4/4/8/1328228115384.mp4
                    #http://www.rtve.es/resources/TE_NGVA/mp4/4/8/1328228115384.mp4
                    url = "http://www.rtve.es/resources/TE_NGVA/" + matches[0]
                else:
                    url = ""

        except:
            url = ""

    if url == "":
        try:
            # Build the URL
            #http://www.rtve.es/swf/data/es/videos/alacarta/5/2/5/1/741525.xml
            url = 'http://www.rtve.es/swf/data/es/videos/alacarta/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[
                    -3:-2] + '/' + codigo[-4:-3] + '/' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            # Download the XML and look for the video
            #<file>rtmp://stream.rtve.es/stream/resources/alacarta/flv/6/9/1270911975696.flv</file>
            data = scrapertools.cachePage(url)
            #print url
            #print data
            patron = '<file>([^<]+)</file>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            if len(matches) > 0:
                #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
                url = matches[0]
            else:
                url = ""

            patron = '<image>([^<]+)</image>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #print len(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            thumbnail = matches[0]
        except:
            url = ""

    # Make a second attempt
    if url == "":
        try:
            # Build the URL
            #http://www.rtve.es/swf/data/es/videos/video/0/5/8/0/500850.xml
            url = 'http://www.rtve.es/swf/data/es/videos/video/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[
                    -3:-2] + '/' + codigo[-4:-3] + '/' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            # Download the XML and look for the video
            #<file>rtmp://stream.rtve.es/stream/resources/alacarta/flv/6/9/1270911975696.flv</file>
            data = scrapertools.cachePage(url)
            patron = '<file>([^<]+)</file>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            url = matches[0]
        except:
            url = ""

    if url == "":

        try:
            # Compone la URL
            #http://www.rtve.es/swf/data/es/videos/video/0/5/8/0/500850.xml
            url = 'http://www.rtve.es/swf/data/es/videos/video/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[
                    -3:-2] + '/' + codigo[-4:-3] + '/' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            # Download the XML and look for the assetDataId
            #<plugin ... assetDataId::576596"/>
            data = scrapertools.cachePage(url)
            #logger.info("[rtve.py] data="+data)
            patron = 'assetDataId\:\:([^"]+)"'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            codigo = matches[0]
            logger.info("assetDataId=" + codigo)

            #url = http://www.rtve.es/scd/CONTENTS/ASSET_DATA_VIDEO/6/9/5/6/ASSET_DATA_VIDEO-576596.xml
            url = 'http://www.rtve.es/scd/CONTENTS/ASSET_DATA_VIDEO/' + codigo[
                -1:] + '/' + codigo[-2:-1] + '/' + codigo[-3:-2] + '/' + codigo[
                    -4:-3] + '/ASSET_DATA_VIDEO-' + codigo + '.xml'
            logger.info("[rtve.py] url=" + url)

            data = scrapertools.cachePage(url)
            #logger.info("[rtve.py] data="+data)
            patron = '<field>[^<]+'
            patron += '<key>ASD_FILE</key>[^<]+'
            patron += '<value>([^<]+)</value>[^<]+'
            patron += '</field>'
            matches = re.compile(patron, re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            codigo = matches[0]
            logger.info("[rtve.py] url=" + url)

            #/deliverty/demo/resources/mp4/4/3/1290960871834.mp4
            #http://media4.rtve.es/deliverty/demo/resources/mp4/4/3/1290960871834.mp4
            #http://www.rtve.es/resources/TE_NGVA/mp4/4/3/1290960871834.mp4
            url = "http://www.rtve.es/resources/TE_NGVA" + codigo[-26:]

        except:
            url = ""
    logger.info("[rtve.py] url=" + url)

    itemlist = []
    if url == "":
        logger.info("[rtve.py] Extrayendo URL tipo iPad")
        headers = []
        headers.append([
            "User-Agent",
            "Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10"
        ])
        location = scrapertools.get_header_from_response(
            item.url, headers=headers, header_to_get="location")
        logger.info("[rtve.py] location=" + location)

        data = scrapertools.cache_page(location, headers=headers)
        #<a href="/usuarios/sharesend.shtml?urlContent=/resources/TE_SREP63/mp4/4/8/1334334549284.mp4" target
        url = scrapertools.get_match(
            data,
            '<a href="/usuarios/sharesend.shtml\?urlContent\=([^"]+)" target')
        logger.info("[rtve.py] url=" + url)
        #http://www.rtve.es/resources/TE_NGVA/mp4/4/8/1334334549284.mp4
        url = urlparse.urljoin("http://www.rtve.es", url)
        logger.info("[rtve.py] url=" + url)

    if url != "":
        itemlist.append(
            Item(channel=CHANNELNAME,
                 title=item.title,
                 action="play",
                 url=url,
                 thumbnail=thumbnail,
                 plot=item.plot,
                 server="directo",
                 show=item.title,
                 folder=False))

    return itemlist
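The first extraction path above hides the media URL inside a thumbnail PNG: the download is base64-decoded, the obfuscated URL sits in a tEXt chunk before a '#' separator, and the numeric decryption key follows it. A hypothetical helper (Python 2) isolating just that parsing step, equivalent to the inline code above:

import re

def extract_cypher_and_key(png_data):
    raw = png_data.decode("base-64")
    match = re.search('tEXt([^#]+)#', raw, re.DOTALL)
    cyphertext = match.group(1) if match else ""
    try:
        key = raw.split('#')[1][1:270]
        clave = ''.join(c for c in key if c.isdigit())  # key is digits only
    except IndexError:
        clave = ""
    return cyphertext, clave

first_pass and second_pass (defined elsewhere in rtve.py) then turn cyphertext and clave into the final URL.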
コード例 #46
ファイル: jsontools.py プロジェクト: Jpocas3212/salva59sg
def load_json(data):
    logger.info(
        "core.jsontools.load_json Probando simplejson en directorio lib")

    def to_utf8(dct):
        if isinstance(dct, dict):
            return dict((to_utf8(key), to_utf8(value))
                        for key, value in dct.iteritems())
        elif isinstance(dct, list):
            return [to_utf8(element) for element in dct]
        elif isinstance(dct, unicode):
            return dct.encode('utf-8')
        else:
            return dct

    try:
        logger.info(
            "core.jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("core.jsontools.load_json -> " + repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

        try:
            logger.info(
                "core.jsontools.load_json Probando simplejson incluido en el interprete"
            )
            import simplejson
            json_data = simplejson.loads(data, object_hook=to_utf8)
            logger.info("core.jsontools.load_json -> " + repr(json_data))
            return json_data
        except:
            logger.info(traceback.format_exc())

            try:
                logger.info(
                    "core.jsontools.load_json Probando json incluido en el interprete"
                )
                import json
                json_data = json.loads(data, object_hook=to_utf8)
                logger.info("core.jsontools.load_json -> " + repr(json_data))
                return json_data
            except:
                logger.info(traceback.format_exc())

                try:
                    logger.info(
                        "core.jsontools.load_json Probando JSON de Plex")
                    json_data = JSON.ObjectFromString(data, encoding="utf-8")
                    logger.info("core.jsontools.load_json -> " +
                                repr(json_data))
                    return json_data
                except:
                    logger.info(traceback.format_exc())

    logger.info(
        "core.jsontools.load_json No se ha encontrado un parser de JSON valido"
    )
    logger.info("core.jsontools.load_json -> (nada)")
    return ""