Example #1
    def execute(self):
        # Parse URL
        password = None
        portnum = None
        if self.url.startswith('ssh:'):
            m = self.SSH_FORMAT.match(self.url)
            if m is None:
                raise ModuleError(self.module,
                                  "SSH error: invalid URL %r" % self.url)
            username, password, hostname, portnum, path = m.groups()
            password = urllib.unquote_plus(password)
            path = urllib.unquote_plus(path)
        elif self.url.startswith('scp:'):
            m = self.SCP_FORMAT.match(self.url)
            if m is None:
                raise ModuleError(self.module,
                                  "SSH error: invalid URL %r" % self.url)
            username, hostname, path = m.groups()
        else:
            raise ModuleError(self.module, "SSHDownloader: Invalid URL")

        if portnum is None:
            portnum = 22
        else:
            portnum = int(portnum)
        return self._open_ssh(username, password, hostname, portnum, path)
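The SSH_FORMAT and SCP_FORMAT class attributes are defined elsewhere and not shown in this snippet. Purely as an illustration, patterns whose groups line up with the unpacking above could look like the sketch below (these are assumptions, not the module's actual regexes).

import re

# Hypothetical patterns matching the group order used in execute():
# ssh -> (username, password, hostname, portnum, path), scp -> (username, hostname, path)
SSH_FORMAT = re.compile(r'^ssh://([^:@]+):([^@]+)@([^:/]+)(?::(\d+))?(/.*)$')
SCP_FORMAT = re.compile(r'^scp://([^@]+)@([^:/]+):(.+)$')

m = SSH_FORMAT.match('ssh://alice:s3cret%2Bpw@example.org:2222/data/file.txt')
print(m.groups())  # ('alice', 's3cret%2Bpw', 'example.org', '2222', '/data/file.txt')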
Example #2
def get_subscriptions():
    try:
        content = read_from_file(SUBSCRIPTION_FILE)
        lines = content.split('\n')
        
        for line in lines:
            data = line.split('\t')
            if len(data) == 2:
                if data[1].startswith('tt'):
                    tv_show_name = data[0]
                    tv_show_imdb = data[1]
                    tv_show_mode = "strm tv show dialog"
                    create_tv_show_strm_files(tv_show_name, tv_show_imdb, tv_show_mode, TV_SHOWS_PATH)
                else:
                    name = data[0]
                    mode = data[1]
                    items = get_menu_items(name, mode, "", "")
                    
                    for (url, li, isFolder) in items:
                        paramstring = url.replace(sys.argv[0], '')
                        params = get_params(paramstring)
                        movie_name = urllib.unquote_plus(params["name"])
                        movie_data = urllib.unquote_plus(params["name"])
                        movie_imdb = urllib.unquote_plus(params["imdb_id"])
                        movie_mode = "strm movie dialog"
                        create_strm_file(movie_name, movie_data, movie_imdb, movie_mode, MOVIES_PATH)
                    
    except:
        xbmc.log("[What the Furk] Failed to fetch subscription")
Example #3
def processWithRE(qs):
	# return dict(map(remapGET, re.findall("&?([^&=]+)=?([^&]*)", qs)))
	r = {}
	for item in detectRE(qs):  # An iterator!
		if not item[1:]:
			# No value captured: pad with None instead of indexing past the end
			item = list(item)
			item.append(None)
		key, val = item
		if "[" in key:
			brackets = splitBrackets(key)
			# It's a Array, and it's recursive
			brackets.insert(0, key.split("[")[0])
			children = r  # Children is just a pointer to r
			c = 0  # Initialize at zero
			l = len(brackets) - 1  # Length-1 to detect end
			for key_child in brackets:
				if not key_child and c > 0:
					key_child = str(len(children))
				children[key_child] = children.get(key_child, {})
				if c != l:
					children = children[key_child]  # Replaces the pointer			
				else: 
					children[key_child] = urllib.unquote_plus(val)  # set the value
				c += 1
		else:
			# It's not a array \o/
			r[key] = urllib.unquote_plus(val)
	return convertToList(r)
Example #4
def get_clean_body(response):
    """
    :see: BlindSqliResponseDiff.get_clean_body()

    Definition of clean in this method:
        - input:
            - response.get_url() == http://host.tld/aaaaaaa/
            - response.get_body() == 'spam aaaaaaa eggs'

        - output:
            - self._clean_body( response ) == 'spam  eggs'

    The same works with file names.
    All of them are removed, both encoded and "as is".

    :param response: The HTTPResponse object to clean
    :return: A string that represents the "cleaned" response body of the
             response.
    """
    body = response.body

    if response.is_text_or_html():
        url = response.get_url()
        to_replace = url.url_string.split('/')
        to_replace.append(url.url_string)

        for repl in to_replace:
            if len(repl) > 6:
                body = body.replace(repl, '')
                body = body.replace(urllib.unquote_plus(repl), '')
                body = body.replace(cgi.escape(repl), '')
                body = body.replace(cgi.escape(urllib.unquote_plus(repl)), '')

    return body
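The function itself needs w3af's HTTPResponse, but the cleaning idea is easy to show in isolation. A small self-contained illustration (made-up payload and body, Python 2 stdlib; in Python 3 the equivalents are urllib.parse.unquote_plus and html.escape) of why the decoded and HTML-escaped variants are stripped as well:

import cgi
import urllib

injected = '<a+b>'                       # made-up value echoed back by the server
body = 'spam <a b> eggs &lt;a+b&gt; ham'

# Remove the value "as is", URL-decoded, HTML-escaped, and decoded+escaped,
# mirroring the four replace() calls above.
for variant in (injected,
                urllib.unquote_plus(injected),              # '<a b>'
                cgi.escape(injected),                       # '&lt;a+b&gt;'
                cgi.escape(urllib.unquote_plus(injected))): # '&lt;a b&gt;'
    body = body.replace(variant, '')

print(repr(body))  # 'spam  eggs  ham'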
Example #5
	def __init__(self):
		global action
		params = {}
		splitparams = sys.argv[2][sys.argv[2].find('?') + 1:].split('&')
		for param in splitparams:
			if (len(param) > 0):
				splitparam = param.split('=')
				key = splitparam[0]
				try:    value = splitparam[1].encode("utf-8")
				except: value = splitparam[1]
				params[key] = value

		try:        action = urllib.unquote_plus(params["action"])
		except:     action = None
		try:        categoryid = urllib.unquote_plus(params["categoryid"])
		except:     categoryid = None
		try:        offset = urllib.unquote_plus(params["offset"])
		except:     offset = 0
		try:        movie_id = urllib.unquote_plus(params["movie_id"])
		except:     movie_id = 0
		try:        episode = urllib.unquote_plus(params["episode"])
		except:     episode = 0


		if action == None:                            self.main_menu()
		elif action == 'list_movies':                 self.list_movies(categoryid, offset)
		elif action == 'play_movie':                  self.play_movie(movie_id, episode)
		elif action == 'list_seasons':                self.list_seasons(movie_id)
		elif action == 'list_episodes':               self.list_episodes(movie_id)
		elif action == 'Search':					  self.Search()
Example #6
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(result)[0]
            post = urllib.urlencode({'video_id': video_id})

            result = client.source(urlparse.urljoin(self.base_link, self.info_link), post=post)

            u = [i for i in result.split('&') if 'google' in i][0]
            u = urllib.unquote_plus(u)
            u = [urllib.unquote_plus(i.split('|')[-1]) for i in u.split(',')]
            u = [googleplus.tag(i)[0] for i in u]
            u = [i for i in u if i['quality'] in ['1080p', 'HD']]

            for i in u: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Afdah', 'url': i['url']})

            return sources
        except:
            return sources
Example #7
def processWithMap(qs):
	r = {}
	for item in qs.split("&"):
		item = item.split("=", 1)
		if not item[1:]:
			# No '=' in this pair: append None rather than indexing out of range
			item.append(None)
		key, val = item
		if "[" in key:
			brackets = key.split("[")
			# It's a Array, and it's recursive
			children = r  # Children is just a pointer to r
			c = 0  # Initialize at zero
			l = len(brackets) - 1  # Length-1 to detect end
			for bracket in brackets:
				key_child = bracket.split("]")[0]
				if not key_child and c > 0:
					key_child = str(len(children))
				children[key_child] = children.get(key_child, {})
				if c == l:
					children[key_child] = urllib.unquote_plus(val)
				else:
					children = children[key_child]  # Replaces the pointer
				c += 1
		else:
			# It's not a array \o/
			r[key] = urllib.unquote_plus(val)
	return convertToList(r)
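Neither helper above is runnable on its own here (detectRE, splitBrackets and convertToList live elsewhere), so the sketch below only traces what the bracket handling in processWithMap builds for a made-up query string; the final convertToList() step is assumed to collapse consecutive numeric keys into a real list.

qs = 'user[name]=Bob&user[langs][]=en&user[langs][]=fr&plain=1'

# Tracing the loop above, the intermediate dict handed to convertToList() is:
# {
#     'user': {'name': 'Bob', 'langs': {'0': 'en', '1': 'fr'}},
#     'plain': '1',
# }
# convertToList() would then presumably yield 'langs' as ['en', 'fr'].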
Example #8
def detail(params,url,category):
	xbmc.output("[peliculasyonkis.py] detail")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )

	# Download the page
	data = scrapertools.cachePage(url)
	#xbmc.output(data)

	# ------------------------------------------------------------------------------------
	# Look for the links to the videos
	# ------------------------------------------------------------------------------------
	patronvideos  = 'href="http://www.peliculasyonkis.com/player/visor_pymeno2.*?id=([^&]+)&al=[^"]+"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	if len(matches)>0:
		scrapertools.printMatches(matches)
	
	
		id = matches[0]
		xbmc.output("[peliculasyonkis.py]  id="+id)
		dec = Yonkis.DecryptYonkis()
		url = dec.decryptID(dec.unescape(id))
		if ":" in url:
			match = url.split(":")
			url = choiceOne(match)
			if url == "": return
		print 'codigo :%s' %url
	else:
		xbmctools.alertnodisponible()
		return
	
	
	xbmctools.playvideo(CHANNELNAME,"Megavideo",url,category,title,thumbnail,plot)
Example #9
    def __init__(self):
        global action
        params = {}
        splitparams = sys.argv[2][sys.argv[2].find('?') + 1:].split('&')
        for param in splitparams:
            if (len(param) > 0):
                splitparam = param.split('=')
                key = splitparam[0]
                try:    value = splitparam[1].encode("utf-8")
                except: value = splitparam[1]
                params[key] = value

        try:        action = urllib.unquote_plus(params["action"])
        except:     action = None
        try:        channel = urllib.unquote_plus(params["channel"])
        except:     channel = None

        if action == None:                          channels().get()
        elif action == 'dialog':                    channels().dialog()
        elif action == 'epg_menu':                  contextMenu().epg(channel)
        elif action == 'refresh':                   index().container_refresh()
        elif action == 'play':                      resolver().run(channel)

        xbmcplugin.setContent(int(sys.argv[1]), 'Episodes')
        xbmcplugin.setPluginFanart(int(sys.argv[1]), addonFanart)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))
        return
Example #10
def play(params):
    referer = urllib.unquote_plus(params['referer'])
    file = urllib.unquote_plus(params['file'])
    headers['Referer'] = referer

    i = xbmcgui.ListItem(path=getFile(file))
    xbmcplugin.setResolvedUrl(h, True, i)
Example #11
def parse_uri(uri_string):
  """Creates a Uri object which corresponds to the URI string.
 
  This method can accept partial URIs, but it will leave missing
  members of the Uri unset.
  """
  parts = urlparse.urlparse(uri_string)
  uri = Uri()
  if parts[0]:
    uri.scheme = parts[0]
  if parts[1]:
    host_parts = parts[1].split(':')
    if host_parts[0]:
      uri.host = host_parts[0]
    if len(host_parts) > 1:
      uri.port = int(host_parts[1])
  if parts[2]:
    uri.path = parts[2]
  if parts[4]:
    param_pairs = parts[4].split('&')
    for pair in param_pairs:
      pair_parts = pair.split('=')
      if len(pair_parts) > 1:
        uri.query[urllib.unquote_plus(pair_parts[0])] = (
            urllib.unquote_plus(pair_parts[1]))
      elif len(pair_parts) == 1:
        uri.query[urllib.unquote_plus(pair_parts[0])] = None
  return uri
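A usage sketch: parse_uri() only fills the pieces that are present, and unquotes both query keys and values with unquote_plus. The Uri class is assumed to expose the scheme/host/port/path attributes and the query dict used above.

uri = parse_uri('http://example.com:8080/feeds?alt=json&q=a+b&flag')
# With the Uri class assumed above, this yields:
#   uri.scheme == 'http'
#   uri.host   == 'example.com'
#   uri.port   == 8080
#   uri.path   == '/feeds'
#   uri.query  == {'alt': 'json', 'q': 'a b', 'flag': None}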
Example #12
def params_end():

        url=None
        name=None
        mode=None
        iconimage=None
        fanart=None
        description=None

        try:
                url=urllib.unquote_plus(params["url"])
        except:
                pass
        try:
                name=urllib.unquote_plus(params["name"])
        except:
                pass
        try:
                iconimage=urllib.unquote_plus(params["iconimage"])
        except:
                pass
        try:        
                mode=int(params["mode"])
        except:
                pass
        try:        
                fanart=urllib.unquote_plus(params["fanart"])
        except:
                pass
        try:        
                description=urllib.unquote_plus(params["description"])
        except:
                pass
Example #13
def remFav(url):
    log("> remFav()")
    if url:
        try:
            favoritesRE=re.compile('(?i)name=(.+?)&url=(.+?)\n')
            favorites = favoritesRE.findall(url)
            for favorite in favorites:
                name = favorite[0]
                url = favorite[1]
            nameurl = 'name=%s&url=%s%s' % (name, url, '\n')
            if dialog.yesno( __plugin__ + ' v' + __version__, __language__(30009), '', urllib.unquote_plus(name).decode('utf-8') ):
                doc = open(FILE_FAVS, "rU")
                text = doc.read().decode('utf-8')
                doc.close()
                doc = open(FILE_FAVS, "w")
                doc.write(text.replace(nameurl, ''))
                doc.close()    
                xbmc.executebuiltin('Container.Refresh')
                dialog.ok( __plugin__ + ' v' + __version__, __language__(30010), '', urllib.unquote_plus(name).decode('utf-8') )
                doc = open(FILE_FAVS).read().decode('utf-8')
                if doc == 'This is your favorites file.\n':
                    dialog.ok( __plugin__ + ' v' + __version__, __language__(30016) )
        except:
            dialog.ok( __plugin__ + ' v' + __version__, __language__(30011), '', urllib.unquote_plus(name).decode('utf-8') )
    return True
Example #14
def removePlaylist(title,murl,folders):
    if os.path.exists(PlaylistFile):
        playlist=re.compile("{'name': '(.*?)', 'url': '(.*?)', 'fanart': '(.*?)', 'folder': '(.*?)', 'typeXml': '(.*?)', 'thumb': '(.*?)'}").findall(open(PlaylistFile,'r').read())
        if len(playlist)<=1 and str(playlist).find(title) != -1:
            os.remove(PlaylistFile)
            xbmc.executebuiltin("Container.Refresh")
        if os.path.exists(PlaylistFile):
            for name,url,fanart,folder,typeXml,thumb in reversed (playlist):
                if title == urllib.unquote_plus(name) and urllib.unquote_plus(folders)==urllib.unquote_plus(folder):
                    playlist.remove((name,url,fanart,folder,typeXml,thumb))
                    os.remove(PlaylistFile)
                    for name,url,fanart,folder,typeXml,thumb in playlist:
                        try:
                            playlists = {}
                            playlists['name'] = name
                            playlists['url'] = url.replace('\\\\','\\')
                            playlists['thumb'] = thumb
                            playlists['fanart'] = fanart
                            playlists['folder'] = folder
                            playlists['typeXml'] = typeXml
                            open(PlaylistFile,'a').write(str(playlists))
                            xbmc.executebuiltin("Container.Refresh")
                            xbmc.executebuiltin("XBMC.Notification([B][COLOR=FF67cc33]"+title+"[/COLOR][/B],[B]Playlist Removed[/B],4000,"")")
                        except: pass
        else: xbmc.executebuiltin("XBMC.Notification([B][COLOR green]Mash Up[/COLOR][/B],[B]You Have No Playlists[/B],1000,"")")
    return
Example #15
def get_clean_body(mutant, response):
    '''
    @see: Very similar to fingerprint_404.py get_clean_body() but not quite
          the same; maybe in the future I can merge both?

    Definition of clean in this method:
        - input:
            - response.get_url() == http://host.tld/aaaaaaa/?id=1 OR 23=23
            - response.get_body() == '...<x>1 OR 23=23</x>...'

        - output:
            - self._clean_body( response ) == '...<x></x>...'

    All injected values are removed, both encoded and "as is".

    :param mutant: The mutant where I can get the value from.
    :param response: The HTTPResponse object to clean
    :return: A string that represents the "cleaned" response body.
    '''

    body = response.body

    if response.is_text_or_html():
        mod_value = mutant.get_mod_value()

        body = body.replace(mod_value, '')
        body = body.replace(urllib.unquote_plus(mod_value), '')
        body = body.replace(cgi.escape(mod_value), '')
        body = body.replace(cgi.escape(urllib.unquote_plus(mod_value)), '')

    return body
Example #16
File: http.py Project: halayli/lime
 def get_body(self, length):
     # at this point, we have received a complete header and we are ready
     # to process the body
     while 1:
         try:
             try:
                 self._data_recvd += self._sock.recv(length)
             except:
                 return 3
             ln = len(self._data_recvd)
             if ln > Http.max_post_length: 
                 raise HttpError(self.get_peer(), 413, ln)
             if self._method == 'POST':
                 body = self._data_recvd[self._header_length:]
                 if int(self._client_headers['content-length']) !=len(body):
                     return 1
                 else:
                     body_fields = body.split('&')
                     # fields of more than 50 chars are not allowed 
                     # because we are using dicts. 
                     for field_value in body_fields:
                         field, value = field_value.split('=')
                         field = urllib.unquote_plus(field)[0:50]
                         value = urllib.unquote_plus(value)
                         self._post[field] = value
         except Exception, e:
             raise HttpError(self.get_peer(), 400, str(e))
         break
Example #17
def parse_params(param_str):
    param_dic = {}
    # Parameters are on the 3rd arg passed to the script
    param_str = sys.argv[2]
    if len(param_str) > 1:
        param_str = param_str.replace('?', '')

        # Ignore last char if it is a '/'
        if param_str[-1] == '/':
            param_str = param_str[:-1]

        # Process each parameter, split on '&'
        for param in param_str.split('&'):
            try:
                # Split the key/value pair
                key, value = param.split('=')
            except:
                key = param
                value = ''

            key = urllib.unquote_plus(key)
            value = urllib.unquote_plus(value)

            # Fill the dictionary
            param_dic[key] = value

    return param_dic
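Note that parse_params() reads the paramstring from sys.argv[2] regardless of its argument. A hedged usage sketch with a made-up Kodi-style invocation:

import sys

# Hypothetical plugin invocation: Kodi passes the query part as sys.argv[2]
sys.argv = ['plugin://some.plugin/', '1',
            '?mode=play&name=Some+Movie&url=http%3A%2F%2Fexample.com%2Fa/']

print(parse_params(sys.argv[2]))
# -> {'mode': 'play', 'name': 'Some Movie', 'url': 'http://example.com/a'}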
Example #18
  def render(self, request):
    request.setHeader('Content-Type', 'text/xml')

    # Reload the nagios data files
    self.status_parsed = self.parse_status()
    self.objects_parsed = self.parse_objects()

    # Parse the request..
    try:
      action, hostname, service = request.postpath
      hostname = urllib.unquote_plus(hostname)
      service = urllib.unquote_plus(service)
    except (KeyError, ValueError):
      return '<Response/>'

    # Trigger the correct action
    if action == 'host':
      response = self.hostalert(request, hostname)
    elif action == 'hostaction':
      response = self.hostaction(request, hostname)
    elif action == 'service':
      response = self.servicealert(request, hostname, service)
    elif action == 'serviceaction':
      response = self.serviceaction(request, hostname, service)

    return response
Example #19
def detail(params,url,category):
	logger.info("[divxonline.py] detail")
	title=''
	thumbnail=''
	plot=''

	try:
		title = urllib.unquote_plus( params.get("title") )
		thumbnail = urllib.unquote_plus( params.get("thumbnail") )
		plot = urllib.unquote_plus( params.get("plot") )
	except:
		pass
	# Download the page
	data = scrapertools.cachePage(url)
	#logger.info(data)

	# ------------------------------------------------------------------------------------
	# Look for the links to the videos
	# ------------------------------------------------------------------------------------
	
	data=decryptinks(data);
	listavideos = servertools.findvideos(data)

	for video in listavideos:
		videotitle = video[0]
		url = video[1]
		server = video[2]
		xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
	# ------------------------------------------------------------------------------------

	# Close the directory listing
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #20
 def construct_video_url_keepvid( self, url, quality=18, encoding="utf-8" ):
     try:
         url = unquote_plus( url )
         video_id = url.split( "v=" )[ 1 ]
         # we need to unquote the play url
         url = "http://keepvid.com/?url=" + quote_plus( url )
         # spam url to log for checking
         if ( not DEBUG ):
             xbmc.log( "[PLUGIN] '%s: version %s' - (quality=%d, video url=%s)" % ( sys.modules[ "__main__" ].__plugin__, sys.modules[ "__main__" ].__version__, quality, url, ), xbmc.LOGDEBUG )
         # we need to request the url to be redirected to the swf player url to grab the session id
         request = urllib2.Request( url )
         # add a faked header, we use ie 8.0. it gives correct results for regex
         request.add_header( 'User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)' )
         # create an opener object to grab the info
         opener = urllib2.urlopen( request )
         # read source
         htmlSource = opener.read()
         # close opener
         opener.close()
         # get the video id and session id
         video_url = unquote_plus( re.findall( "<a href=\"/save-video.mp4?(.+?)\"", htmlSource )[ 0 ] )[ 1 : ]
         # get details for the video and return the details
         title, author, genre, rating, runtime, count, date, thumbnail_url, plot = self.get_details( video_id )
         # return our values
         return video_url, title, author, genre, rating, runtime, count, date, thumbnail_url, plot, video_id
     except:
         # oops return an empty string
         print "ERROR: %s::%s (%d) - %s" % ( self.__class__.__name__, sys.exc_info()[ 2 ].tb_frame.f_code.co_name, sys.exc_info()[ 2 ].tb_lineno, sys.exc_info()[ 1 ], )
         return [ "" ] * 11
Example #21
 def openSection(self, params = {}):
     get = params.get
     url = urllib.unquote_plus(get("url"))
     try: external=urllib.unquote_plus(get("external"))
     except: external=None
     filesList = []
     if None == get('isApi'):
         progressBar = xbmcgui.DialogProgress()
         progressBar.create(Localization.localize('Please Wait'), Localization.localize('Materials are loading now.'))
         iterator = 0
     searchersList = []
     dirList = os.listdir(self.ROOT + os.sep + 'resources' + os.sep + 'searchers')
     if not external or external=='torrenterall':
         for searcherFile in dirList:
             if re.match('^(\w+)\.py$', searcherFile):
                 searchersList.append(searcherFile)
     else: searchersList.append(external+'.py')
     for searcherFile in searchersList:
         searcher = re.search('^(\w+)\.py$', searcherFile).group(1)
         if searcher:
             if None == get('isApi'):
                 progressBar.update(int(iterator), searcher)
                 iterator += 100 / len(searchersList)
             filesList += self.searchWithSearcher(url, searcher)
         if None == get('isApi') and progressBar.iscanceled():
             progressBar.update(0)
             progressBar.close()
             return
     if None == get('isApi'):
         progressBar.update(0)
         progressBar.close()
     filesList = sorted(filesList, key=lambda x: x[0], reverse=True)
     self.showFilesList(filesList, params)
Example #22
def play(params,url,category):
	xbmc.output("[yotix.py] play")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = urllib.unquote_plus( params.get("plot") )
	server = urllib.unquote_plus( params.get("server") )

	# Open the dialog
	dialogWait = xbmcgui.DialogProgress()
	dialogWait.create( 'Accediendo al video...', title , plot )

	if server=="Directo":
		# Download the player page
		# http://yotix.com/flash/UPY6KEB4/cleaner.html
		xbmc.output("url="+url)
		data = scrapertools.cachePage(url)

		patron = 'so.addParam\(\'flashvars\',\'\&file\=([^\&]+)\&'
		matches = re.compile(patron,re.DOTALL).findall(data)
		if len(matches)>0:
			url = matches[0]
	else:
		patron = 'http://yotix.tv/flash/([^\/]+)/'
		matches = re.compile(patron,re.DOTALL).findall(url)
		if len(matches)>0:
			url = matches[0]

	xbmc.output("url="+url)

	# Close the dialog
	dialogWait.close()
	del dialogWait

	xbmctools.playvideo(CHANNELNAME,server,url,category,title,thumbnail,plot)
Example #23
def variable_edit(name=None, scope=None):
  from models.variable import Variable
  
  if name and scope:
    variable = Variable.query.filter_by(name=urllib.unquote_plus(name), scope=urllib.unquote_plus(scope)).first_or_404()
  else:
    variable = Variable()
  
  errors = []
  
  if request.method == 'POST' and request.values.get( 'csrf_token', None ):
    variable.scope = request.form.get('variable_scope')
    variable.name = request.form.get('variable_name')
    variable.raw_value = request.form.get('variable_raw_value')
    errors = variable.validate()
    if not len(errors):
      variable.save()
      flash( g._t('variable submit success'))
      return redirect(url_for('variable_index'))
  
  if name:
    title = g._t('edit')
  else:
    title = g._t('add')
  breadcrumbs = (
    (g._t('administration'), url_for('administration_index')),
    (g._t('variables'), url_for('variable_index')),
    (title, "#")
  )
  
  return render_template('administration/variable/edit.html', title=title, breadcrumbs=breadcrumbs, variable=variable, errors=errors)
Example #24
def mirrors(params,url,category):
	logger.info("[capitancinema.py] mirrors")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = urllib.unquote_plus( params.get("plot") )

	# Download the page
	data = scrapertools.cachePage(url)
	patronvideos  = '<li><strong>DISPONIBLE EN EL FORO</strong>[^<]+<a href="([^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	if len(matches)>0:
		url = matches[0]
		data = scrapertools.cachePage(url)

		# ------------------------------------------------------------------------------------
		# Look for the links to the videos
		# ------------------------------------------------------------------------------------
		listavideos = servertools.findvideos(data)

		for video in listavideos:
			videotitle = video[0]
			url = video[1]
			server = video[2]
			xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
		# ------------------------------------------------------------------------------------

	# Close the directory listing
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #25
def playDM(id):
    try:
        if xbox:
            addToHistory("plugin://video/My Music TV/?url="+id+"&mode=playDM")
        else:
            addToHistory("plugin://"+addonID+"/?url="+id+"&mode=playDM")
        content = opener.open("http://www.dailymotion.com/embed/video/"+id).read()
        if '"statusCode":410' in content or '"statusCode":403' in content:
            xbmc.executebuiltin('XBMC.Notification(Dailymotion:, Video is not available,5000)')
        else:
            matchFullHD = re.compile('"stream_h264_hd1080_url":"(.+?)"', re.DOTALL).findall(content)
            matchHD = re.compile('"stream_h264_hd_url":"(.+?)"', re.DOTALL).findall(content)
            matchHQ = re.compile('"stream_h264_hq_url":"(.+?)"', re.DOTALL).findall(content)
            matchSD = re.compile('"stream_h264_url":"(.+?)"', re.DOTALL).findall(content)
            matchLD = re.compile('"stream_h264_ld_url":"(.+?)"', re.DOTALL).findall(content)
            url = ""
            if matchFullHD and resolutionDM == "1080p":
                url = urllib.unquote_plus(matchFullHD[0]).replace("\\", "")
            elif matchHD and (resolutionDM == "720p" or resolutionDM == "1080p"):
                url = urllib.unquote_plus(matchHD[0]).replace("\\", "")
            elif matchHQ:
                url = urllib.unquote_plus(matchHQ[0]).replace("\\", "")
            elif matchSD:
                url = urllib.unquote_plus(matchSD[0]).replace("\\", "")
            elif matchLD:
                url = urllib.unquote_plus(matchLD[0]).replace("\\", "")
            listitem = xbmcgui.ListItem(path=url)
            xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
            if infoEnabled:
                showInfo()
    except:
        pass
Example #26
def playLiveVideo(id):
    content = getUrl2("http://www.dailymotion.com/sequence/"+id)
    if content.find('"statusCode":410') > 0 or content.find('"statusCode":403') > 0:
        xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30022)+' (DailyMotion)!,5000)')
    else:
        matchFullHD = re.compile('"hd1080URL":"(.+?)"', re.DOTALL).findall(content)
        matchHD = re.compile('"hd720URL":"(.+?)"', re.DOTALL).findall(content)
        matchHQ = re.compile('"hqURL":"(.+?)"', re.DOTALL).findall(content)
        matchSD = re.compile('"sdURL":"(.+?)"', re.DOTALL).findall(content)
        matchLD = re.compile('"video_url":"(.+?)"', re.DOTALL).findall(content)
        url = ""
        if matchFullHD and maxVideoQuality == "1080p":
            url = urllib.unquote_plus(matchFullHD[0]).replace("\\", "")
        elif matchHD and (maxVideoQuality == "720p" or maxVideoQuality == "1080p"):
            url = urllib.unquote_plus(matchHD[0]).replace("\\", "")
        elif matchHQ:
            url = urllib.unquote_plus(matchHQ[0]).replace("\\", "")
        elif matchSD:
            url = urllib.unquote_plus(matchSD[0]).replace("\\", "")
        elif matchLD:
            url = urllib.unquote_plus(matchLD[0]).replace("\\", "")
        if url:
            url = getUrl(url)
            listitem = xbmcgui.ListItem(path=url)
            xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
Example #27
def parse(query_string, unquote=True):
    '''
    Main parse function
    @param query_string: the query string to parse
    @param unquote: whether to unquote the query string values
    '''
    mydict = {}
    plist = []
    if query_string == "":
        return mydict
    for element in query_string.split("&"):
        try:
            if unquote:
                (var, val) = element.split("=")
                var = urllib.unquote_plus(var)
                val = urllib.unquote_plus(val)
            else:
                (var, val) = element.split("=")
        except ValueError:
            raise MalformedQueryStringError
        plist.append(parser_helper(var, val))
    for di in plist:
        (k, v) = di.popitem()
        tempdict = mydict
        while k in tempdict and type(v) is dict:
            tempdict = tempdict[k]
            (k, v) = v.popitem()
        if k in tempdict and type(tempdict[k]).__name__ == 'list':
            tempdict[k].append(v)
        elif k in tempdict:
            tempdict[k] = [tempdict[k], v]
        else:
            tempdict[k] = v
    return mydict
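parse() relies on parser_helper() and MalformedQueryStringError from the same module (not shown). Assuming parser_helper(var, val) returns a single-key, possibly nested dict, a call looks like:

result = parse('a=1&a=2&name=John+Doe')
# -> {'a': ['1', '2'], 'name': 'John Doe'}   # repeated keys are collected into a list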
Example #28
    def parse(self):
        """Call feed first"""
        track = self.cut_content_simple('%2C+', '+%2A')[0]
        artist, title = track.split('%2C+')

        self.artist = self.capstext(urllib.unquote_plus(artist))
        self.title = urllib.unquote_plus(title)
Example #29
    def get_media_url(self, host, media_id):
        if self.get_setting('login') == 'true':
            if self.login_stale():
                self.login()
        self.net.set_cookies(self.cookie_file)
        web_url = self.get_url(host, media_id)
        if web_url.endswith("#"): web_url = web_url.replace("#", "")

        #find session_hash
        try:
            html = self.net.http_GET(web_url).content
            if ">This file doesn't exist, or has been removed.<" in html: raise Exception (host+": This file doesn't exist, or has been removed.")
            elif "This file might have been moved, replaced or deleted.<" in html: raise Exception (host+": This file might have been moved, replaced or deleted.")
            #Shortcut for logged in users
            pattern = '<a href="(/.+?)" class="download_file_link" style="margin:0px 0px;">Download File</a>'
            link = re.search(pattern, html)
            if link:
                common.addon.log('Direct link found: %s' % link.group(1))
                if 'putlocker' in host:
                    return 'http://www.filedrive.com%s' % link.group(1)
                    #return 'http://www.putlocker.com%s' % link.group(1)
                elif 'filedrive' in host:
                    return 'http://www.filedrive.com%s' % link.group(1)
                elif 'firedrive' in host:
                    return 'http://www.firedrive.com%s' % link.group(1)

            if ('firedrive' in host) or ('filedrive' in host) or ('putlocker' in host):
                try:
                    data = {}; r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)"/>', html); #data['usr_login']=''
                    for name, value in r: data[name] = value
                    #data['imhuman']='Proceed to video'; data['btn_download']='Proceed to video'
                    #xbmc.sleep(2000)
                    html = self.net.http_POST(web_url, data).content
                except urllib2.URLError, e:
                    common.addon.log_error(host+': got http error %d fetching 2nd url %s' % (e.code, web_url))
                    return self.unresolvable(code=3, msg='Exception: %s' % e) #return False
                if "file: '" in html:
                    r = re.search("file\s*:\s*'(.+?)'", html)
                    if r: return urllib.unquote_plus(r.group(1))
                if '" target="_blank" '+"id='top_external_download' title='Download This File'>" in html:
                    r = re.search('<a href="(.+?)" target="_blank" '+"id='top_external_download' title='Download This File'>", html)
                    if r: return urllib.unquote_plus(r.group(1))
                if "id='external_download' title='Download This File'>" in html:
                    r = re.search('<a href="(.+?)" id=\'external_download\' title=\'Download This File\'>', html)
                    if r: return urllib.unquote_plus(r.group(1))
                if "<a id='fd_vid_btm_download_front' title='Copy to my computer' class='video_bottom_buttons' target='_blank' href='" in html:
                    r = re.search("<a id='fd_vid_btm_download_front' title='Copy to my computer' class='video_bottom_buttons' target='_blank' href='(.+?)'>", html)
                    if r: return urllib.unquote_plus(r.group(1))
                #if r:
                #    return urllib.unquote_plus(r.group(1))
                #else:
                #    common.addon.log_error(host+': stream url not found')
                #    return self.unresolvable(code=0, msg='no file located') #return False
                r = re.search(r"\$\.post\('(.+?)', function\(data\) \{", html)
                if r:
                    return urllib.unquote_plus(r.group(1))
                else:
                    common.addon.log_error(host+': stream url not found')
                    return self.unresolvable(code=0, msg='no file located') #return False
            else:
Example #30
def getGenreList(params):
    http = client.GET(urllib.unquote_plus(params['href']), httpSiteUrl)
    if http is None:
        return False

    beautifulSoup = BeautifulSoup(http)
    items = beautifulSoup.find('div', params['css']).findAll('a')

    if len(items) == 0:
        show_message('ОШИБКА', 'Неверная страница', 3000)
        return False
    else:
        for item in items:
            li = xbmcgui.ListItem(item.string)
            li.setProperty('IsPlayable', 'false')
            uri = strutils.construct_request({
                'href': client.get_full_url(item['href'].encode('utf-8')),
                'mode': 'readcategory',
                'section': params['section'],
                'filter': '',
                'cleanUrl': urllib.unquote_plus(params['cleanUrl']),
                'start': 0,
                'hideFirstPageData': 1
            })
            xbmcplugin.addDirectoryItem(h, uri, li, True)
        xbmcplugin.endOfDirectory(h)
Example #31
#     print cookie
## Create a compiled regex Pattern
pat = re.compile('and the next busynothing is (\d+)')  ## () creates a group, giving later matches an index
## Strings prefixed with r are raw strings: nothing is escaped, e.g. r'\n' is not treated as a newline
url_template = r'http://www.pythonchallenge.com/pc/def/linkedlist.php?busynothing={0}'  ## {0} marks where the next value is substituted
next_num = '12345'
cookies = []
while True:
    f = opener.open(url_template.format(next_num))
    html = f.read()
    for cookie in cj:
        cookies.append(cookie)
        # print cookie
    matchRes = pat.findall(html)
    if matchRes:
        next_num = matchRes[0]
        print next_num
    else:
        break
print len(cookies)
print type(cookies[0])
values = [x.value for x in cookies]
msg = urllib.unquote_plus("".join(values))
print bz2.decompress(msg)

proxy = xmlrpclib.ServerProxy(
    "http://www.pythonchallenge.com/pc/phonebook.php")
print proxy.phone("Leopold")

list(cj)[0].value = 'the+flowers+are+on+their+way'
print opener.open('http://www.pythonchallenge.com/pc/stuff/violin.php').read()
Example #32
            if (len(splitparams)) == 2:
                param[splitparams[0]] = splitparams[1]

    return param


params = get_params()
url = None
name = None
mode = None
iconimage = None
fanart = None
description = None

try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    iconimage = urllib.unquote_plus(params["iconimage"])
except:
    pass
try:
    mode = int(params["mode"])
except:
    pass
try:
Example #33
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
    )
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    return link


params = get_params()
url = None
name = None
mode = None
page = 1

try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode = int(params["mode"])
except:
    pass
try:
    page = int(params["page"])
except:
    pass
Example #34
def lambda_handler(event, context):
    '''Demonstrates S3 trigger that uses
    Rekognition APIs to detect faces, labels and index faces in S3 Object.
    '''
    #print("Received event: " + json.dumps(event, indent=2))

    # Get the object from the event
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = urllib.unquote_plus(
        event['Records'][0]['s3']['object']['key'].encode('utf8'))
    try:
        # Calls rekognition DetectFaces API to detect faces in S3 object
        # response = detect_faces(bucket, key)

        # Calls rekognition DetectLabels API to detect labels in S3 object
        response = detect_labels(bucket, key)

        for record in event['Records']:
            filename = record['s3']['object']['key']
            #filesize = record['s3']['object']['size'];
            #source = record['requestParameters']['sourceIPAddress'];
            eventTime = record['eventTime']

    #Iterate on rekognition labels. Enrich and prep them for storage in DynamoDB
        labels_on_watch_list = []
        for label in response['Labels']:

            lbl = label['Name']
            conf = label['Confidence']
            label['OnWatchList'] = False

            #Print labels and confidence to lambda console
            print('{} .. conf %{:.2f}'.format(lbl, conf))

            #Check label watch list and trigger action
            if (lbl.upper() in (label.upper() for label in label_watch_list)
                    and conf >= label_watch_min_conf):

                label['OnWatchList'] = True
                labels_on_watch_list.append(deepcopy(label))

        tosend = ""

        for Label in response["Labels"]:
            print('{0} - {1}%'.format(Label["Name"], Label["Confidence"]))
            tosend += '{0} - {1}%'.format(Label["Name"],
                                          round(Label["Confidence"], 2))

        # Calls rekognition IndexFaces API to detect faces in S3 object and index faces into specified collection
        #response = index_faces(bucket, key)

        # Print response to console.
        print(response)

        def pushover_handler(message):
            """ Send parsed message to Pushover """
            #logger.info('Received message' + json.dumps(message))
            conn = HTTPSConnection("api.pushover.net:443")
            conn.request(
                "POST", "/1/messages.json",
                urllib.urlencode({
                    "token": '----',
                    "user": '******',
                    "message": filename + "  " + tosend,
                    "sound": 'pushover',
                    "priority": 0,
                    "title": "SecuritySpy"
                }), {"Content-type": "application/x-www-form-urlencoded"})
            response = conn.getresponse()
            return response.status

        if len(labels_on_watch_list) > 0:
            return pushover_handler(tosend)

        return response
    except Exception as e:
        print(e)
        print(
            "Error processing object {} from bucket {}. ".format(key, bucket) +
            "Make sure your object and bucket exist and your bucket is in the same region as this function."
        )
        raise e
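S3 URL-encodes object keys in event notifications (spaces arrive as '+'), which is why the handler runs unquote_plus() on the key before calling Rekognition. A minimal sketch of the event shape this handler expects (bucket and key are made up):

event = {
    "Records": [{
        "eventTime": "2020-01-01T00:00:00.000Z",
        "s3": {
            "bucket": {"name": "my-camera-uploads"},
            "object": {"key": "front+door/2020-01-01+00%3A00%3A00.jpg"},
        },
    }]
}
# lambda_handler(event, None) would call
# detect_labels('my-camera-uploads', 'front door/2020-01-01 00:00:00.jpg')
# and push a notification if any label on label_watch_list is detected with
# confidence >= label_watch_min_conf.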
Example #35
    def search(self, keyword, external):
        self.log("*** Search")

        keyword = urllib.unquote_plus(keyword) if (
            external != None) else self.getUserInput()

        if keyword:
            url = self.url + '/?do=search'

            # Advanced search: titles only
            values = {
                "do": "search",
                "subaction": "search",
                "mode": "simple",
                "story": keyword.encode("cp1251"),
                "x": "0",
                "y": "0"
            }

            data = urllib.urlencode(values)
            request = urllib2.Request(url, data)
            response = urllib2.urlopen(request).read()

            container = common.parseDOM(response,
                                        "div",
                                        attrs={"id": "container"})
            posts = common.parseDOM(container,
                                    "div",
                                    attrs={"class": "custom-post"})

            if posts:
                for i, post in enumerate(posts):
                    poster = common.parseDOM(post,
                                             "div",
                                             attrs={"class": "custom-poster"})
                    title = self.encode(
                        common.stripTags(common.parseDOM(post, "a")[0]))
                    link = common.parseDOM(post, "a", ret="href")[0]
                    image = common.parseDOM(post, "img", ret="src")[0]

                    uri = sys.argv[0] + '?mode=show&url=%s' % link
                    item = xbmcgui.ListItem(title, thumbnailImage=image)

                    self.favorites.addContextMenuItem(
                        item, {
                            'title': title,
                            'url': link,
                            'image': image,
                            'playable': False,
                            'action': 'add',
                            'plugin': self.id
                        })

                    xbmcplugin.addDirectoryItem(self.handle, uri, item, True)

                xbmcplugin.setContent(self.handle, 'movies')
            #else:
            #    if external != "usearch":
            #        item = xbmcgui.ListItem(self.language(2001), iconImage=self.icon, thumbnailImage=self.icon)
            #        xbmcplugin.addDirectoryItem(self.handle, '', item, True)
            xbmcplugin.endOfDirectory(self.handle, True)
        else:
            self.menu()
Example #36
    def run(self, argv=None):

        self.addon = Addon('plugin.video.SportsDevil', argv)

        common.log('SportsDevil running')

        base = argv[0]
        handle = int(argv[1])
        parameter = argv[2]
        self.base = base
        self.handle = handle

        paramstring = urllib.unquote_plus(parameter)

        try:

            # if addon is started
            listItemPath = xbmcUtils.getListItemPath()
            if not listItemPath.startswith(self.base):
                if not ('mode=' in paramstring
                        and not 'mode=1&' in paramstring):
                    xbmcplugin.setPluginFanart(self.handle,
                                               common.Paths.pluginFanart)
                    self.clearCache()

                    #if common.getSetting('autoupdate') == 'true':
                    #    self.update()

            # Main Menu
            if len(paramstring) <= 2:
                mainMenu = ListItem.create()
                mainMenu['url'] = self.MAIN_MENU_FILE
                tmpList = self.parseView(mainMenu)
                if tmpList:
                    self.currentlist = tmpList

            else:
                [mode, item] = self._parseParameters()

                # switch(mode)
                if mode == Mode.VIEW:
                    tmpList = self.parseView(item)
                    if tmpList:
                        self.currentlist = tmpList
                        count = len(self.currentlist.items)
                        if count == 1:
                            # Autoplay single video
                            autoplayEnabled = common.getSetting(
                                'autoplay') == 'true'
                            if autoplayEnabled:
                                videos = self.currentlist.getVideos()
                                if len(videos) == 1:
                                    self.playVideo(videos[0], True)

                elif mode == Mode.ADDITEM:
                    tmp = os.path.normpath(paramstring.split('url=')[1])
                    if tmp:
                        suffix = tmp.split(os.path.sep)[-1]
                        tmp = tmp.replace(suffix,
                                          '') + urllib.quote_plus(suffix)
                    if self.favouritesManager.add(tmp):
                        xbmc.executebuiltin('Container.Refresh()')

                elif mode in [
                        Mode.ADDTOFAVOURITES, Mode.REMOVEFROMFAVOURITES,
                        Mode.EDITITEM
                ]:

                    if mode == Mode.ADDTOFAVOURITES:
                        self.favouritesManager.addItem(item)
                    elif mode == Mode.REMOVEFROMFAVOURITES:
                        self.favouritesManager.removeItem(item)
                        xbmc.executebuiltin('Container.Refresh()')
                    elif mode == Mode.EDITITEM:
                        if self.favouritesManager.editItem(item):
                            xbmc.executebuiltin('Container.Refresh()')

                elif mode == Mode.EXECUTE:
                    self.executeItem(item)

                elif mode == Mode.PLAY:
                    self.playVideo(item)

                elif mode == Mode.QUEUE:
                    self.queueAllVideos(item)

                elif mode == Mode.DOWNLOAD:
                    url = urllib.unquote(item['url'])
                    title = item['title']
                    self.downloadVideo(url, title)

                elif mode == Mode.REMOVEFROMCUSTOMMODULES:
                    self.removeCustomModule(item)

                #elif mode == Mode.UPDATE:
                #    self.update()

                elif mode == Mode.DOWNLOADCUSTOMMODULE:
                    self.downloadCustomModule()

                elif mode == Mode.INSTALLADDON:
                    success = install(item['url'])
                    if success:
                        xbmc.sleep(100)
                        if xbmcUtils.getCurrentWindowXmlFile(
                        ) == 'DialogAddonSettings.xml':
                            # workaround to update settings dialog
                            common.setSetting('', '')

        except Exception, e:
            common.showError('Error running SportsDevil')
            common.log('Error running SportsDevil. Reason:' + str(e))
Example #37
    def createXBMCListItem(self, item):
        title = enc.clean_safe(item['title'])

        m_type = item['type']

        icon = item['icon']

        if icon and not icon.startswith('http'):
            try:
                if not fu.fileExists(icon):
                    tryFile = os.path.join(common.Paths.modulesDir, icon)
                    if not fu.fileExists(tryFile):
                        tryFile = os.path.join(common.Paths.customModulesDir,
                                               icon)
                    if fu.fileExists(tryFile):
                        icon = tryFile
            except:
                pass

        if not icon:
            if m_type == 'video':
                icon = common.Paths.defaultVideoIcon
            else:
                icon = common.Paths.defaultCategoryIcon

        liz = xbmcgui.ListItem(title,
                               title,
                               iconImage=icon,
                               thumbnailImage=icon)

        fanart = item['fanart']
        if not fanart:
            fanart = common.Paths.pluginFanart
        liz.setProperty('fanart_image', fanart)
        """
        General Values that apply to all types:
            count         : integer (12) - can be used to store an id for later, or for sorting purposes
            size          : long (1024) - size in bytes
            date          : string (%d.%m.%Y / 01.01.2009) - file date

        Video Values:
            genre         : string (Comedy)
            year          : integer (2009)
            episode       : integer (4)
            season        : integer (1)
            top250        : integer (192)
            tracknumber   : integer (3)
            rating        : float (6.4) - range is 0..10
            watched       : deprecated - use playcount instead
            playcount     : integer (2) - number of times this item has been played
            overlay       : integer (2) - range is 0..8.  See GUIListItem.h for values
            cast          : list (Michael C. Hall)
            castandrole   : list (Michael C. Hall|Dexter)
            director      : string (Dagur Kari)
            mpaa          : string (PG-13)
            plot          : string (Long Description)
            plotoutline   : string (Short Description)
            title         : string (Big Fan)
            originaltitle : string (Big Fan)
            duration      : string (3:18)
            studio        : string (Warner Bros.)
            tagline       : string (An awesome movie) - short description of movie
            writer        : string (Robert D. Siegel)
            tvshowtitle   : string (Heroes)
            premiered     : string (2005-03-04)
            status        : string (Continuing) - status of a TVshow
            code          : string (tt0110293) - IMDb code
            aired         : string (2008-12-07)
            credits       : string (Andy Kaufman) - writing credits
            lastplayed    : string (%Y-%m-%d %h:%m:%s = 2009-04-05 23:16:04)
            album         : string (The Joshua Tree)
            votes         : string (12345 votes)
            trailer       : string (/home/user/trailer.avi)
        """

        infoLabels = {}
        for video_info_name in item.infos.keys():
            infoLabels[video_info_name] = enc.clean_safe(item[video_info_name])
        infoLabels['title'] = title

        liz.setInfo('video', infoLabels)

        url = urllib.unquote_plus(item['url'])
        liz.setPath(url)

        if m_type == 'video':
            liz.setProperty('IsPlayable', 'true')

        return liz
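A short illustration of the docstring above: populating a ListItem with a few of the listed video info labels (values are made up; xbmcgui is only importable inside a Kodi/XBMC plugin environment).

import xbmcgui  # available inside a Kodi/XBMC plugin environment

liz = xbmcgui.ListItem('Big Fan')
liz.setInfo('video', {
    'title': 'Big Fan',
    'genre': 'Comedy',
    'year': 2009,
    'plot': 'Long Description',
    'duration': '3:18',
})
liz.setProperty('IsPlayable', 'true')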
Example #38
def detail(params, url, category):
    logger.info("[newdivx.py] detail")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = urllib.unquote_plus(params.get("plot"))

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)
    patronvideos = '<p class="Estilo2">([^<]+)</p>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        title = matches[0]
    # ------------------------------------------------------------------------------------
    # Look for the video links on the enabled servers
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        if "stagevu.com/embed" not in video[1]:
            videotitle = video[0]
            url = video[1]
            server = video[2]
            xbmctools.addnewvideo(CHANNELNAME, "play", category, server,
                                  title.strip() + " - " + videotitle, url,
                                  thumbnail, plot)
    # ------------------------------------------------------------------------------------
    #--- Look for direct videos
    ## ------------------------------------------------------------------------------------##
    #               Look for links on the przeklej server                                   #
    ## ------------------------------------------------------------------------------------##
    patronvideos = '<param name="src" value="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        if len(matches) == 1:
            subtitle = "[Divx-Directo-Przeklej]"
            xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo",
                                  title + " - " + subtitle, matches[0],
                                  thumbnail, plot)

        else:
            parte = 0
            subtitle = "[Divx-Directo-Przeklej]"
            for match in matches:
                logger.info(" matches = " + match)
                parte = parte + 1
                xbmctools.addnewvideo(
                    CHANNELNAME, "play", category, "Directo",
                    title + " - " + subtitle + " " + str(parte), match,
                    thumbnail, plot)

## --------------------------------------------------------------------------------------##
#                Look for links on the Fishker server                                     #
## --------------------------------------------------------------------------------------##
    patronvideos = '<a href="(http\:\/\/www.fishker.com\/[^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        data2 = scrapertools.cachePage(matches[0])
        #print data2
        #<param name="flashvars" value="comment=Q&amp;m=video&amp;file=http://fish14.st.fishker.com/videos/1249504.flv?c=3948597662&amp;st=/plstyle.txt?video=1"
        patron = 'file=([^"]+)"'
        matches2 = re.compile(patron, re.DOTALL).findall(data2)
        if len(matches2) > 0:
            videourl = matches2[0].replace("&amp;", "&")
            subtitle = "[FLV-Directo-Fishker]"
            xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo",
                                  title + " - " + subtitle, videourl,
                                  thumbnail, plot)

## --------------------------------------------------------------------------------------##
#                Look for links on the Cinshare server                                    #
## --------------------------------------------------------------------------------------##

    patronvideos = '<iframe src="(http://www.cinshare.com/[^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        ####
        '''
		data2 = scrapertools.cachePage(matches[0])
		#print data2
		
		patron = '<param name="src" value="([^"]+)"'
		matches2 = re.compile(patron,re.DOTALL).findall(data2)
		if len(matches2)>0:
		'''
        ####
        import cinshare
        videourl = matches[0]
        subtitle = "[divx-Directo-Cinshare]"
        xbmctools.addnewvideo(CHANNELNAME, "play", category, "Cinshare",
                              title + " - " + subtitle, videourl, thumbnail,
                              plot)

    ## --------------------------------------------------------------------------------------##
    #               Look for links to .flv videos (or .mp4 inside an XML file)                #
    ## --------------------------------------------------------------------------------------##
    patronvideos = 'file=(http\:\/\/[^\&]+)\&'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        subtitle = "[FLV-Directo]"
        if ("xml" in matches[0]):
            data2 = scrapertools.cachePage(matches[0])
            logger.info("data2=" + data2)
            patronvideos = '<track>.*?'
            patronvideos += '<title>([^<]+)</title>(?:[^<]+'
            patronvideos += '<annotation>([^<]+)</annotation>[^<]+|[^<]+)'
            patronvideos += '<location>([^<]+)</location>[^<]+'
            patronvideos += '</track>'
            matches = re.compile(patronvideos, re.DOTALL).findall(data2)
            scrapertools.printMatches(matches)

            for match in matches:
                if ".mp4" in match[2]:
                    subtitle = "[MP4-Directo]"
                scrapedtitle = '%s (%s) - %s  %s' % (title, match[1].strip(),
                                                     match[0], subtitle)
                scrapedurl = match[2].strip()
                scrapedthumbnail = thumbnail
                scrapedplot = plot
                if (DEBUG):
                    logger.info("title=[" + scrapedtitle + "], url=[" +
                                scrapedurl + "], thumbnail=[" +
                                scrapedthumbnail + "]")

                # Add the entry to the XBMC listing
                xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo",
                                      scrapedtitle, scrapedurl,
                                      scrapedthumbnail, scrapedplot)
        else:

            xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo",
                                  title + " - " + subtitle, matches[0],
                                  thumbnail, plot)

    ## --------------------------------------------------------------------------------------##
    #            Find video links on the Dailymotion server                                   #
    ## --------------------------------------------------------------------------------------##
    patronvideos = '<param name="movie" value="http://www.dailymotion.com/swf/video/([^\_]+)\_[^"]+"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    playWithSubt = "play"
    if len(matches) > 0:
        daily = 'http://www.dailymotion.com/video/%s' % matches[0]
        data2 = scrapertools.cachePage(daily)

        # Look for Spanish subtitles (guard against pages without any)
        subtitulo = re.compile('%22es%22%3A%22(.+?)%22').findall(data2)
        subtit = urllib.unquote(subtitulo[0]) if subtitulo else ""
        subtit = subtit.replace("\/", "/")
        #subt_ok = downloadstr(subtit,title)
        #print "subtitulo subt_ok = %s" % str(subt_ok)

        # Look for the SD video link (FLV format)
        Lowres = re.compile('%22sdURL%22%3A%22(.+?)%22').findall(data2)
        if len(Lowres) > 0:
            videourl = urllib.unquote(Lowres[0])
            videourl = videourl.replace("\/", "/")
            if len(subtit) > 0:
                videourl = videourl + "|" + subtit
                playWithSubt = "play2"
            subtitle = "[FLV-Directo-Dailymotion]"
            xbmctools.addnewvideo(CHANNELNAME, playWithSubt, category,
                                  "Directo", title + " - " + subtitle,
                                  videourl, thumbnail, plot)

        # Look for the HQ video link (H.264 format)
        Highres = re.compile('%22hqURL%22%3A%22(.+?)%22').findall(data2)
        if len(Highres) > 0:
            videourl = urllib.unquote(Highres[0])
            videourl = videourl.replace("\/", "/")
            if len(subtit) > 0:
                videourl = videourl + "|" + subtit
                playWithSubt = "play2"
            subtitle = "[h264-Directo-Dailymotion-este video no es soportado en versiones antiguas o xbox plataforma]"
            xbmctools.addnewvideo(CHANNELNAME, playWithSubt, category,
                                  "Directo", title + " - " + subtitle,
                                  videourl, thumbnail, plot)
    ## --------------------------------------------------------------------------------------##
    #            Find video links on the Gigabyteupload.com server                            #
    ## --------------------------------------------------------------------------------------##

    patronvideos = '<a href="(http://www.gigabyteupload.com/[^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        print " encontro: %s" % matches[0]
        import gigabyteupload as giga
        videourl = giga.geturl(matches[0])
        if len(videourl) > 0:
            subtitle = "[Divx-Directo-Gigabyteupload]"
            xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo",
                                  title + " - " + subtitle, videourl,
                                  thumbnail, plot)
    ## --------------------------------------------------------------------------------------##
    #            Find video links for the vk.com server                                       #
    ## --------------------------------------------------------------------------------------##
    '''
	var video_host = 'http://cs12644.vk.com/';
	var video_uid = '87155741';
	var video_vtag = 'fc697084d3';
	var video_no_flv = 1;
	var video_max_hd = '1'
	'''
    patronvideos = '<iframe src="(http:\/\/vk[^\/]+\/video_ext.php[^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        print " encontro VKServer :%s" % matches[0]
        videourl = vk.geturl(matches[0])
        xbmctools.addnewvideo(CHANNELNAME, "play", category, "Directo",
                              title + " - " + "[VKServer]", videourl,
                              thumbnail, plot)
    """	
		data2 = scrapertools.cachePage(matches[0])
		print data2
		
		patron  = "var video_host = '([^']+)'.*?"
		patron += "var video_uid = '([^']+)'.*?"
		patron += "var video_vtag = '([^']+)'.*?"
		patron += "var video_no_flv = ([^;]+);.*?"
		patron += "var video_max_hd = '([^']+)'"
		matches2 = re.compile(patron,re.DOTALL).findall(data2)
		if len(matches2)>0:    #http://cs12387.vk.com/u87155741/video/fe5ee11ddb.360.mp4
			for match in matches2:
				if match[3].strip() == "0":
					tipo = "flv"
					videourl = "%s/u%s/video/%s.%s" % (match[0],match[1],match[2],tipo)
					xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " - "+"[VK] [%s]" %tipo, videourl , thumbnail , plot )
				else:
					tipo = "360.mp4"
					videourl = "%s/u%s/video/%s.%s" % (match[0],match[1],match[2],tipo)
					xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " - "+"[VK] [%s]" %tipo, videourl , thumbnail , plot )
					tipo = "240.mp4"
					videourl = "%s/u%s/video/%s.%s" % (match[0],match[1],match[2],tipo)
					xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " - "+"[VK] [%s]" %tipo, videourl , thumbnail , plot )
	"""

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
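
A note on the decoding pattern used throughout the scraper above: the Dailymotion branch pulls its URLs out of percent-encoded JSON embedded in the page, regex-matches the encoded key/value pair, runs it through urllib.unquote() and restores the escaped slashes. A minimal standalone sketch of that step (the sample string is invented for illustration):

import re

try:
    from urllib import unquote          # Python 2, as in the code above
except ImportError:
    from urllib.parse import unquote    # Python 3

# Invented sample of the percent-encoded fragment found in the page source.
page = '"%22sdURL%22%3A%22http%3A%5C%2F%5C%2Fexample.com%5C%2Fvideo.flv%22"'

matches = re.compile('%22sdURL%22%3A%22(.+?)%22').findall(page)
if matches:
    videourl = unquote(matches[0]).replace("\\/", "/")
    print(videourl)  # -> http://example.com/video.flv
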
Example #39
0
    def run(self):
        # Echo server program
        try:
            import socket
            import _pydev_log
            log = _pydev_log.Log()

            dbg(SERVER_NAME + ' creating socket', INFO1)
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind((HOST, self.thisPort))
            s.listen(1)  #socket to receive messages.

            #we stay here until we are connected.
            #we only accept 1 client.
            #the exit message for the server is @@KILL_SERVER_END@@
            dbg(SERVER_NAME + ' waiting for connection', INFO1)
            conn, addr = s.accept()
            time.sleep(0.5)  #wait a little before connecting to JAVA server

            dbg(SERVER_NAME + ' waiting to java client', INFO1)
            #after being connected, create a socket as a client.
            self.connectToServer()

            dbg(SERVER_NAME + ' Connected by ' + str(addr), INFO1)

            while 1:
                data = ''
                returnMsg = ''
                keepAliveThread = KeepAliveThread(self.socket)

                while data.find(MSG_END) == -1:
                    received = conn.recv(BUFFER_SIZE)
                    if len(received) == 0:
                        sys.exit(0)  #ok, connection ended
                    if IS_PYTHON3K:
                        data = data + received.decode('utf-8')
                    else:
                        data = data + received

                try:
                    try:
                        if data.find(MSG_KILL_SERVER) != -1:
                            dbg(SERVER_NAME + ' kill message received', INFO1)
                            #break if we received kill message.
                            self.ended = True
                            sys.exit(0)

                        dbg(SERVER_NAME + ' starting keep alive thread', INFO2)
                        keepAliveThread.start()

                        if data.find(MSG_PYTHONPATH) != -1:
                            comps = []
                            for p in _sys_path:
                                comps.append((p, ' '))
                            returnMsg = self.getCompletionsMessage(None, comps)

                        else:
                            data = data[:data.rfind(MSG_END)]

                            if data.startswith(MSG_IMPORTS):
                                data = data.replace(MSG_IMPORTS, '')
                                data = unquote_plus(data)
                                defFile, comps = importsTipper.GenerateTip(
                                    data, log)
                                returnMsg = self.getCompletionsMessage(
                                    defFile, comps)

                            elif data.startswith(MSG_CHANGE_PYTHONPATH):
                                data = data.replace(MSG_CHANGE_PYTHONPATH, '')
                                data = unquote_plus(data)
                                ChangePythonPath(data)
                                returnMsg = MSG_OK

                            elif data.startswith(MSG_SEARCH):
                                data = data.replace(MSG_SEARCH, '')
                                data = unquote_plus(data)
                                (f, line,
                                 col), foundAs = importsTipper.Search(data)
                                returnMsg = self.getCompletionsMessage(
                                    f, [(line, col, foundAs)])

                            elif data.startswith(MSG_CHANGE_DIR):
                                data = data.replace(MSG_CHANGE_DIR, '')
                                data = unquote_plus(data)
                                CompleteFromDir(data)
                                returnMsg = MSG_OK

                            elif data.startswith(MSG_BIKE):
                                returnMsg = MSG_INVALID_REQUEST  #No longer supported.

                            else:
                                returnMsg = MSG_INVALID_REQUEST
                    except SystemExit:
                        returnMsg = self.getCompletionsMessage(
                            None, [('Exit:', 'SystemExit', '')])
                        keepAliveThread.lastMsg = returnMsg
                        raise
                    except:
                        dbg(SERVER_NAME + ' exception occurred', ERROR)
                        s = StringIO.StringIO()
                        traceback.print_exc(file=s)

                        err = s.getvalue()
                        dbg(SERVER_NAME + ' received error: ' + str(err),
                            ERROR)
                        returnMsg = self.getCompletionsMessage(
                            None, [('ERROR:', '%s\nLog:%s' %
                                    (err, log.GetContents()), '')])

                finally:
                    log.Clear()
                    keepAliveThread.lastMsg = returnMsg

            conn.close()
            self.ended = True
            sys.exit(0)  #connection broken

        except SystemExit:
            raise
            #No need to log SystemExit error
        except:
            s = StringIO.StringIO()
            exc_info = sys.exc_info()

            traceback.print_exception(exc_info[0],
                                      exc_info[1],
                                      exc_info[2],
                                      limit=None,
                                      file=s)
            err = s.getvalue()
            dbg(SERVER_NAME + ' received error: ' + str(err), ERROR)
            raise
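
The completion server above frames every request the same way: a command prefix, a quote_plus-encoded payload and an end marker, read from the socket until the marker appears. A minimal client-side sketch of that framing (the marker and prefix strings below are placeholders, not the real pydev constants, and the reply is returned raw):

import socket

try:
    from urllib import quote_plus           # Python 2
except ImportError:
    from urllib.parse import quote_plus     # Python 3

# Placeholder protocol constants -- the real values live in the pydev sources.
MSG_IMPORTS = '@@IMPORTS:'
MSG_END = 'END@@'

def request_completions(host, port, payload):
    # Frame the request the way the server expects: prefix + encoded payload + end marker.
    message = MSG_IMPORTS + quote_plus(payload) + MSG_END
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    try:
        s.sendall(message.encode('utf-8'))
        # The reply format is whatever getCompletionsMessage() produces; just read it back.
        return s.recv(64 * 1024).decode('utf-8')
    finally:
        s.close()
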
Example #40
0
    tools.addDir('Integrate With TV Guide', 'tv', 10, icon, fanart, '')
    tools.addDir('Run a Speed Test', 'ST', 10, icon, fanart, '')
    tools.addDir('Clear Cache', 'CC', 10, icon, fanart, '')


params = tools.get_params()
url = None
name = None
mode = None
iconimage = None
description = None
query = None
type = None

try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    iconimage = urllib.unquote_plus(params["iconimage"])
except:
    pass
try:
    mode = int(params["mode"])
except:
    pass
try:
Example #41
0
def Decode(formdict):
    return simplejson.loads(urllib.unquote_plus(formdict))
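
Decode() above assumes the form data arrived as a quote_plus-encoded JSON string. A round-trip sketch of that convention using the standard json module (simplejson is interchangeable here):

import json

try:
    from urllib import quote_plus, unquote_plus        # Python 2
except ImportError:
    from urllib.parse import quote_plus, unquote_plus  # Python 3

def Encode(formdict):
    # Counterpart of Decode(): serialize to JSON, then URL-encode the result.
    return quote_plus(json.dumps(formdict))

def Decode(encoded):
    return json.loads(unquote_plus(encoded))

payload = Encode({'name': 'Big Fan', 'year': 2009})
print(Decode(payload))  # a dict equal to the original
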
Example #42
0
    def do_GET(self):
        global g_param
        try:
            dprint(__name__, 2, "http request header:\n{0}", self.headers)
            dprint(__name__, 2, "http request path:\n{0}", self.path)

            # check for PMS address
            PMSaddress = ''
            pms_end = self.path.find(')')
            if self.path.startswith('/PMS(') and pms_end > -1:
                PMSaddress = urllib.unquote_plus(self.path[5:pms_end])
                self.path = self.path[pms_end + 1:]

            # break up path, separate PlexConnect options
            # clean path needed for filetype decoding
            parts = re.split(
                r'[?&]', self.path,
                1)  # should be '?' only, but we do some things different :-)
            if len(parts) == 1:
                self.path = parts[0]
                options = {}
                query = ''
            else:
                self.path = parts[0]

                # break up query string
                options = {}
                query = ''
                parts = parts[1].split('&')
                for part in parts:
                    if part.startswith('PlexConnect'):
                        # get options[]
                        opt = part.split('=', 1)
                        if len(opt) == 1:
                            options[opt[0]] = ''
                        else:
                            options[opt[0]] = urllib.unquote(opt[1])
                    else:
                        # recreate query string (non-PlexConnect) - has to be merged back when forwarded
                        if query == '':
                            query = '?' + part
                        else:
                            query += '&' + part

            # get aTV language setting
            options['aTVLanguage'] = Localize.pickLanguage(
                self.headers.get('Accept-Language', 'en'))

            # add client address - to be used in case UDID is unknown
            if 'X-Forwarded-For' in self.headers:
                options['aTVAddress'] = self.headers['X-Forwarded-For'].split(
                    ',', 1)[0]
            else:
                options['aTVAddress'] = self.client_address[0]

            # get aTV hard-/software parameters
            options['aTVFirmwareVersion'] = self.headers.get(
                'X-Apple-TV-Version', '5.1')
            options['aTVScreenResolution'] = self.headers.get(
                'X-Apple-TV-Resolution', '720')

            dprint(__name__, 2, "pms address:\n{0}", PMSaddress)
            dprint(__name__, 2, "cleaned path:\n{0}", self.path)
            dprint(__name__, 2, "PlexConnect options:\n{0}", options)
            dprint(__name__, 2, "additional arguments:\n{0}", query)

            if 'User-Agent' in self.headers and \
               'AppleTV' in self.headers['User-Agent']:

                # receive simple logging messages from the ATV
                if 'PlexConnectATVLogLevel' in options:
                    dprint('ATVLogger', int(options['PlexConnectATVLogLevel']),
                           options['PlexConnectLog'])
                    self.send_response(200)
                    self.send_header('Content-type', 'text/plain')
                    self.end_headers()
                    return

                # serve "*.cer" - Serve up certificate file to atv
                if self.path.endswith(".cer"):
                    dprint(__name__, 1, "serving *.cer: " + self.path)
                    if g_param['CSettings'].getSetting('certfile').startswith(
                            '.'):
                        # relative to current path
                        cfg_certfile = sys.path[0] + sep + g_param[
                            'CSettings'].getSetting('certfile')
                    else:
                        # absolute path
                        cfg_certfile = g_param['CSettings'].getSetting(
                            'certfile')
                    cfg_certfile = path.normpath(cfg_certfile)

                    cfg_certfile = path.splitext(cfg_certfile)[0] + '.cer'
                    try:
                        f = open(cfg_certfile, "rb")
                    except:
                        dprint(__name__, 0,
                               "Failed to access certificate: {0}",
                               cfg_certfile)
                        return

                    self.sendResponse(f.read(), 'text/xml', False)
                    f.close()
                    return

                # serve .js files to aTV
                # application, main: ignore path, send /assets/js/application.js
                # otherwise: path should be '/js', send /assets/js/*.js
                dirname = path.dirname(self.path)
                basename = path.basename(self.path)
                if basename in ("application.js", "main.js", "javascript-packed.js", "bootstrap.js") or \
                   basename.endswith(".js") and dirname == '/js':
                    if basename in ("main.js", "javascript-packed.js",
                                    "bootstrap.js"):
                        basename = "application.js"
                    dprint(__name__, 1, "serving /js/{0}", basename)
                    JS = JSConverter(basename, options)
                    self.sendResponse(JS, 'text/javascript', True)
                    return

                # serve "*.jpg" - thumbnails for old-style mainpage
                if self.path.endswith(".jpg"):
                    dprint(__name__, 1, "serving *.jpg: " + self.path)
                    f = open(sys.path[0] + sep + "assets" + self.path, "rb")
                    self.sendResponse(f.read(), 'image/jpeg', False)
                    f.close()
                    return

                # serve "*.png" - only png's support transparent colors
                if self.path.endswith(".png"):
                    dprint(__name__, 1, "serving *.png: " + self.path)
                    f = open(sys.path[0] + sep + "assets" + self.path, "rb")
                    self.sendResponse(f.read(), 'image/png', False)
                    f.close()
                    return

                # serve subtitle file - transcoded to aTV subtitle json
                if 'PlexConnect' in options and \
                   options['PlexConnect']=='Subtitle':
                    dprint(__name__, 1, "serving subtitle: " + self.path)
                    XML = Subtitle.getSubtitleJSON(PMSaddress,
                                                   self.path + query, options)
                    self.sendResponse(XML, 'application/json', True)
                    return

                # get everything else from XMLConverter - formerly limited to trailing "/" and &PlexConnect Cmds
                if True:
                    dprint(__name__, 1, "serving .xml: " + self.path)
                    XML = XMLConverter.XML_PMS2aTV(PMSaddress,
                                                   self.path + query, options)
                    self.sendResponse(XML, 'text/xml', True)
                    return
                """
                # unexpected request
                self.send_error(403,"Access denied: %s" % self.path)
                """

            else:
                self.send_error(
                    403, "Not Serving Client %s" % self.client_address[0])
        except IOError:
            self.send_error(404, "File Not Found: %s" % self.path)
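
The do_GET() handler above splits the incoming query string into PlexConnect-specific options (unquoted into a dict) and everything else (re-assembled and forwarded untouched). A standalone sketch of that split, run on an invented sample path:

import re

try:
    from urllib import unquote          # Python 2
except ImportError:
    from urllib.parse import unquote    # Python 3

def split_plexconnect_options(path):
    parts = re.split(r'[?&]', path, 1)
    options, query = {}, ''
    if len(parts) > 1:
        for part in parts[1].split('&'):
            if part.startswith('PlexConnect'):
                opt = part.split('=', 1)
                options[opt[0]] = unquote(opt[1]) if len(opt) == 2 else ''
            else:
                query += ('?' if query == '' else '&') + part
    return parts[0], options, query

print(split_plexconnect_options('/library/sections?PlexConnect=Subtitle&X-Plex-Token=abc'))
# -> ('/library/sections', {'PlexConnect': 'Subtitle'}, '?X-Plex-Token=abc')
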
Example #43
0
    ok = True
    liz = xbmcgui.ListItem(name,
                           iconImage="DefaultFolder.png",
                           thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name})
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                     url=u,
                                     listitem=liz,
                                     isFolder=True)
    return ok


params = parameters_string_to_dict(sys.argv[2])
mode = params.get('mode')
url = params.get('url')
if isinstance(url, str):
    url = urllib.unquote_plus(url)

if mode == 'listVideos':
    listVideos(url)
elif mode == 'sortDirection':
    sortDirection(url)
elif mode == 'playVideo':
    playVideo(url)
elif mode == 'playAll':
    playAll(url)
elif mode == 'search':
    search()
else:
    index()
def decodeUrl(encodedUrl):
    if not encodedUrl: return encodedUrl
    return urllib.unquote_plus(encodedUrl)
Example #45
0
fullQueryList = requestInfoList[1].split('?')
#print fullQueryList

form_dict = {}

# no variables to get
if len(fullQueryList) == 1:
    path_info = fullQueryList[0]
# we need to do further processing on the variables
else:
    path_info = fullQueryList[0]
    tmpVariables = fullQueryList[1].split('&')
    for variablePair in tmpVariables:
        key, value = variablePair.split('=', 1)  # split on the first '=' only
        form_dict[key] = urllib.unquote_plus(value)

print allLines

cookie = ''
# get the cookie
for line in allLines:
    tmp = line.split(':', 1)  # split on the first ':' only, so the value may contain colons
    if tmp[0] == 'cookie':
        cookie = tmp[1].strip()

print 'cookie = ', cookie

environ = {}  # make a fake request dictionary
environ['PATH_INFO'] = path_info
environ['wsgi.input'] = ''
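
Example #45 parses the query string by hand; the standard library's parse_qsl() performs the same '&'/'=' splitting and unquote_plus() decoding in one call. A sketch, assuming single-valued parameters as in the code above:

try:
    from urlparse import parse_qsl        # Python 2
except ImportError:
    from urllib.parse import parse_qsl    # Python 3

query = 'name=Big+Fan&imdb_id=tt1226837&mode=strm%20movie%20dialog'
form_dict = dict(parse_qsl(query))
print(form_dict)
# -> {'name': 'Big Fan', 'imdb_id': 'tt1226837', 'mode': 'strm movie dialog'}
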
Example #46
0
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                     url=u,
                                     listitem=liz)
    return ok


def addDir(name, url, mode, iconimage):
    u = sys.argv[0] + "?url=" + urllib.quote_plus(url) + "&mode=" + str(mode)
    ok = True
    liz = xbmcgui.ListItem(name,
                           iconImage="DefaultFolder.png",
                           thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name})
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                     url=u,
                                     listitem=liz,
                                     isFolder=True)
    return ok


params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))

if mode == 'listVideos':
    listVideos(url)
elif mode == 'playVideo':
    playVideo(url)
else:
    index()
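
addDir() above builds the plugin URL with quote_plus() and the dispatcher decodes it with unquote_plus(); the two must stay symmetric or parameters containing spaces and slashes break. A round-trip sketch of that convention (parse_plugin_params() is a stand-in for the plugin's own parameters_string_to_dict() helper):

try:
    from urllib import quote_plus, unquote_plus        # Python 2
except ImportError:
    from urllib.parse import quote_plus, unquote_plus  # Python 3

def build_plugin_url(base, url, mode):
    # Mirrors addDir(): encode the target URL so it survives as a query value.
    return base + "?url=" + quote_plus(url) + "&mode=" + str(mode)

def parse_plugin_params(paramstring):
    # Stand-in for the plugin's parameters_string_to_dict() helper.
    params = {}
    for pair in paramstring.lstrip('?').split('&'):
        if '=' in pair:
            key, value = pair.split('=', 1)
            params[key] = unquote_plus(value)
    return params

u = build_plugin_url('plugin://plugin.video.example/', 'http://host/a b/video.mp4', 'listVideos')
print(parse_plugin_params(u.split('?', 1)[1]))
# -> {'url': 'http://host/a b/video.mp4', 'mode': 'listVideos'}
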
Example #47
0
    def createXBMCListItem(self, item):        
        
        title = item['title']
        m_type = item['type']
        icon = item['icon']    
        v_type = 'default'  
        try:
            v_type = item['videoType'] if item['videoType'] is not None else 'default'
        except:
            v_type = 'default'

        if icon and not icon.startswith('http'):
            try:
                if not fu.fileExists(icon):
                    tryFile = os.path.join(common.Paths.modulesDir, icon)
                    if not fu.fileExists(tryFile):
                        tryFile = os.path.join(common.Paths.customModulesDir, icon)
                    if fu.fileExists(tryFile):
                        icon = tryFile
            except:
                pass

        if not icon:
            if m_type == 'video':
                icon = common.Paths.defaultVideoIcon
            else:
                icon = common.Paths.defaultCategoryIcon

        fanart = item['fanart']
        if not fanart:
            fanart = common.Paths.pluginFanart

        liz = xbmcgui.ListItem(title)
        try:
            liz.setArt({'thumb': icon, 'fanart': fanart})
        except:
            liz.setProperty('fanart_image', fanart)
            liz.setThumbnailImage(icon)
            common.log('main.py:374: setThumbnailImage is deprecated')

        """
        General Values that apply to all types:
            count         : integer (12) - can be used to store an id for later, or for sorting purposes
            size          : long (1024) - size in bytes
            date          : string (%d.%m.%Y / 01.01.2009) - file date

        Video Values:
            genre         : string (Comedy)
            year          : integer (2009)
            episode       : integer (4)
            season        : integer (1)
            top250        : integer (192)
            tracknumber   : integer (3)
            rating        : float (6.4) - range is 0..10
            watched       : deprecated - use playcount instead
            playcount     : integer (2) - number of times this item has been played
            overlay       : integer (2) - range is 0..8.  See GUIListItem.h for values
            cast          : list (Michael C. Hall)
            castandrole   : list (Michael C. Hall|Dexter)
            director      : string (Dagur Kari)
            mpaa          : string (PG-13)
            plot          : string (Long Description)
            plotoutline   : string (Short Description)
            title         : string (Big Fan)
            originaltitle : string (Big Fan)
            duration      : string (3:18)
            studio        : string (Warner Bros.)
            tagline       : string (An awesome movie) - short description of movie
            writer        : string (Robert D. Siegel)
            tvshowtitle   : string (Heroes)
            premiered     : string (2005-03-04)
            status        : string (Continuing) - status of a TVshow
            code          : string (tt0110293) - IMDb code
            aired         : string (2008-12-07)
            credits       : string (Andy Kaufman) - writing credits
            lastplayed    : string (%Y-%m-%d %h:%m:%s = 2009-04-05 23:16:04)
            album         : string (The Joshua Tree)
            votes         : string (12345 votes)
            trailer       : string (/home/user/trailer.avi)
        """

        infoLabels = {}
        for video_info_name in item.infos.keys():
            infoLabels[video_info_name] = item[video_info_name]
        infoLabels['title'] = title

        liz.setInfo('video', infoLabels)

        url = urllib.unquote_plus(item['url'])
        liz.setPath(url)

        if m_type == 'video':
            liz.setProperty('IsPlayable','true')

        if title.startswith('castflash'):
            try:
                liz.setMimeType('application/vnd.apple.mpegurl')
                liz.setContentLookup(False)
            except:
                common.showError('Update Kodi to 16+ to view this stream')
                return None
            
        if title.startswith('nohead'):
            try:
                liz.setMimeType('video/x-mpegts')
                liz.setContentLookup(False)
            except:
                pass
        
        if v_type is not None and v_type != 'default':
            try:
                if float(common.xbmcVersion) >= 17.0:
                    common.log('Trying to use inputstream.adaptive to demux stream... ', xbmc.LOGINFO)
                    liz.setProperty('inputstreamaddon', 'inputstream.adaptive')
                    liz.setContentLookup(False)

                    if v_type == 'adaptive_hls':
                        if float(common.xbmcVersion) >= 17.5:
                            liz.setMimeType('application/vnd.apple.mpegurl')
                            liz.setProperty('inputstream.adaptive.manifest_type', 'hls')
                            if '|' in url:
                                url,strhdr = url.split('|')
                                liz.setProperty('inputstream.adaptive.stream_headers', strhdr)
                                liz.setPath(url)
                        else:
                            liz.setProperty('inputstreamaddon', None)
                            liz.setContentLookup(True)
                        
                    elif v_type == 'adaptive_mpd':                    
                        liz.setMimeType('application/dash+xml')
                        liz.setProperty('inputstream.adaptive.manifest_type', 'mpd')                                        
                        
                    elif v_type == 'adaptive_ism':
                        liz.setProperty('inputstream.adaptive.manifest_type', 'ism')
                        liz.setMimeType('application/vnd.ms-sstr+xml')
                    
                else:
                    pass
            except:
                common.log('Error using inputstream.adaptive. Make sure plugin is installed and Kodi is version 17+. Falling back to ffmpeg ...')
                #common.showError('Error using inputstream.adaptive. Make sure plugin is installed and Kodi is version 17+.')
                liz.setProperty('inputstreamaddon', None)
                liz.setContentLookup(True)
                pass

        return liz
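
The adaptive branches of createXBMCListItem() pair each videoType with a mime type and an inputstream.adaptive manifest type, and split optional stream headers off the URL at a '|' separator before calling setPath(). A small standalone sketch of that mapping and the URL/header split (sample URL invented):

# Mapping taken from the adaptive branches of the example above.
ADAPTIVE_TYPES = {
    'adaptive_hls': ('application/vnd.apple.mpegurl', 'hls'),
    'adaptive_mpd': ('application/dash+xml', 'mpd'),
    'adaptive_ism': ('application/vnd.ms-sstr+xml', 'ism'),
}

def split_stream_headers(url):
    # 'http://host/play.m3u8|User-Agent=Foo' -> ('http://host/play.m3u8', 'User-Agent=Foo')
    if '|' in url:
        return tuple(url.split('|', 1))
    return url, ''

stream, headers = split_stream_headers('http://host/play.m3u8|User-Agent=Foo')
print(ADAPTIVE_TYPES['adaptive_hls'] + (stream, headers))
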
Example #48
0
            splitparams = pairsofparams[i].split('=')
            if (len(splitparams)) == 2:
                param[splitparams[0]] = splitparams[1]
    return param


params = get_params()
url = None
name = None
mode = None
iconimage = None
parser = None
parserfunction = None

try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode = int(params["mode"])
except:
    pass
try:
    regexs = params["regexs"]
except:
    pass
try:
Example #49
0
        if (params[len(params) - 1] == '/'):
            params = params[0:len(params) - 2]
        pairsofparams = cleanedparams.split('&')
        param = {}
        for i in range(len(pairsofparams)):
            splitparams = {}
            splitparams = pairsofparams[i].split('=')
            if (len(splitparams)) == 2:
                param[splitparams[0]] = splitparams[1]
    return param


params = get_params()

mode = int(params.get('mode', 0))
url = urllib.unquote_plus(params.get('url', ''))
name = urllib.unquote_plus(params.get('name', ''))
title = urllib.unquote_plus(params.get('title', ''))
thumbnail = urllib.unquote_plus(params.get('thumbnail', ''))
season = urllib.unquote_plus(params.get('season', ''))
episode = urllib.unquote_plus(params.get('episode', ''))
ep_name = urllib.unquote_plus(params.get('ep_name', ''))

if mode: print 'Mode: ' + str(mode)
if url: print 'URL: ' + str(url)

if mode == 0 or not url or len(url) < 1: MAIN()
elif mode == 1: getTVshows(url)
elif mode == 2: getSeasons(name, url)
elif mode == 3: getEpisodes(url, season, title, thumbnail)
elif mode == 4: TVSHOWS(url)
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']),
                                      int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            r = client.request(url,
                               headers=headers,
                               output='extended',
                               timeout='10')

            if imdb not in r[0]: raise Exception()

            cookie = r[4]
            headers = r[3]
            result = r[0]

            try:
                r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
                for i in r:
                    try:
                        sources.append({
                            'source':
                            'gvideo',
                            'quality':
                            directstream.googletag(i)[0]['quality'],
                            'language':
                            'en',
                            'url':
                            i,
                            'direct':
                            True,
                            'debridonly':
                            False
                        })
                    except:
                        pass
            except:
                pass

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
            headers['Authorization'] = auth
            headers['Referer'] = url

            u = '/ajax/vsozrflxcw.php'
            self.base_link = client.request(self.base_link,
                                            headers=headers,
                                            output='geturl')
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'nopop': '',
                'elid': elid
            }
            post = urllib.urlencode(post)
            cookie += ';%s=%s' % (idEl, elid)
            headers['Cookie'] = cookie

            r = client.request(u,
                               post=post,
                               headers=headers,
                               cookie=cookie,
                               XHR=True)
            r = str(json.loads(r))

            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try:
                    if 'google' in i:
                        quality = 'SD'

                        if 'googleapis' in i:
                            try:
                                quality = source_utils.check_sd_url(i)
                            except Exception:
                                pass

                        if 'googleusercontent' in i:
                            i = directstream.googleproxy(i)
                            try:
                                quality = directstream.googletag(
                                    i)[0]['quality']
                            except Exception:
                                pass

                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })

                    elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                        try:
                            quality = source_utils.check_sd_url(i)

                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })

                        except Exception:
                            pass
                    else:
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue

                        sources.append({
                            'source': hoster,
                            'quality': '720p',
                            'language': 'en',
                            'url': i,
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CartoonHD - Exception: \n' + str(failure))
            return sources
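
sources() above receives its arguments as a single query string and flattens urlparse.parse_qs()'s list values into plain strings. A standalone sketch of that flattening idiom:

try:
    from urlparse import parse_qs        # Python 2
except ImportError:
    from urllib.parse import parse_qs    # Python 3

url = 'title=Big+Fan&year=2009&imdb=tt1226837&aliases=%5B%5D'
data = parse_qs(url)                     # values come back as lists: {'title': ['Big Fan'], ...}
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print(data)   # -> {'title': 'Big Fan', 'year': '2009', 'imdb': 'tt1226837', 'aliases': '[]'}
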
Example #51
0
def findvideos(item):
    logger.info()
    if item.contentSeason != '':
        return episode_links(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent pattern, old format
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                       text_color="green"))

    # Online links pattern
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"

        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            enlace = show_links(code)
            links = servertools.findvideos(data=enlace[0])
            if links and "peliculas.nu" not in links:
                if i == 0:
                    extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()

                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title, action="", text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)

                title = "   Ver vídeo en " + links[0][2]
                itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url,
                                       title="   Ver vídeo en Googlevideo (Máxima calidad)"))

    # Download links pattern
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"

        extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip()

        if size:
            title_bloque += " [%s]" % size
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)

        if '<div class="subiendo">' in bloque:
            itemlist.append(item.clone(title="   Los enlaces se están subiendo", action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
                scrapedserver = "uploadedto"
            titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8")
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                           text_color="green"))
                continue
            if servertools.is_server_enabled(scrapedserver):
                try:
                    # servers_module = __import__("servers." + scrapedserver)
                    # Count the number of links
                    urls = show_links(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
                except:
                    pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))

    return itemlist
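
findvideos() above de-obfuscates the embedded Googlevideo player by swapping '@' back to '%' and unquote_plus()-ing the result, which yields plain HTML it can regex for the src attribute. A minimal sketch of that trick on an invented sample:

import re

try:
    from urllib import unquote_plus        # Python 2
except ImportError:
    from urllib.parse import unquote_plus  # Python 3

# Invented sample of the obfuscated script payload: every '%' replaced by '@'.
scriptg = '@3Cvideo@20src@3D@22http@3A@2F@2Fexample.com@2Fv.mp4@22@3E'

gvideo = unquote_plus(scriptg.replace("@", "%"))
print(gvideo)                               # -> <video src="http://example.com/v.mp4">
print(re.findall('src="([^"]+)"', gvideo))  # -> ['http://example.com/v.mp4']
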
Example #52
0
    return param


params = get_params()
mode = -1

try:
    mode = int(params['mode'])
except:
    pass

cacheToDisc = True
doRefresh = False

if mode == _COUNTRY:
    label = urllib.unquote_plus(params['label'])
    abrv = urllib.unquote_plus(params['abrv'])

    Country(label, abrv)

elif mode == _VPN:
    label = urllib.unquote_plus(params['label'])
    abrv = urllib.unquote_plus(params['abrv'])
    server = urllib.unquote_plus(params['server'])

    if len(server) == 0:
        server = vpn.GetBest(abrv)[3]
        doRefresh = True

    success = connect(label, abrv, server)
Example #53
0
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                     url=u,
                                     listitem=liz,
                                     isFolder=False)
    return ok


params = get_params()

url = None
name = None
mode = None
iconimage = None

try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode = int(params["mode"])
except:
    pass
try:
    iconimage = urllib.unquote_plus(params["iconimage"])
except:
    pass
try:
def doDownload(url, dest, title, image, headers):

    headers = json.loads(urllib.unquote_plus(headers))

    url = urllib.unquote_plus(url)

    title = urllib.unquote_plus(title)

    image = urllib.unquote_plus(image)

    dest = urllib.unquote_plus(dest)

    file = dest.rsplit(os.sep, 1)[-1]

    resp = getResponse(url, headers, 0)

    if not resp:
        xbmcgui.Dialog().ok(title, dest, 'Download failed',
                            'No response from server')
        return

    try:
        content = int(resp.headers['Content-Length'])
    except:
        content = 0

    try:
        resumable = 'bytes' in resp.headers['Accept-Ranges'].lower()
    except:
        resumable = False

    #print "Download Header"
    #print resp.headers
    if resumable:
        print "Download is resumable"

    if content < 1:
        xbmcgui.Dialog().ok(title, file, 'Unknown filesize',
                            'Unable to download')
        return

    size = 1024 * 1024
    mb = content / (1024 * 1024)

    if content < size:
        size = content

    total = 0
    notify = 0
    errors = 0
    count = 0
    resume = 0
    sleep = 0

    if xbmcgui.Dialog().yesno(title + ' - Confirm Download', file,
                              'Complete file is %dMB' % mb,
                              'Continue with download?', 'Confirm',
                              'Cancel') == 1:
        return

    print 'Download File Size : %dMB %s ' % (mb, dest)

    #f = open(dest, mode='wb')
    f = xbmcvfs.File(dest, 'w')

    chunk = None
    chunks = []

    while True:
        downloaded = total
        for c in chunks:
            downloaded += len(c)
        percent = min(100 * downloaded / content, 100)
        if percent >= notify:
            xbmc.executebuiltin("XBMC.Notification(%s,%s,%i,%s)" %
                                (title + ' - Download Progress - ' +
                                 str(percent) + '%', dest, 10000, image))

            print 'Download percent : %s %s %dMB downloaded : %sMB File Size : %sMB' % (
                str(percent) + '%', dest, mb, downloaded / 1000000,
                content / 1000000)

            notify += 10

        chunk = None
        error = False

        try:
            chunk = resp.read(size)
            if not chunk:
                if percent < 99:
                    error = True
                else:
                    while len(chunks) > 0:
                        c = chunks.pop(0)
                        f.write(c)
                        del c

                    f.close()
                    print '%s download complete' % (dest)
                    return done(title, dest, True)

        except Exception, e:
            print str(e)
            error = True
            sleep = 10
            errno = 0

            if hasattr(e, 'errno'):
                errno = e.errno

            if errno == 10035:  # 'A non-blocking socket operation could not be completed immediately'
                pass

            if errno == 10054:  #'An existing connection was forcibly closed by the remote host'
                errors = 10  #force resume
                sleep = 30

            if errno == 11001:  # 'getaddrinfo failed'
                errors = 10  #force resume
                sleep = 30

        if chunk:
            errors = 0
            chunks.append(chunk)
            if len(chunks) > 5:
                c = chunks.pop(0)
                f.write(c)
                total += len(c)
                del c

        if error:
            errors += 1
            count += 1
            print '%d Error(s) whilst downloading %s' % (count, dest)
            xbmc.sleep(sleep * 1000)

        if (resumable and errors > 0) or errors >= 10:
            if (not resumable and resume >= 50) or resume >= 500:
                #Give up!
                print '%s download canceled - too many error whilst downloading' % (
                    dest)
                return done(title, dest, False)

            resume += 1
            errors = 0
            if resumable:
                chunks = []
                #create new response
                print 'Download resumed (%d) %s' % (resume, dest)
                resp = getResponse(url, headers, total)
            else:
                #use existing response
                pass
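
The downloader above resumes by calling getResponse(url, headers, total) with the byte count already written; that helper is not shown in the excerpt, but the resume it relies on is normally done with an HTTP Range header. A hedged sketch of what such a helper could look like (timeout and error handling kept minimal):

try:
    import urllib2 as _request           # Python 2
except ImportError:
    import urllib.request as _request    # Python 3

def getResponse(url, headers, start):
    # Sketch only: open the URL, resuming from byte offset `start` via a
    # Range header when the server advertised 'Accept-Ranges: bytes'.
    req = _request.Request(url, headers=headers or {})
    if start > 0:
        req.add_header('Range', 'bytes=%d-' % start)
    return _request.urlopen(req, timeout=30)
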
Example #55
0
geshou=None
cat=None
fname=None
order=None
date=None
page=None
thumb=None
listpage=None
timelist=None

try:
    mode=int(params["mode"])
except:
    pass
try:
    url=urllib.unquote_plus(params["url"])
except:
    pass
try:
    name=urllib.unquote_plus(params["name"])
except:
    pass
try:
    area=urllib.unquote_plus(params["area"])
except:
    pass
try:
    artist=urllib.unquote_plus(params["artist"])
except:
    pass
try:
Example #56
0
def UnquotePlus(sUrl):
    return urllib.unquote_plus(sUrl)
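
Example #56 wraps the Python 2 location of unquote_plus. A version-agnostic variant of the same wrapper (a sketch; the surrounding plugins target Python 2):

try:
    from urllib import unquote_plus        # Python 2
except ImportError:
    from urllib.parse import unquote_plus  # Python 3

def UnquotePlus(sUrl):
    return unquote_plus(sUrl)

print(UnquotePlus('Big+Fan+%282009%29'))  # -> Big Fan (2009)
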
Example #57
0
def lambda_handler(event, context):
    sock = create_socket()

    if not validate_uuid(TOKEN):
        logger.critical('{} is not a valid token. Exiting.'.format(TOKEN))
        raise SystemExit
    else:
        # Get the object from the event and show its content type
        bucket = event['Records'][0]['s3']['bucket']['name']
        key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key']).decode('utf8')

        try:
            response = s3.get_object(Bucket=bucket, Key=key)
            logger.info('Fetched file {} from S3 bucket {}'.format(key, bucket))
            body = response['Body']
            data = body.read()
            # If the name has a .gz extension, then decompress the data
            if key[-3:] == '.gz':
                with tempfile.TemporaryFile() as temporary_file:
                    temporary_file.write(data)
                    temporary_file.seek(0)

                    with gzip.GzipFile(fileobj=temporary_file, mode="r") as gz:
                        data = gz.read()

            lines = data.split("\n")
            logger.info('Total number of lines: {}'.format(len(list(lines))))

            if validate_elb_log(str(key)) is True:
                # timestamp elb client:port backend:port request_processing_time backend_processing_time
                # response_processing_time elb_status_code backend_status_code received_bytes sent_bytes
                # "request" "user_agent" ssl_cipher ssl_protocol
                logger.info('File={} is AWS ELB log format. Parsing and sending to R7'.format(key))
                rows = csv.reader(data.splitlines(), delimiter=' ', quotechar='"')
                for line in rows:
                    request = line[11].split(' ')
                    idx = request[1].find('/', 9)
                    url = request[1][idx:]
                    parsed = OrderedDict()
                    parsed['timestamp'] = line[0]
                    parsed['elb_name'] = line[1]
                    parsed['client_ip'] = hashlib.sha256((line[2].split(':')[0] + SALT).encode()).hexdigest()
                    parsed['backend_ip'] = line[3].split(':')[0]
                    parsed['request_processing_time'] = line[4]
                    parsed['backend_processing_time'] = line[5]
                    parsed['response_processing_time'] = line[6]
                    parsed['elb_status_code'] = line[7]
                    parsed['backend_status_code'] = line[8]
                    parsed['received_bytes'] = line[9]
                    parsed['sent_bytes'] = line[10]
                    parsed['method'] = request[0]
                    parsed['url'] = url
                    parsed['user_agent'] = line[12]
                    parsed['ssl_cipher'] = line[13]
                    parsed['ssl_protocol'] = line[14]
                    mask = [
                        'elb_name',
                        'ssl_cipher'
                    ]
                    msg = ' '.join([
                        '"{}"'.format(str(value)) for value in mask_parsed(parsed, mask).values()
                    ])
                    sock.sendall('{} {}\n'.format(TOKEN, msg))
                logger.info('Finished sending file={} to R7'.format(key))
            elif validate_alb_log(str(key)) is True:
                logger.info('File={} is AWS ALB log format. Parsing and sending to R7'.format(key))
                rows = csv.reader(data.splitlines(), delimiter=' ', quotechar='"')
                total_run_count = 0
                good_run_count = 0
                bad_run_count = 0
                for line in rows:
                    total_run_count += 1
                    try:
                        request = line[12].split(' ')
                        url = request[1]
                        try:
                            http_version = request[2].split('/')[-1:][0]
                        except:
                            http_version = request[2]
                        parsed = OrderedDict()
                        parsed['type'] = line[0]
                        parsed['timestamp'] = line[1]
                        parsed['elb_id'] = line[2]
                        parsed['client_ip'] = hashlib.sha256((line[3].split(':')[0] + SALT).encode()).hexdigest()
                        parsed['client_port'] = line[3].split(':')[1]
                        parsed['target_ip'] = line[4].split(':')[0]
                        parsed['target_port'] = line[4].split(':')[1]
                        parsed['request_processing_time'] = line[5]
                        parsed['target_processing_time'] = line[6]
                        parsed['response_processing_time'] = line[7]
                        parsed['elb_status_code'] = line[8]
                        parsed['target_status_code'] = line[9]
                        parsed['received_bytes'] = line[10]
                        parsed['sent_bytes'] = line[11]
                        parsed['method'] = request[0]
                        parsed['url'] = url
                        parsed['http_version'] = http_version
                        parsed['user_agent'] = line[13]
                        parsed['ssl_cipher'] = line[14]
                        parsed['ssl_protocol'] = line[15]
                        parsed['target_group_arn'] = line[16]
                        parsed['trace_id'] = line[17]
                        mask = [
                            'elb_id',
                            'ssl_cipher',
                            'target_group_arn',
                            'trace_id'
                        ]
                        msg = ' '.join([
                            '"{}"'.format(str(value)) for value in mask_parsed(parsed, mask).values()
                        ])
                        sock.sendall('{} {}\n'.format(TOKEN, msg))
                        good_run_count += 1
                    except IndexError:
                        bad_run_count += 1
                        logger.info('[ALB logs] bad log line: {}'.format(line))
                        pass
                logger.info('[ALB logs] total run count: {}'.format(total_run_count))
                logger.info('[ALB logs] processed-and-sent run count: {}'.format(good_run_count))
                logger.info('[ALB logs] bad run count: {}'.format(bad_run_count))
                logger.info('Finished sending file={} to R7'.format(key))
            elif validate_cf_log(str(key)) is True:
                # date time x-edge-location sc-bytes c-ip cs-method cs(Host)
                # cs-uri-stem sc-status cs(Referer) cs(User-Agent) cs-uri-query
                # cs(Cookie) x-edge-result-type x-edge-request-id x-host-header
                # cs-protocol cs-bytes time-taken x-forwarded-for ssl-protocol
                # ssl-cipher x-edge-response-result-type
                logger.info('File={} is AWS CloudFront log format. Parsing and sending to R7'.format(key))
                rows = csv.reader(data.splitlines(), delimiter='\t', quotechar='"')
                for line in rows:
                    # Skip headers and lines with insufficient values
                    if len(line) < 23:
                        continue
                    msg = "\"{0}T{1}Z\" x_edge_location=\"{2}\"" \
                          " sc_bytes=\"{3}\" c_ip=\"{4}\" cs_method=\"{5}\"" \
                          " cs_host=\"{6}\" cs_uri_stem=\"{7}\" sc_status=\"{8}\"" \
                          " cs_referer=\"{9}\" cs_user_agent=\"{10}\" cs_uri_query=\"{11}\"" \
                          " cs_cookie=\"{12}\" x_edge_result_type=\"{13}\"" \
                          " x_edge_request_id=\"{14}\" x_host_header=\"{15}\"" \
                          " cs_protocol=\"{16}\" cs_bytes=\"{17}\" time_taken=\"{18}\"" \
                          " x_forwarded_for=\"{19}\" ssl_protocol=\"{20}\"" \
                          " ssl_cipher=\"{21}\" x_edge_response_result_type=\"{22}\"\n" \
                        .format(*line)
                    sock.sendall('{} {}\n'.format(TOKEN, msg))
                logger.info('Finished sending file={} to R7'.format(key))
            elif validate_ct_log(str(key)) is True:
                logger.info('File={} is AWS CloudTrail log format. Parsing and sending to R7'.format(key))
                cloud_trail = json.loads(data)
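                # CloudTrail files hold a JSON object with a 'Records' array; send each record as its own event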
                for event in cloud_trail['Records']:
                    sock.sendall('{} {}\n'.format(TOKEN, json.dumps(event)))
                logger.info('Finished sending file={} to R7'.format(key))
            else:
                logger.info('File={} is unrecognized log format. Sending raw lines to R7'.format(key))
                for line in lines:
                    sock.sendall('{} {}\n'.format(TOKEN, line))
                logger.info('Finished sending file={} to R7'.format(key))
        except Exception as e:
            logger.error('Exception: {}'.format(e))
        finally:
            sock.close()
            logger.info('Function execution finished.')
Example #58
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}
            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']),
                                      int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            result = client.request(url, headers=headers, timeout='10')
            result = client.parseDOM(result, 'title')[0]

            if '%TITLE%' in result: raise Exception()

            r = client.request(url,
                               headers=headers,
                               output='extended',
                               timeout='10')

            if imdb not in r[0]: raise Exception()

            cookie = r[4]
            headers = r[3]
            result = r[0]

            try:
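                # Collect any direct Google Video redirector links embedded in the page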
                r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
                for i in r:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

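            # Build a Bearer token from the __utmx cookie value, falling back to 'false' when it is absent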
            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Cookie'] = cookie
            headers['Referer'] = url

            u = urlparse.urljoin(self.base_link, self.streampost)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

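            # elid is the current epoch time, base64-encoded and URL-quoted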
            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall(r"var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall(r'elid\s*=\s*"([^"]+)', result)[0]
            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'elid': elid
            }
            post = urllib.urlencode(post)

            with requests.session() as s:
                try:
                    headers_request = {
                        'referer': url,
                        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                                      ' AppleWebKit/537.36 (KHTML, like Gecko) '
                                      'Chrome/63.0.3239.84 Safari/537.36'
                    }
                    data = {
                        # Use the action computed above so movie pages post
                        # getMovieEmb instead of always getEpisodeEmb
                        'action': action,
                        'idEl': idEl,
                        'token': token,
                        'nopop': ''
                    }
                    p = s.post(self.base_link + self.streampost,
                               data=data,
                               headers=headers_request)
                except:
                    print("Unexpected error in Flix Sources Script:",
                          sys.exc_info()[0])

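            # Pull every quoted http(s) URL out of the string form of the JSON response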
            r = str(json.loads(p.text))
            r = re.findall(r"'(http.+?)'", r) + re.findall(r'"(http.+?)"', r)

            for i in r:
                try:
                    if 'googleapis' in i:
                        sources.append({
                            'source': 'GVIDEO',
                            'quality': 'SD',
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    else:
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        urls, host, direct = source_utils.check_directstreams(
                            i, hoster)
                        if valid:
                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        else:
                            sources.append({
                                'source': 'CDN',
                                'quality': 'SD',
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })

                except:
                    pass

            return sources
        except:
            return sources
Example #59
0
 def _get_valuelist(self):
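     # Split the stored comma-separated string and URL-decode each entry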
     data = self.strvaluelist.split(',')
     result = []
     for d in data:
         result.append(urllib.unquote_plus(d))
     return result
Example #60
0
 def _parse_argv( self ):
     # call _Info() with our formatted argv to create the self.args object
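     # sys.argv[2] holds the plugin's query string ("?key=value&..."); dropping the
     # leading "?" and turning "&" into ", " makes it usable as keyword arguments to _Info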
     exec "self.args = _Info(%s)" % ( sys.argv[ 2 ][ 1 : ].replace( "&", ", " ), )
     self.args.download_url = urllib.unquote_plus( self.args.download_url )
     self.args.repo = urllib.unquote_plus( self.args.repo )