Example #1
def annual_update():
    import db, logger
    myDB = db.DBConnection()
    annuallist = myDB.select('SELECT * FROM annuals')
    if annuallist is None:
        logger.info('no annuals to update.')
        return

    cnames = []
    #populate the ComicName field with the corresponding series name from the comics table.
    for ann in annuallist:
        coms = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ann['ComicID']]).fetchone()
        cnames.append({'ComicID':     ann['ComicID'],
                       'ComicName':   coms['ComicName']
                      })

    #write in a separate loop to avoid db locks
    i=0
    for cns in cnames:
        ctrlVal = {"ComicID":      cns['ComicID']}
        newVal = {"ComicName":     cns['ComicName']}
        myDB.upsert("annuals", newVal, ctrlVal)
        i+=1

    logger.info(str(i) + ' series have been updated in the annuals table.')
    return 
Example #2
    def discover(self):
        try:
            ips = self.filterValidIps(self.__getDnsServerIPs())
            logger.info("Found %s dns servers used by host" % (len(ips)))
            self.serversIpList.extend(ips)
        except Exception, ex:
            logger.warn('Failed to get DNS Servers information. %s' % ex)
Example #3
def searchresults(params,tecleado,category):
	logger.info("[cinegratis.py] search")

	buscador.salvar_busquedas(params,tecleado,category)
	tecleado = tecleado.replace(" ", "+")
	searchUrl = "http://www.cinegratis.net/index.php?module=search&title="+tecleado
	listsimple(params,searchUrl,category)
Example #4
def checkFolder():
    import PostProcessor, logger
    #monitor a selected folder for 'snatched' files that haven't been processed
    logger.info('Checking folder ' + mylar.CHECK_FOLDER + ' for newly snatched downloads')
    PostProcess = PostProcessor.PostProcessor('Manual Run', mylar.CHECK_FOLDER)
    result = PostProcess.Process()
    logger.info('Finished checking for newly snatched downloads')
Example #5
def performsearch(texto):
	logger.info("[cinegratis.py] performsearch")
	url = "http://www.cinegratis.net/index.php?module=search&title="+texto

	# Download the page
	data = scrapertools.cachePage(url)

	# Extract the items
	patronvideos  = "<a href='(index.php\?module\=player[^']+)'[^>]*>(.*?)</a>"
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	resultados = []

	for match in matches:
		# Attributes
		scrapedtitle = match[1]
		scrapedtitle = scrapedtitle.replace("<span class='style4'>","")
		scrapedtitle = scrapedtitle.replace("</span>","")
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = ""
		scrapedplot = ""

		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Add to the XBMC listing
		resultados.append( [CHANNELNAME , "findvideos" , "buscador" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot ] )
		
	return resultados
Example #6
def listsimple(item):
	logger.info("[cinegratis.py] listsimple")

	url = item.url

	# Download the page
	data = scrapertools.cachePage(url)

	# Extract the items
	patronvideos  = "<a href='(index.php\?module\=player[^']+)'[^>]*>(.*?)</a>"
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	itemlist = []
	for match in matches:
		# Attributes
		scrapedtitle = match[1]
		scrapedtitle = scrapedtitle.replace("<span class='style4'>","")
		scrapedtitle = scrapedtitle.replace("</span>","")
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = ""
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		itemlist.append( Item(channel=CHANNELNAME, action="findvideos" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))

	return itemlist
Example #7
def pelisalfa(item):
	logger.info("[cinegratis.py] mainlist")

	itemlist = []
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="0-9", url="http://www.cinegratis.net/index.php?module=peliculaslist&init="))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="A", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=a"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="B", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=b"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="C", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=c"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="D", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=d"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="E", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=e"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="F", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=f"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="G", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=g"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="H", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=h"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="I", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=i"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="J", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=j"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="K", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=k"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="L", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=l"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="M", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=m"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="N", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=n"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="O", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=o"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="P", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=p"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="Q", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=q"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="R", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=r"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="S", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=s"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="T", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=t"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="U", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=u"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="V", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=v"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="W", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=w"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="X", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=x"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="Y", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=y"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple", title="Z", url="http://www.cinegratis.net/index.php?module=peliculaslist&init=z"))

	return itemlist
Example #8
def _read_suscription_file():
    logger.info("suscription._read_suscription_file")

    # Read file
    if os.path.exists(SUSCRIPTIONS_FILE):
        f = open(SUSCRIPTIONS_FILE,"r")
        data = f.read()
        f.close()
    else:
        data = ""

    # Parse suscriptions
    suscriptions = []

    matches = scrapertools.find_multiple_matches(data,"<suscription>(.*?)</suscription>")
    for match in matches:
        channel = scrapertools.find_single_match(match,"<channel>([^<]+)</channel>")
        url = scrapertools.find_single_match(match,"<url>([^<]+)</url>")
        extra = scrapertools.find_single_match(match,"<extra>([^<]+)</extra>")
        action = scrapertools.find_single_match(match,"<action>([^<]+)</action>")
        show_name = scrapertools.find_single_match(match,"<show_name>([^<]+)</show_name>")
        thumbnail = scrapertools.find_single_match(match,"<thumbnail>([^<]+)</thumbnail>")

        suscriptions.append( Item( channel=channel, url=url, action=action, title=show_name, show=show_name, thumbnail=thumbnail ) )

    return suscriptions
Example #9
def peliscat(item):
	logger.info("[cinegratis.py] peliscat")

	url = item.url

	itemlist = []
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple" , title="Versión original" , url="http://www.cinegratis.net/index.php?module=search&title=subtitulado"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple" , title="Versión latina"   , url="http://www.cinegratis.net/index.php?module=search&title=latino"))

	# Download the page
	data = scrapertools.cachePage(url)

	# Extract the items
	patronvideos  = "<td align='left'><a href='([^']+)'><img src='([^']+)' border='0'></a></td>"
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	for match in matches:
		# Attributes
		patron2 = "genero/([A-Za-z\-]+)/"
		matches2 = re.compile(patron2,re.DOTALL).findall(match[0])
		scrapertools.printMatches(matches2)
		
		scrapedtitle = matches2[0]
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = urlparse.urljoin(url,match[1])
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		itemlist.append( Item(channel=CHANNELNAME, action="listvideos" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))

	return itemlist
Example #10
def mainlist(params,url,category):
	logger.info("[favoritos.py] mainlist")

	import xbmctools

	# Build a listing from the bookmark entries
	if usingsamba:
		ficheros = samba.get_files(BOOKMARK_PATH)
	else:
		ficheros = os.listdir(BOOKMARK_PATH)
	ficheros.sort()
	for fichero in ficheros:

		try:
			# Read the bookmark
			titulo,thumbnail,plot,server,url = readbookmark(fichero)

			# Create the entry
			# The category carries the file name so it can be deleted later
			xbmctools.addnewvideo( CHANNELNAME , "play" , os.path.join( BOOKMARK_PATH, fichero ) , server , titulo , url , thumbnail, plot )
		except:
			pass

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
Example #11
def testScript():
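    # Parse the exported TQL XML result files into CI and link object vectors and print the combined result (in DEBUG mode nothing is processed)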
    OSHVResult = ObjectStateHolderVector()
    DebugMode = 'false'
    DateParsePattern = 'EEE MMM dd HH:mm:ss z yyyy'
    userExtDir = 'E:\\data\\Desktop\\Pull_From_Remedy_backup\\'
    if (DebugMode != None):
        DebugMode = DebugMode.lower()
        if DebugMode == "true":
            logger.info ('[NOTE] UCMDB Integration is running in DEBUG mode. No data will be pushed to the destination server.')
            return
    filePathDir = userExtDir + 'TQLExport\\Atrium\\results\\'
    directory = File(filePathDir)
    files = directory.listFiles()
    try:
        for file in files:
            if file != None and file != '':
                builder = SAXBuilder ()
                doc = builder.build(file)
                logger.info("Start processing CIs to update in the destination server...")
                allObjects = doc.getRootElement().getChild('data').getChild('objects').getChildren('Object')
                (objVector, ciDict) = processObjects(allObjects, DateParsePattern)
                OSHVResult.addAll(objVector)
                logger.info("Start processing Relationships to update in the destination server...")
                allLinks = doc.getRootElement().getChild('data').getChild('links').getChildren('link')
                linkVector = processLinks(allLinks, ciDict)
                OSHVResult.addAll(linkVector)
                print OSHVResult.toXmlString()
    except:
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        logger.info('Failure in processing data %s' % stacktrace)
    logger.info('Ending Push to UCMDB')

#testScript()
    
Example #12
def replicateTopology(mapping, sourceSystem, targetSystem):
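    # For each CI mapping, copy matching source CIs to the target system, applying attribute filters and validators; then process the link mappings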
    for ciMapping in mapping.getCiMappings():
        sourceType = ciMapping.getSourceType()
        logger.info('processing %s to %s' % (sourceType, ciMapping.getTargetType()))
        for sourceCi in sourceSystem.getCis(sourceType, ciMapping):
            try:
                targetCiBuilder = targetSystem.createCiBuilder(ciMapping.getTargetType())
                for attributeMapping in ciMapping.getAttributeMappings():
                    value = attributeMapping.getValue(sourceCi)

                    for filter in attributeMapping.getFilters():
                        value = filter.filter(value)

                    for validator in attributeMapping.getValidators():
                        validator.validate(value)
                    targetCiBuilder.setCiAttribute(attributeMapping.getTargetName(), value)
                targetCi = targetCiBuilder.build()
                targetSystem.addCi(targetCi, sourceCi, sourceType)
            except InvalidValueException:
                logger.info('%s CI %s skipped because %s' % (sourceType, sourceCi.getId(), sys.exc_info()[1]))

    for linkMapping in mapping.getLinkMappings():
        logger.info('processing link %s(%s) -- %s --> %s(%s)' % (
            linkMapping.getTargetEnd1Type(), linkMapping.getSourceEnd1Type(), linkMapping.getTargetType(),
            linkMapping.getTargetEnd2Type(), linkMapping.getSourceEnd2Type()))
        try:
            linkMappingProcessor = sourceSystem.createLinkMappingProcessor(linkMapping)
            for link in linkMappingProcessor.getLinks():
                logger.info("link:=====", link)
                targetSystem.addLink(linkMapping, link)
        except:
            logger.info('CI Links skipped because %s' % (sys.exc_info()[1]))
Example #13
    def addLink(self, linkMapping, link):
        "@types: LinkMapping, Link"

        sourceType1 = linkMapping.getSourceEnd1Type()
        sourceType2 = linkMapping.getSourceEnd2Type()
        targetType1 = linkMapping.getTargetEnd1Type()
        targetType2 = linkMapping.getTargetEnd2Type()
        sourceId1 = link.getEnd1Id()
        sourceId2 = link.getEnd2Id()

        targetEnd1Id = self.__createComplexId(sourceId1, sourceType1, targetType1)
        targetEnd2Id = self.__createComplexId(sourceId2, sourceType2, targetType2)

        if not self.__hasOsh(targetEnd1Id) or not self.__hasOsh(targetEnd2Id):
            failurePolicy = linkMapping.getFailurePolicy()

            if failurePolicy == 'exclude_end1':
                self.__excludeCi(targetEnd1Id)

            elif failurePolicy == 'exclude_end2':
                self.__excludeCi(targetEnd2Id)

            elif failurePolicy == 'exclude_both':
                self.__excludeCi(targetEnd1Id)
                self.__excludeCi(targetEnd2Id)
        else:
            logger.info('adding %s -- %s --> %s' % (targetEnd1Id, linkMapping.getTargetType(), targetEnd2Id))
            self.__links.append((linkMapping, link))
Example #14
def __getSecurityCookieValue():
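    # Scrape the bootstrap page and the referenced script to assemble the 'sitechrx' security cookie value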
    oRequestHandler = cRequestHandler(URL_MAIN)
    sHtmlContent = oRequestHandler.request()

    sPattern = (
        '<HTML><HEAD><SCRIPT language="javascript" src="([^"]+)">'
        + "</SCRIPT></HEAD><BODY onload=\"scf\('(.*?)'\+'(.*?)','/'\);\"></BODY></HTML>"
    )
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)
    if aResult[0] == False:
        return ""
    sScriptFile = URL_MAIN + str(aResult[1][0][0])
    sHashSnippet = str(aResult[1][0][1]) + str(aResult[1][0][2])

    oRequestHandler = cRequestHandler(sScriptFile)
    oRequestHandler.addHeaderEntry("Referer", "http://g-stream.in/")
    sHtmlContent = oRequestHandler.request()

    sPattern = 'escape\(hsh \+ "([^"]+)"\)'
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)

    sHash = aResult[1][0]
    sHash = sHashSnippet + sHash
    sSecurityCookieValue = "sitechrx=" + str(sHash)
    logger.info("Token: " + sSecurityCookieValue)
    return sSecurityCookieValue
Example #15
    def getTopology(self):
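        # Build link OSHs for every stored link whose two ends were mapped, set containment where needed, and return the result vector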
        self.linksMap = defaultdict(list)
        for (linkMapping, link) in self.__links:
            targetType = linkMapping.getTargetType()
            sourceType1 = linkMapping.getSourceEnd1Type()
            sourceType2 = linkMapping.getSourceEnd2Type()
            sourceId1 = link.getEnd1Id()
            sourceId2 = link.getEnd2Id()
            targetType1 = linkMapping.getTargetEnd1Type()
            targetType2 = linkMapping.getTargetEnd2Type()
            isContainer = linkMapping.isContainer()

            targetEnd1Id = self.__createComplexId(sourceId1, sourceType1, targetType1)
            targetEnd2Id = self.__createComplexId(sourceId2, sourceType2, targetType2)

            msg = "%s -- %s --> %s" % (targetEnd1Id, targetType, targetEnd2Id)
            logger.warn(msg)
            if self.__hasOsh(targetEnd1Id) and self.__hasOsh(targetEnd2Id):
                logger.info(msg)

                (osh1, osh2) = (self.__getOsh(targetEnd1Id), self.__getOsh(targetEnd2Id))
                if linkMapping.isReverse():
                    (osh1, osh2) = (osh2, osh1)

                link_osh = modeling.createLinkOSH(targetType, osh1, osh2)
                self.__vector.add(link_osh)
                self.linksMap[osh1].append(link_osh)
                self.linksMap[osh2].append(link_osh)
                if targetType == 'composition' or isContainer:
                    osh2.setContainer(osh1)

        self.addValidCis()

        return self.__vector
Example #16
def ListadoTotal(params,url,category):
	logger.info("[peliculas24h.py] ListadoTotal")

	# Download the page
	data = scrapertools.cachePage(url)
	#logger.info(data)

	# Pattern for the entries
	patron = "<a dir='ltr' href='([^']+)'>(.*?)</a>"
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	# Add the entries that were found
	for match in matches:
		# Attributes
		scrapedtitle = match[1]
		scrapedurl = match[0]
		scrapedthumbnail = ""
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Add to the XBMC listing
		xbmctools.addnewfolder( CHANNELNAME , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Set the title, disable sorting, and close the directory
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #17
def __getHtmlContent(sUrl=None, sSecurityValue=None):
    oInputParameterHandler = cInputParameterHandler()

    # Test if a url is available and set it
    if sUrl is None and not oInputParameterHandler.exist("siteUrl"):
        logger.info("There is no url we can request.")
        return False
    else:
        if sUrl is None:
            sUrl = oInputParameterHandler.getValue("siteUrl")

    # Test if a security value is available
    if sSecurityValue is None:
        if oInputParameterHandler.exist("securityCookie"):
            sSecurityValue = oInputParameterHandler.getValue("securityCookie")
        else:
            sSecurityValue = ""

    # Make the request
    oRequest = cRequestHandler(sUrl)
    oRequest.addHeaderEntry("Cookie", sSecurityValue)
    oRequest.addHeaderEntry("Accept", "*/*")
    oRequest.addHeaderEntry("Host", "g-stream.in")

    return oRequest.request()
Example #18
def performsearch(texto):
	logger.info("[tutvsite.py] performsearch")
	url = "http://www.tu.tv/buscar/?str="+texto

	# Download the page
	data = scrapertools.cachePage(url)

	# Extract the entries (folders)
	patronvideos  = '<div class="fila clearfix">[^<]+<div.*?</div>[^<]+<a href="([^"]+)"[^<]+<img src="([^"]+)".*?<span id="txtN">(.*?)</span>.*?<span class="tmp">([^<]+)</span.*?<span id="txtN">(.*?)</span>'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	resultados = []

	for match in matches:
		# Title
		try:
			scrapedtitle = unicode( match[2], "utf-8" ).encode("iso-8859-1")
		except:
			scrapedtitle = match[2]
		scrapedtitle = scrapedtitle.replace("<b>","")
		scrapedtitle = scrapedtitle.replace("</b>","")
		scrapedtitle = scrapedtitle.strip()
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = urlparse.urljoin(url,match[1])
		scrapedplot = match[4].strip()

		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Add to the XBMC listing
		resultados.append( [CHANNELNAME , "playfolder" , "buscador" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot ] )
		
	return resultados
Example #19
def file_exists(filename,url):

    logger.info("[samba.py] file_exists "+ filename )
    
    # Split the URL into its elements
    server_name,share_name,path,user,password = parse_url(url)

    # Connect to the remote server
    remote = connect(server_name,user,password)

    ficheros = []
    
    for f in remote.list_path(share_name, path + '*'):
        name = f.get_longname()
        #logger.info("name="+name)
        if name == '.' or name == '..':
            continue

        if f.is_directory():
            continue

        ficheros.append(name)

    try:
        logger.info(ficheros.index(filename))
        return True
    except:
        return False
Example #20
def detail(params, url, category):
    logger.info("[documentalesatonline.py] detail")

    title = unicode(xbmc.getInfoLabel("ListItem.Title"), "utf-8")
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")

    # Download the page
    data = scrapertools.cachePage(url)
    # logger.info(data)

    # ------------------------------------------------------------------------------------
    # Find the links to the videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        xbmctools.addvideo(CHANNELNAME, "Megavideo - " + video[0], video[1], category, video[2])
        # ------------------------------------------------------------------------------------

        # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #21
def categorias(params, url, category):
    logger.info("[documentalesatonline.py] novedades")

    # Download the page
    data = scrapertools.cachePage(url)
    # logger.info(data)

    # Extract the entries (folders)
    patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span dir='ltr'>([^<]+)</span>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[1] + " " + match[2]
        scrapedurl = urlparse.urljoin(url, match[0])
        scrapedthumbnail = ""
        scrapedplot = ""
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        # Add to the XBMC listing
        xbmctools.addnewfolder(
            CHANNELNAME, "novedades", category, scrapedtitle, scrapedurl, scrapedthumbnail, scrapedplot
        )

        # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #22
def busqueda(params,url,category):
	logger.info("busqueda")
	tecleado = ""
	keyboard = xbmc.Keyboard('')
	keyboard.doModal()
	if (keyboard.isConfirmed()):
		tecleado = keyboard.getText()
		if len(tecleado)<=0:
			return
	
	tecleado = tecleado.replace(" ", "+")
	data=scrapertools.cachePagePost("http://www.divxonline.info/buscador.html",'texto=' + tecleado + '&categoria=0&tipobusqueda=1&Buscador=Buscar')

	#logger.info(data)
	data=data[data.find('Se han encontrado un total de'):]
	
	#<li><a href="/pelicula/306/100-chicas-2000/">100 chicas (2000)</a></li>
	patronvideos  = '<li><a href="(.+?)">(.+?)</a></li>'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	if DEBUG: 
		scrapertools.printMatches(matches)
	
	for match in matches:
		xbmctools.addnewfolder( CHANNELNAME , "listmirrors" , category , match[1] , 'http://www.divxonline.info' + match[0] , 'scrapedthumbnail', 'scrapedplot' )
	
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
Example #23
def detail(params,url,category):
	logger.info("[divxonline.py] detail")
	title=''
	thumbnail=''
	plot=''

	try:
		title = urllib.unquote_plus( params.get("title") )
		thumbnail = urllib.unquote_plus( params.get("thumbnail") )
		plot = urllib.unquote_plus( params.get("plot") )
	except:
		pass
	# Download the page
	data = scrapertools.cachePage(url)
	#logger.info(data)

	# ------------------------------------------------------------------------------------
	# Find the links to the videos
	# ------------------------------------------------------------------------------------
	
	data=decryptinks(data);
	listavideos = servertools.findvideos(data)

	for video in listavideos:
		videotitle = video[0]
		url = video[1]
		server = video[2]
		xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
	# ------------------------------------------------------------------------------------

	# Close the directory
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #24
def megavideo(params,url,category):
	logger.info("[divxonline.py] megavideo")

	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Acción" , "http://www.divxonline.info/peliculas/50/accion-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Animación" , "http://www.divxonline.info/peliculas/53/animacion-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Anime" , "http://www.divxonline.info/peliculas/51/anime-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Aventura" , "http://www.divxonline.info/peliculas/52/aventura-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Bélicas" , "http://www.divxonline.info/peliculas/95/belicas-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Ciencia Ficción" , "http://www.divxonline.info/peliculas/55/ciencia-ficcion-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Cine Clásico" , "http://www.divxonline.info/peliculas/58/cine-clasico-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Cine español" , "http://www.divxonline.info/peliculas/57/cine-espa%C3%B1ol-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Clásicos Disney" , "http://www.divxonline.info/peliculas/59/clasicos-disney-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Comedias" , "http://www.divxonline.info/peliculas/60/comedias-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Documentales" , "http://www.divxonline.info/peliculas/54/documentales-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Drama" , "http://www.divxonline.info/peliculas/62/drama-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Infantil" , "http://www.divxonline.info/peliculas/63/infantil-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Musicales" , "http://www.divxonline.info/peliculas/64/musicales-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Suspense" , "http://www.divxonline.info/peliculas/65/suspense-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Terror" , "http://www.divxonline.info/peliculas/66/terror-megavideo/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Western" , "http://www.divxonline.info/peliculas/67/western-megavideo/" , "", "" )

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )

	# Disable sorting...
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )

	# End of directory...
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #25
def veoh(params,url,category):
	logger.info("[divxonline.py] veoh")

	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Acción" , "http://www.divxonline.info/peliculas/30/accion-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Animación" , "http://www.divxonline.info/peliculas/33/animacion-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Anime" , "http://www.divxonline.info/peliculas/41/anime-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Aventura" , "http://www.divxonline.info/peliculas/32/aventura-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Bélicas" , "http://www.divxonline.info/peliculas/96/belicas-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Ciencia Ficción" , "http://www.divxonline.info/peliculas/35/ciencia0-ficcion-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Cine Clásico" , "http://www.divxonline.info/peliculas/38/cine-clasico-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Cine Español" , "http://www.divxonline.info/peliculas/37/cine-español-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Clásicos Disney" , "http://www.divxonline.info/peliculas/39/clasicos-disney-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Comedias" , "http://www.divxonline.info/peliculas/40/comedias-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Cortometrajes" , "http://www.divxonline.info/peliculas/41/cortometrajes-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Documentales" , "http://www.divxonline.info/peliculas/34/documentales-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Drama" , "http://www.divxonline.info/peliculas/42/dramas-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Infantiles" , "http://www.divxonline.info/peliculas/43/infantiles-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Musicales" , "http://www.divxonline.info/peliculas/44/musicales-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Suspense" , "http://www.divxonline.info/peliculas/45/suspense-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Terror" , "http://www.divxonline.info/peliculas/46/terror-veoh/" , "", "" )
	xbmctools.addnewfolder( CHANNELNAME , "movielist" , CHANNELNAME , "Western" , "http://www.divxonline.info/peliculas/49/western-veoh/" , "", "" )

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #26
def showCharacters():
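  # Add a folder entry for every letter/character filter parsed from the page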
  logger.info('load showCharacters') 
  oGui = cGui()

  oParams = ParameterHandler()
  sSecurityValue = oParams.getValue('securityCookie')
  if (oParams.exist('sUrl') and oParams.exist('page') and oParams.exist('mediaType')):
    siteUrl = oParams.getValue('sUrl')
    #iPage = oParams.getValue('page')
    #sMediaType = oParams.getValue('mediaType')
    # request
    sHtmlContent =__getHtmlContent(siteUrl, sSecurityValue)
    # parse content
    sPattern = 'class="LetterMode.*?>([^>]+)</a>'
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)

    if (aResult[0] == True):
      for aEntry in aResult[1]:
        oGuiElement = cGuiElement(aEntry, SITE_IDENTIFIER, 'ajaxCall')
        #oOutputParameterHandler = ParameterHandler()
        oParams.setParam('character', aEntry[0])
        #oOutputParameterHandler.addParameter('page', iPage)
        #oOutputParameterHandler.addParameter('mediaType', sMediaType)
        #oOutputParameterHandler.addParameter('securityCookie', sSecurityValue)
        if oParams.exist('mediaTypePageId'):
            sMediaTypePageId = oParams.getValue('mediaTypePageId')
            oParams.setParam('mediaTypePageId', sMediaTypePageId)
        oGui.addFolder(oGuiElement, oParams)

  oGui.setEndOfDirectory()
Example #27
def load():
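    # Fetch the security cookie and build the main menu entries (news, movies, series, documentaries, search)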
    logger.info("Load %s" % SITE_NAME)

    sSecurityValue = __getSecurityCookieValue()
    if sSecurityValue == '':
        pass
    elif sSecurityValue == False:
        return
    oParams = ParameterHandler()
    oParams.setParam('securityCookie', sSecurityValue)
    ## Create all main menu entries
    oGui = cGui()
    
    oParams.setParam('sUrl', URL_NEWS)
    oParams.setParam('page', 1)
    oParams.setParam('mediaType', 'news')
    oGui.addFolder(cGuiElement('Neues von Heute',SITE_IDENTIFIER,'showNews'),oParams)
    oParams.setParam('sUrl', URL_MOVIE_PAGE)
    oParams.setParam('mediaType', 'movie')
    oGui.addFolder(cGuiElement('Filme',SITE_IDENTIFIER,'showMovieMenu'),oParams)
    oParams.setParam('sUrl', URL_SERIE_PAGE)
    oParams.setParam('mediaType', 'series')
    oGui.addFolder(cGuiElement('Serien',SITE_IDENTIFIER,'showSeriesMenu'),oParams)
    oParams.setParam('sUrl', URL_DOCU_PAGE)
    oParams.setParam('mediaType', 'documentation')
    oGui.addFolder(cGuiElement('Dokumentationen',SITE_IDENTIFIER,'showDocuMenu'),oParams)
    oParams.setParam('sUrl', URL_SEARCH)
    oParams.setParam('mediaType', '')
    oGui.addFolder(cGuiElement('Suche',SITE_IDENTIFIER,'showSearch'),oParams)
    oGui.setEndOfDirectory()
Example #28
def searchresults(params,tecleado,category):
	logger.info("[divxonline.py] search")

	buscador.salvar_busquedas(params,tecleado,category)
	tecleado = tecleado.replace(" ", "+")
	#searchUrl = "http://documentalesatonline.loquenosecuenta.com/search/"+tecleado+"?feed=rss2&paged=1"
	busqueda(CHANNELNAME,tecleado,category)
Example #29
def _search(oGui, sSearchText):
    # Create the request with the search value
    sFullSearchUrl = URL_SEARCH + ("?q=%s" % sSearchText)
    logger.info("Search URL: %s" % sFullSearchUrl)
    sHtmlContent = __getHtmlContent(sFullSearchUrl)
    # Display all items returned...
    __displayItems(oGui, sHtmlContent)
Example #30
    def download(self, siteResult = False):
        #oGui = cGui()
        oInputParameterHandler = cInputParameterHandler()

        sMediaUrl = oInputParameterHandler.getValue('sMediaUrl')
        sFileName = oInputParameterHandler.getValue('sFileName')
        sFileName = oInputParameterHandler.getValue('sMovieTitle')
        if siteResult:
            sMediaUrl = siteResult['streamUrl']
            if siteResult['resolved']:
                sLink = sMediaUrl
            else:
                sLink = urlresolver.resolve(sMediaUrl)
        else:
            sLink = urlresolver.resolve(sMediaUrl)
        logger.info('call download: ' + sMediaUrl)
        logger.info('file link: ' + str(sLink))
        if self.dialog:
            self.dialog.close()
        if (sLink != False):
            oDownload = cDownload()
            oDownload.download(sLink, 'Stream')
        else:
            cGui().showError('Download', 'File deleted or Link could not be resolved', 5);
            return False
        return True
Example #31
    def delete_sync(self, client_id, sync_id):
        logger.info("Tautulli PlexTV :: Deleting sync item '%s'." % sync_id)
        response = self.delete_plextv_sync(client_id=client_id,
                                           sync_id=sync_id)
        return response.ok
Example #32
def update():
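    # Update Tautulli according to the install type: Windows scheduled task, git pull, or source tarball download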
    if plexpy.PYTHON2:
        logger.warn('Tautulli is running using Python 2. Unable to update.')
        return

    if not plexpy.UPDATE_AVAILABLE:
        return

    if plexpy.INSTALL_TYPE in ('docker', 'snap', 'macos'):
        return

    elif plexpy.INSTALL_TYPE == 'windows':
        logger.info('Calling Windows scheduled task to update Tautulli')
        CREATE_NO_WINDOW = 0x08000000
        subprocess.Popen(['SCHTASKS', '/Run', '/TN', 'TautulliUpdateTask'],
                         creationflags=CREATE_NO_WINDOW)

    elif plexpy.INSTALL_TYPE == 'git':
        output, err = runGit('pull --ff-only {} {}'.format(
            plexpy.CONFIG.GIT_REMOTE, plexpy.CONFIG.GIT_BRANCH))

        if not output:
            logger.error('Unable to download latest version')
            return

        for line in output.split('\n'):
            if 'Already up-to-date.' in line or 'Already up to date.' in line:
                logger.info('No update available, not updating')
            elif line.endswith(('Aborting', 'Aborting.')):
                logger.error('Unable to update from git: ' + line)

    elif plexpy.INSTALL_TYPE == 'source':
        tar_download_url = 'https://github.com/{}/{}/tarball/{}'.format(
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.CONFIG.GIT_BRANCH)
        update_dir = os.path.join(plexpy.DATA_DIR, 'update')
        version_path = os.path.join(plexpy.PROG_DIR, 'version.txt')

        logger.info('Downloading update from: ' + tar_download_url)
        data = request.request_content(tar_download_url)

        if not data:
            logger.error(
                "Unable to retrieve new version from '%s', can't update",
                tar_download_url)
            return

        download_name = plexpy.CONFIG.GIT_BRANCH + '-github'
        tar_download_path = os.path.join(plexpy.DATA_DIR, download_name)

        # Save tar to disk
        with open(tar_download_path, 'wb') as f:
            f.write(data)

        # Extract the tar to update folder
        logger.info('Extracting file: ' + tar_download_path)
        tar = tarfile.open(tar_download_path)
        tar.extractall(update_dir)
        tar.close()

        # Delete the tar.gz
        logger.info('Deleting file: ' + tar_download_path)
        os.remove(tar_download_path)

        # Find update dir name
        update_dir_contents = [
            x for x in os.listdir(update_dir)
            if os.path.isdir(os.path.join(update_dir, x))
        ]
        if len(update_dir_contents) != 1:
            logger.error("Invalid update data, update failed: " +
                         str(update_dir_contents))
            return
        content_dir = os.path.join(update_dir, update_dir_contents[0])

        # walk temp folder and move files to main folder
        for dirname, dirnames, filenames in os.walk(content_dir):
            dirname = dirname[len(content_dir) + 1:]
            for curfile in filenames:
                old_path = os.path.join(content_dir, dirname, curfile)
                new_path = os.path.join(plexpy.PROG_DIR, dirname, curfile)

                if os.path.isfile(new_path):
                    os.remove(new_path)
                os.renames(old_path, new_path)

        # Update version.txt
        try:
            with open(version_path, 'w') as f:
                f.write(str(plexpy.LATEST_VERSION))
        except IOError as e:
            logger.error(
                "Unable to write current version to version.txt, update not complete: %s",
                e)
            return
Example #33
def check_github(scheduler=False, notify=False, use_cache=False):
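    # Ask GitHub for the latest commit, work out how many commits behind the installation is, and optionally notify or trigger an automatic update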
    plexpy.COMMITS_BEHIND = 0

    if plexpy.CONFIG.GIT_TOKEN:
        headers = {'Authorization': 'token {}'.format(plexpy.CONFIG.GIT_TOKEN)}
    else:
        headers = {}

    version = github_cache('version', use_cache=use_cache)
    if not version:
        # Get the latest version available from github
        logger.info('Retrieving latest version information from GitHub')
        url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.CONFIG.GIT_BRANCH)
        version = request.request_json(url,
                                       headers=headers,
                                       timeout=20,
                                       validator=lambda x: type(x) == dict)
        github_cache('version', github_data=version)

    if version is None:
        logger.warn(
            'Could not get the latest version from GitHub. Are you running a local development version?'
        )
        return plexpy.CURRENT_VERSION

    plexpy.LATEST_VERSION = version['sha']
    logger.debug("Latest version is %s", plexpy.LATEST_VERSION)

    # See how many commits behind we are
    if not plexpy.CURRENT_VERSION:
        logger.info(
            'You are running an unknown version of Tautulli. Run the updater to identify your version'
        )
        return plexpy.LATEST_VERSION

    if plexpy.LATEST_VERSION == plexpy.CURRENT_VERSION:
        logger.info('Tautulli is up to date')
        return plexpy.LATEST_VERSION

    commits = github_cache('commits', use_cache=use_cache)
    if not commits:
        logger.info(
            'Comparing currently installed version with latest GitHub version')
        url = 'https://api.github.com/repos/%s/%s/compare/%s...%s' % (
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.LATEST_VERSION, plexpy.CURRENT_VERSION)
        commits = request.request_json(url,
                                       headers=headers,
                                       timeout=20,
                                       whitelist_status_code=404,
                                       validator=lambda x: type(x) == dict)
        github_cache('commits', github_data=commits)

    if commits is None:
        logger.warn('Could not get commits behind from GitHub.')
        return plexpy.LATEST_VERSION

    try:
        plexpy.COMMITS_BEHIND = int(commits['behind_by'])
        logger.debug("In total, %d commits behind", plexpy.COMMITS_BEHIND)
    except KeyError:
        logger.info(
            'Cannot compare versions. Are you running a local development version?'
        )
        plexpy.COMMITS_BEHIND = 0

    if plexpy.COMMITS_BEHIND > 0:
        logger.info('New version is available. You are %s commits behind' %
                    plexpy.COMMITS_BEHIND)

        releases = github_cache('releases', use_cache=use_cache)
        if not releases:
            url = 'https://api.github.com/repos/%s/%s/releases' % (
                plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO)
            releases = request.request_json(
                url,
                timeout=20,
                whitelist_status_code=404,
                validator=lambda x: type(x) == list)
            github_cache('releases', github_data=releases)

        if releases is None:
            logger.warn('Could not get releases from GitHub.')
            return plexpy.LATEST_VERSION

        if plexpy.CONFIG.GIT_BRANCH == 'master':
            release = next((r for r in releases if not r['prerelease']),
                           releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'beta':
            release = next(
                (r
                 for r in releases if not r['tag_name'].endswith('-nightly')),
                releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'nightly':
            release = next((r for r in releases), releases[0])
        else:
            release = releases[0]

        plexpy.LATEST_RELEASE = release['tag_name']

        if notify:
            plexpy.NOTIFY_QUEUE.put({
                'notify_action':
                'on_plexpyupdate',
                'plexpy_download_info':
                release,
                'plexpy_update_commit':
                plexpy.LATEST_VERSION,
                'plexpy_update_behind':
                plexpy.COMMITS_BEHIND
            })

        if plexpy.PYTHON2:
            logger.warn(
                'Tautulli is running using Python 2. Unable to run automatic update.'
            )

        elif scheduler and plexpy.CONFIG.PLEXPY_AUTO_UPDATE and \
                not plexpy.DOCKER and not plexpy.SNAP and \
                not (plexpy.FROZEN and common.PLATFORM == 'Darwin'):
            logger.info('Running automatic update.')
            plexpy.shutdown(restart=True, update=True)

    elif plexpy.COMMITS_BEHIND == 0:
        logger.info('Tautulli is up to date')

    return plexpy.LATEST_VERSION
Example #34
    def __gen__(self):
        info("start")
        for n in range(4):
            info("next")
            msg = BaseMessage()
            yield msg
Example #35
    def __iter__(self):
        info("iter")
        return self.__gen__()
Example #36
    def __init__(self):
        info("init")
        Source.__init__(self)
        self.output_type = type(BaseMessage)
        delattr(self, '__next__')
Example #37
def verify_directories_created():
    import logger
    import os
    logger.info("pelisalacarta.core.config.verify_directories_created")

    # Force download path if empty
    download_path = get_setting("downloadpath")
    if download_path == "":
        download_path = os.path.join(get_data_path(), "downloads")
        set_setting("downloadpath", download_path)

    # Force download list path if empty
    download_list_path = get_setting("downloadlistpath")
    if download_list_path == "":
        download_list_path = os.path.join(get_data_path(), "downloads", "list")
        set_setting("downloadlistpath", download_list_path)

    # Force bookmark path if empty
    bookmark_path = get_setting("bookmarkpath")
    if bookmark_path == "":
        bookmark_path = os.path.join(get_data_path(), "bookmarks")
        set_setting("bookmarkpath", bookmark_path)

    # Create data_path if not exists
    if not os.path.exists(get_data_path()):
        logger.debug("Creating data_path " + get_data_path())
        try:
            os.mkdir(get_data_path())
        except:
            pass

    # Create download_path if not exists
    if not download_path.lower().startswith("smb") and not os.path.exists(
            download_path):
        logger.debug("Creating download_path " + download_path)
        try:
            os.mkdir(download_path)
        except:
            pass

    # Create download_list_path if not exists
    if not download_list_path.lower().startswith("smb") and not os.path.exists(
            download_list_path):
        logger.debug("Creating download_list_path " + download_list_path)
        try:
            os.mkdir(download_list_path)
        except:
            pass

    # Create bookmark_path if not exists
    if not bookmark_path.lower().startswith("smb") and not os.path.exists(
            bookmark_path):
        logger.debug("Creating bookmark_path " + bookmark_path)
        try:
            os.mkdir(bookmark_path)
        except:
            pass

    # Create library_path if not exists
    if not get_library_path().lower().startswith("smb") and not os.path.exists(
            get_library_path()):
        logger.debug("Creating library_path " + get_library_path())
        try:
            os.mkdir(get_library_path())
        except:
            pass

    # Checks that a directory "xbmc" is not present on platformcode
    old_xbmc_directory = os.path.join(get_runtime_path(), "platformcode",
                                      "xbmc")
    if os.path.exists(old_xbmc_directory):
        logger.debug("Removing old platformcode.xbmc directory")
        try:
            import shutil
            shutil.rmtree(old_xbmc_directory)
        except:
            pass
def do_file(pathname):
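    # Beat-track the audio file with librosa, fit a fixed-interval beat grid, derive bars and segments, and write the results to CSV files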
    y, sr = librosa.load(pathname, sr=None)
    logger.info('load ' + pathname)

    onset_env = librosa.onset.onset_strength(y=y, sr=sr)
    num_frames = onset_env.shape[0]
    logger.info('frame count ', num_frames)
    music_length = librosa.frames_to_time(num_frames, sr=sr)

    beat_frames, max_index = init_beat(y, sr, onset_env)
    beat_times = librosa.frames_to_time(beat_frames, sr=sr)

    numBeats = len(beat_times)
    itvals = beat_intervals(beat_times)
    #logger.info('mean ' + str(mean))

    # Least-squares fit of a fixed beat interval
    a, b = MSL(beat_times)

    # Use the relatively accurate beats at the head and tail to compute a more accurate beat interval
    i1, i2 = diff(itvals, a)
    a, b = calc_beat_interval(beat_times, i1, i2)
    # Shift b to the nearest non-negative position
    compensate = int(min(0, b // a))
    b -= compensate * a
    numBeats += compensate
    logger.info('a b ', a, b)

    new_beat_times = np.arange(numBeats) * a + b

    bpm = 60.0 / a
    logger.info('bpm ', bpm)
    print('bpm', bpm)

    # Pick out the downbeats; only 4/4 time is supported
    bar_times = new_beat_times[max_index:new_beat_times.size:4]
    bar_frames = librosa.time_to_frames(bar_times, sr=sr, hop_length=512)

    print('et', bar_times[0])

    bound_frames, bound_segs = seg.calc_segment(y, sr)
    seg_power, power_data = seg.calc_power(y, sr, bound_frames, bound_segs)

    bar_value = np.zeros(bar_frames.shape[0])
    for i in range(bar_frames.shape[0]):
        index = np.flatnonzero(bound_frames < bar_frames[i])
        if len(index) == 0 or index.shape[0] == bound_frames.shape[0]:
            # The current position is before the first segment or after the last one
            bar_value[i] = 0
        else:
            bar_value[i] = seg_power[index[-1]]

    # Compute transition probabilities; higher where the music shifts from intense to calm
    bar_value = bar_value[:-1] - bar_value[1:]
    bar_value = np.append(bar_value, 0)
    bar_value = np.fmax(bar_value, np.zeros(bar_value.shape))
    bar_value **= 0.5
    bar_value = bar_value * 0.5 + 0.5

    plt.plot(bar_frames, bar_value)

    gen_pro = seg.gen_seg_probability(num_frames, bar_frames)
    bar_value = bar_value * gen_pro
    plt.plot(bar_frames, bar_value)

    big_seg = np.zeros(2)
    bar_index = np.argmax(bar_value)
    bar_value[bar_index] = 0
    big_seg[0] = bar_frames[bar_index]
    bar_index = np.argmax(bar_value)
    big_seg[1] = bar_frames[bar_index]
    big_seg = np.sort(big_seg)
    big_seg_time = librosa.frames_to_time(big_seg, sr=sr, hop_length=512)

    save_file(bar_times, pathname, '_bar')

    save_file(big_seg_time, pathname, '_seg')

    save_time_value_tofile(power_data, pathname, '_pow')

    plt.show()


def save_time_value_tofile(data, mp3filename, postfix=''):
    # Signature assumed from the call above and the sibling save_file() helper:
    # write the (time, value) rows to a CSV file next to the source audio file
    outname = os.path.splitext(mp3filename)[0]
    outname = outname + postfix + '.csv'
    with open(outname, 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        list(map(spamwriter.writerow, data))


##filenames = filenames[-2:-1]
##for f in filenames:
##    pathname = path + f
##    do_file(pathname)


def dummy(f):
    do_file(f)


if __name__ == '__main__':
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')

    path = 'd:/librosa/炫舞自动关卡生成/测试歌曲/'
    files = [
        path + f for f in listdir(path)
        if os.path.splitext(f)[1] == '.mp3' or os.path.splitext(f)[1] == '.m4a'
    ]
    logger.info(files)

    list(map(dummy, files))
# dummy(file)
Example #40
def get_server_resources(return_presence=False,
                         return_server=False,
                         return_info=False,
                         **kwargs):
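    # Look up the Plex server's connection details via plex.tv, choose a server URL (or fall back to the configured address), and save the result to the Tautulli config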
    if not return_presence and not return_info:
        logger.info("Tautulli PlexTV :: Requesting resources for server...")

    server = {
        'pms_name': plexpy.CONFIG.PMS_NAME,
        'pms_version': plexpy.CONFIG.PMS_VERSION,
        'pms_platform': plexpy.CONFIG.PMS_PLATFORM,
        'pms_ip': plexpy.CONFIG.PMS_IP,
        'pms_port': plexpy.CONFIG.PMS_PORT,
        'pms_ssl': plexpy.CONFIG.PMS_SSL,
        'pms_is_remote': plexpy.CONFIG.PMS_IS_REMOTE,
        'pms_is_cloud': plexpy.CONFIG.PMS_IS_CLOUD,
        'pms_url': plexpy.CONFIG.PMS_URL,
        'pms_url_manual': plexpy.CONFIG.PMS_URL_MANUAL,
        'pms_identifier': plexpy.CONFIG.PMS_IDENTIFIER,
        'pms_plexpass': plexpy.CONFIG.PMS_PLEXPASS
    }

    if return_info:
        return server

    if kwargs:
        server.update(kwargs)
        for k in [
                'pms_ssl', 'pms_is_remote', 'pms_is_cloud', 'pms_url_manual'
        ]:
            server[k] = int(server[k])

    if server['pms_url_manual'] and server['pms_ssl'] or server['pms_is_cloud']:
        scheme = 'https'
    else:
        scheme = 'http'

    fallback_url = '{scheme}://{hostname}:{port}'.format(
        scheme=scheme, hostname=server['pms_ip'], port=server['pms_port'])

    plex_tv = PlexTV()
    result = plex_tv.get_server_connections(
        pms_identifier=server['pms_identifier'],
        pms_ip=server['pms_ip'],
        pms_port=server['pms_port'],
        include_https=server['pms_ssl'])

    if result:
        connections = result.pop('connections', [])
        server.update(result)
        presence = server.pop('pms_presence', 0)
    else:
        connections = []
        presence = 0

    if return_presence:
        return presence

    plexpass = plex_tv.get_plexpass_status()
    server['pms_plexpass'] = int(plexpass)

    # Only need to retrieve PMS_URL if using SSL
    if not server['pms_url_manual'] and server['pms_ssl']:
        if connections:
            if server['pms_is_remote']:
                # Get all remote connections
                conns = [
                    c for c in connections if c['local'] == '0' and
                    ('plex.direct' in c['uri'] or 'plex.service' in c['uri'])
                ]
            else:
                # Get all local connections
                conns = [
                    c for c in connections if c['local'] == '1' and
                    ('plex.direct' in c['uri'] or 'plex.service' in c['uri'])
                ]

            if conns:
                # Get connection with matching address, otherwise return first connection
                conn = next(
                    (c for c in conns if c['address'] == server['pms_ip']
                     and c['port'] == str(server['pms_port'])), conns[0])
                server['pms_url'] = conn['uri']
                logger.info("Tautulli PlexTV :: Server URL retrieved.")

        # get_server_urls() failed or PMS_URL not found, fallback url doesn't use SSL
        if not server['pms_url']:
            server['pms_url'] = fallback_url
            logger.warn(
                "Tautulli PlexTV :: Unable to retrieve server URLs. Using user-defined value without SSL."
            )

        # Not using SSL, remote has no effect
    else:
        server['pms_url'] = fallback_url
        logger.info("Tautulli PlexTV :: Using user-defined URL.")

    if return_server:
        return server

    logger.info(
        "Tautulli PlexTV :: Selected server: %s (%s) (%s - Version %s)",
        server['pms_name'], server['pms_url'], server['pms_platform'],
        server['pms_version'])

    plexpy.CONFIG.process_kwargs(server)
    plexpy.CONFIG.write()
Example #41
def testModel(modelname, estimator, X_test, config):  
    info("Testing a {0} estimator".format(modelname), ind=0)
    info("X data is {0}".format(getDim(X_test)), ind=2)
    
    problemType = config['problem']
    results = {"good": True, "label": None, "prob": None, "pred": None}
    
    if isinstance(estimator, dict):
        estimator = estimator['estimator']
        
    
    if estimator is None:
        error("The {0} estimator is NULL".format(modelname))
        results['good'] = False
        return results
    
    
    if isClassification(problemType):
        info("Predicting classification labels/classes for {0}".format(modelname), ind=4)
        try:
            results['label'] = estimator.predict(X_test)
        except:
            results['good'] = False
            error("There is a problem getting labels for {0}".format(modelname), ind=4)
        
        info("Predicting classification probabilities for {0}".format(modelname), ind=4)
        try:
            proba = estimator.predict_proba(X_test)
            results['prob'] = proba[:,1]
        except:
            results['good'] = False
            error("There is a problem getting probabilities for {0}".format(modelname), ind=4)
            

    if isRegression(problemType):
        info("Predicting regression score/output for {0}".format(modelname), ind=4)
        try:
            results['pred'] = estimator.predict(X_test)
        except:
            results['good'] = False
            error("There is a problem getting prediction for {0}".format(modelname), ind=4)


    if results['good'] == True:
        info("Everything looks good for the {0} estimator".format(modelname), ind=4)
    else:        
        info("There is a problem with the {0} estimator".format(modelname), ind=4)


    return results
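
A minimal usage sketch for testModel() above, assuming a scikit-learn style estimator and that the surrounding helpers (info, getDim, isClassification, ...) are importable; the data and model here are illustrative only:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split

# Small synthetic binary-classification problem (illustrative data only)
X, y = make_classification(n_samples=200, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

estimator = LogisticRegression(solver="liblinear").fit(X_train, y_train)
results = testModel("logistic", estimator, X_test, {"problem": "classification"})

if results["good"]:
    print("accuracy: {0:.3f}".format(accuracy_score(y_test, results["label"])))
    print("roc auc : {0:.3f}".format(roc_auc_score(y_test, results["prob"])))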
Пример #42
0
def save_file(beats, mp3filename, postfix=''):
    outname = os.path.splitext(mp3filename)[0]
    outname = outname + postfix + '.csv'
    # note: librosa.output.times_csv was removed in librosa 0.8; see the numpy-based sketch below
    librosa.output.times_csv(outname, beats)
    logger.info('output beat time file ' + outname)
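
On librosa 0.8 and later, where librosa.output is no longer available, the same CSV can be written with numpy directly; a minimal sketch, assuming the same module-level logger as above:

import os
import numpy as np

def save_file_np(beats, mp3filename, postfix=''):
    # Same behaviour as save_file() above, without the removed librosa.output API
    outname = os.path.splitext(mp3filename)[0] + postfix + '.csv'
    np.savetxt(outname, np.asarray(beats), fmt='%.6f')
    logger.info('output beat time file ' + outname)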
Пример #43
0
    def plc_task(self):
        logger.info("Starting...", self.name, self.env.now)
        # Initialize the proximity variables
        last_s1_prox = False
        last_s2_prox = False
        last_s3_prox = False
        last_s4_prox = False

        # Process loop
        while True:
            # Iterate through each link to obtain the current station status.
            # TODO: This should probably be rewritten as a function (see: prox_update)
            # NOTE: Each link to a station has two sockets:
            #   "station_status" : Station State Messages
            #   "prox"           : Robot Prox Messages
            for link in self.links:
                # Service the station_status socket only when it is not busy
                if not link.socket_busy("station_status"):
                    # If a response has arrived, consume it; otherwise send a new request
                    if link.socket_response("station_status"):
                        # Store a local reference to the packet
                        pkt = link.get_packet("station_status")
                        #print link.name + " " + str(pkt.reply)
                        # Update the dictionary station entry with the updated status
                        self.station_status.update({link.name: pkt.reply})
                        #print self.station_status
                    else:
                        # The link isn't busy, so send a new request
                        link.send_packet("station_status", "get_mbtcp")

                # Since we are iterating through the links, take this opportunity
                # to update the proximity status on each station.
                if link.name == "s1":
                    last_s1_prox = self.prox_update(link, self.s1_prox,
                                                    last_s1_prox)
                elif link.name == "s2":
                    last_s2_prox = self.prox_update(link, self.s2_prox,
                                                    last_s2_prox)
                elif link.name == "s3":
                    last_s3_prox = self.prox_update(link, self.s3_prox,
                                                    last_s3_prox)
                elif link.name == "s4":
                    last_s4_prox = self.prox_update(link, self.s4_prox,
                                                    last_s4_prox)

            # DO NOT MODIFY! -- Robot 1 logic, as exists within the testbed PLC
            if (not self.station("s1") == "unloaded") and self.station(
                    "s2") == "finished" and self.station("s3") == "unloaded":
                self.r1_job = "101"
                #logger.info("Robot job 101",self.name,self.env.now)
            elif self.queue.part_available() and self.station(
                    "s1") == "unloaded":
                self.r1_job = "102"
                #logger.info("Robot job 102",self.name,self.env.now)
            elif self.station("s1") == "finished" and self.station(
                    "s2") == "unloaded":
                self.r1_job = "103"
                #logger.info("Robot job 103",self.name,self.env.now)
            else:
                self.r1_job = "None"

            # DO NOT MODIFY! -- Robot 2 logic, as exists within the testbed PLC
            if self.testbed_obj["r1"].handoff == True and self.station(
                    "s3") == "unloaded":
                self.r2_job = "201"
                #logger.info("Robot job 201",self.name,self.env.now)
            elif self.station("s3") == "finished" and self.station(
                    "s4") == "unloaded":
                self.r2_job = "202"
                #logger.info("Robot job 202",self.name,self.env.now)
            elif self.station("s4") == "finished":
                self.r2_job = "203"
                #logger.info("Robot job 203",self.name,self.env.now)
            else:
                self.r2_job = "None"

            # Yield the PLC process until the next scheduled iteration (100 Hz)
            # TODO: Make this a configurable value
            yield self.env.timeout(0.01)
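
The TODO near the top of the loop suggests factoring the station_status polling into a helper alongside prox_update(); a minimal sketch of such a method within the same class (the name station_status_update is hypothetical):

    def station_status_update(self, link):
        # Poll one link's "station_status" socket, mirroring the loop body above
        if link.socket_busy("station_status"):
            return
        if link.socket_response("station_status"):
            # A reply is waiting: cache the latest station state for this link
            pkt = link.get_packet("station_status")
            self.station_status.update({link.name: pkt.reply})
        else:
            # Socket is idle: send a new "get_mbtcp" status request
            link.send_packet("station_status", "get_mbtcp")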
Пример #44
0
def trainModel(modelname, X_train, y_train, config):    
    info("Training a {0} estimator".format(modelname), ind=0)
    info("X data is {0}".format(getDim(X_train)), ind=2)
    info("y data is {0}".format(getDim(y_train)), ind=2)
    
    problemType = config['problem']
    info("This is a {0} problem".format(problemType), ind=2)
    
    modelData = getModelData(config, modelname)
    tuneForParams = True
    refitModel = False
    goodModel = True
    if modelData is not None:
        if modelData.get('tune') is False:
            tuneForParams = False
        if modelData.get('fit') is True:
            tuneForParams = False
        if modelData.get('cv') is True:
            tuneForParams = False
        if modelData.get('refit') is True:
            refitModel = True
        if modelData.get('error') is True:
            goodModel = False
    else:
        info("No model parameters were given. Using default {0} estimator".format(modelname), ind=4)
        tuneForParams = False

    if goodModel is False:
        error("Model {0} is no good and will not run it.".format(modelname))
        return None
    

    #################################################################
    # Get Model
    #################################################################
    retval = getModel(config, modelname)


    #################################################################
    # Tune Parameters
    #################################################################
    estimator = retval['estimator']
    params    = retval['params']
    

    if tuneForParams:
        tuneResults = tuneModel(modelname, estimator, params, X_train, y_train, config)
        estimator   = tuneResults['estimator']
        params      = tuneResults['params']
        
        if refitModel:
            try:
                estimator.set_params(probability=True)
                info("Set probability to True for model refit", ind=4)
            except:
                info("Could not set probability to True for model refit")
            info("Re-fitting for {0} model parameters with probability".format(modelname), ind=4)
            estimator = estimator.fit(X_train, y_train)
            info("Finished re-fitting {0} model parameters with probability".format(modelname), ind=4)
    else:
        if estimator is not None:
            info("Fitting for {0} model parameters".format(modelname), ind=2)
            estimator = estimator.fit(X_train, y_train)
            info("Finished fitting {0} model parameters".format(modelname), ind=4)
        else:
            error("No model with name {0} was trained".format(modelname))


    return estimator
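
For context, a hypothetical call showing the flags trainModel() reads above; the exact layout expected by getModelData() is not shown in this snippet, so the nested "models" block below is only an assumption:

from sklearn.datasets import make_classification

# 'problem' drives isClassification()/isRegression(); the per-model entry is a
# stand-in for whatever getModelData() actually reads (tune/fit/refit/error/iter).
X_train, y_train = make_classification(n_samples=200, n_features=10, random_state=0)
config = {
    "problem": "classification",
    "models": {"logistic": {"tune": True, "refit": True, "iter": 20}},
}
estimator = trainModel("logistic", X_train, y_train, config)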
Пример #45
0
def printMatches(matches):
    i = 0
    for match in matches:
        logger.info("%d %s" % (i, match))
        i = i + 1
Пример #46
0
def tuneModel(modelname, estimator, params, X_train, y_train, config):  
    info("Tuning a {0} estimator".format(modelname), ind=0)
    
    if estimator is None or params is None:
        error("There is no estimator with parameters information.", ind=2)
        return {"estimator": None, "params": None, "cv": None}

    problemType    = config['problem']
    try:
        modelData = getModelData(config, modelname)
    except:
        modelData = None
        error("There is no model parameter data for the {0} estimator".format(modelname))

    if isClassification(problemType):
        scorers = ["accuracy", "average_precision", "f1", "f1_micro",
                   "f1_macro", "f1_weighted", "f1_samples", "neg_log_loss",
                   "precision", "recall", "roc_auc"]
        scorer = "roc_auc"
    
    if isClustering(problemType):
        scorers = ["adjusted_mutual_info_score", "adjusted_rand_score",
                   "completeness_score", "fowlkes_mallows_score",
                   "homogeneity_score", "mutual_info_score",
                   "normalized_mutual_info_score", "v_measure_score"]
        scorer = "adjusted_mutual_info_score"
    
    if isRegression(problemType):
        scorers = ["explained_variance", "neg_mean_absolute_error",
                   "neg_mean_squared_error", "neg_mean_squared_log_error",
                   "neg_median_absolute_error", "r2"]
        scorer = "neg_mean_absolute_error"

    if scorer not in scorers:
        raise ValueError("Scorer {0} is not allowed".format(scorer))

    searchType = "random"    
    if searchType == "grid":
        param_grid = params['grid']
        tuneEstimator = GridSearchCV(estimator, param_grid=param_grid, cv=2,
                                     scoring=scorer, verbose=1)
    elif searchType == "random":
        n_iter_search = modelData.get('iter') if modelData is not None else None
        if n_iter_search is None:
            n_iter_search = 10
        param_dist = params['dist']
        tuneEstimator = RandomizedSearchCV(estimator, param_distributions=param_dist,
                                           cv=2, n_iter=n_iter_search,
                                           scoring=scorer, verbose=1, n_jobs=-1,
                                           return_train_score=True)
    else:
        raise ValueError("Search type {0} is not allowed".format(searchType))


    info("Running {0} parameter search".format(searchType), ind=2)        
    tuneEstimator.fit(X_train, y_train)
    bestEstimator = tuneEstimator.best_estimator_        
    bestScore     = tuneEstimator.best_score_
    bestParams    = tuneEstimator.best_params_
    cvResults     = tuneEstimator.cv_results_
    cvScores      = cvResults['mean_test_score']
    fitTimes      = cvResults['mean_fit_time']

    info("Tested {0} Parameter Sets".format(len(fitTimes)), ind=4)
    info("CV Fit Time Info (Mean,Std): ({0} , {1})".format(round(fitTimes.mean(),1), round(fitTimes.std(),1)), ind=4)
    info("Best Score                 : {0}".format(round(bestScore, 3)), ind=4)
    info("CV Test Scores (Mean,Std)  : ({0} , {1})".format(round(cvScores.mean(),1), round(cvScores.std(),1)), ind=4)
    info("Best Parameters", ind=4)
    for paramName, paramVal in bestParams.iteritems():
        info("Param: {0} = {1}".format(paramName, paramVal), ind=6)
    

    return {"estimator": bestEstimator, "params": bestParams, "cv": cvResults}
Пример #47
0
def performsearch(texto):
    logger.info("[tumejortv.py] performsearch")
    url = "http://www.tumejortv.com/buscar/?s=" + texto + "&x=0&y=0"

    # Download the page
    resultados = []
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # Extract the movies
    patron = "<h3>Pel.iacute.culas online</h3><ul class='alphaList'>(.*?)</ul>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    data2 = ""
    if len(matches) > 0:
        data2 = matches[0]

    patron = '<li><div class="movieTitle">[^<]+</div><div class="covershot"><a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data2)
    if DEBUG: scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Add to the XBMC listing
        resultados.append([
            CHANNELNAME, "findvideos", "buscador", scrapedtitle, scrapedurl,
            scrapedthumbnail, scrapedplot
        ])

    # Extract the series
    patron = "<h3>Series online</h3><ul class='alphaList'>(.*?)</ul>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    data2 = matches[0] if len(matches) > 0 else ""

    patron = '<li><div class="movieTitle">[^<]+</div><div class="covershot"><a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data2)
    if DEBUG: scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Add to the XBMC listing
        resultados.append([
            CHANNELNAME, "detailserie", "buscador", scrapedtitle, scrapedurl,
            scrapedthumbnail, scrapedplot
        ])

    return resultados
Пример #48
0
 def renderSnapshotsPlugin(self):
     '''
     Render snapshots using the plugins imported modules
     '''
     logger.info("Render snapshot plugin", self)
     renderSnapshotsToDirectory()
Пример #49
0
def newlist(item):
    logger.info("[tumejortv.py] movielist")

    url = item.url
    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # Extract the movies
    patron = '<div class="item " style="clear:both;">[^<]+'
    patron += '<div class="covershot[^<]+'
    patron += '<a href="([^"]+)"[^<]+<img src="([^"]+)"[^<]+</a>[^<]+'
    patron += '</div>[^<]+'
    patron += '<div class="post-title">[^<]+'
    patron += '<h3><a[^<]+>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        scrapedtitle = match[2]
        scrapedtitle = scrapedtitle.replace("<span class=\'smallTitle'>", "(")
        scrapedtitle = scrapedtitle.replace("</span>", ")")
        scrapedurl = match[0]
        scrapedthumbnail = match[1]
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=CHANNELNAME,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))

    # Extract the next page
    patron = '<a href="([^"]+)" >&raquo;</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedtitle = "!Pagina siguiente"
        scrapedurl = matches[0]
        scrapedthumbnail = ""
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=CHANNELNAME,
                 action="newlist",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))

    return itemlist
Пример #50
0
    def process(self, context):
        self.shell = context.client
        applicationOsh = context.application.getOsh()
        applicationServerName = str(applicationOsh.getAttributeValue('name'))
        applicationServerIp = str(applicationOsh.getAttributeValue('application_ip'))
        applicationProfile = str(applicationOsh.getAttributeValue('sap_instance_profile'))
        logger.info('SAP ABAP Application Server Name: ', applicationServerName)
        logger.info('SAP ABAP Application Server application_ip: ', applicationServerIp)
        logger.info('SAP ABAP Application Server Profile: ', applicationProfile)

        # SAP ABAP Application Server CIT should be created with Name attribute in format
        # - ${HOSTNAME}_${SAPSYSTEMNAME}_${INSTANCE_NUMBER}

        # get the hostname from file
        hostnameF = None
        # get the hostname from command
        hostnameC = None
        # get the hostname from ip address
        hostname = None

        underLines = applicationServerName.count('_')
        serverName = applicationServerName.split('_', underLines - 1)

        fileContent = sap_discoverer_by_shell.read_pf(self.shell, applicationProfile)
        if fileContent and fileContent[1]:
            hostnameF = fileContent[1].get('SAPLOCALHOST') or fileContent[1].get(u'SAPLOCALHOST')
        if hostnameF:
            logger.info('SAP ABAP Application Server hostname, fetch from profile: ', hostnameF)
            applicationServerName = str(hostnameF).lower() + '_' + serverName[-1]
            applicationOsh.setStringAttribute('name', applicationServerName)
            return
        
        # if there is no SAPLOCALHOST from profile, try to get the hostname from command
        try:
            hostnameC = str(self.shell.execCmd('hostname'))
            if hostnameC:
                applicationServerName = hostnameC.lower() + '_' + serverName[-1]
                applicationOsh.setStringAttribute('name', applicationServerName)
                logger.info('SAP ABAP Application Server hostname, get from command: ', hostnameC)
                return
        except:
            logger.debug('cannot get hostname by command')

        # if cannot get the hostname by command, try to resolve it by IP address
        if applicationServerIp:
            hostname = netutils.getHostName(applicationServerIp)

        if hostname:
            applicationServerName = hostname.lower() + '_' + serverName[-1]
            logger.info('SAP ABAP Application Server hostname, resolved by ip: ', hostname)
            applicationOsh.setStringAttribute('name', applicationServerName)
        else:
            logger.debug('there is no valid ip address or hostname')
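
A quick worked example of the name-rebuilding logic used above (all values made up):

applicationServerName = "oldhost_PRD_00"
underLines = applicationServerName.count('_')                  # 2
serverName = applicationServerName.split('_', underLines - 1)  # ['oldhost', 'PRD_00']
hostnameF = "newhost"
# Rebuilt as ${HOSTNAME}_${SAPSYSTEMNAME}_${INSTANCE_NUMBER}
print(str(hostnameF).lower() + '_' + serverName[-1])           # newhost_PRD_00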
Пример #51
0
def updatechannel(channel_name):
    logger.info("pelisalacarta.core.updater updatechannel('" + channel_name +
                "')")

    # Remote channel
    remote_channel_url, remote_version_url = get_channel_remote_url(
        channel_name)

    # Local channel
    local_channel_path, local_version_path, local_compiled_path = get_channel_local_path(
        channel_name)

    #if not os.path.exists(local_channel_path):
    #    return False;

    # Remote version
    try:
        data = scrapertools.cachePage(remote_version_url)
        logger.info("pelisalacarta.core.updater remote_data=" + data)

        if "<tag>" in data:
            patronvideos = '<tag>([^<]+)</tag>'
        elif "<version>" in data:
            patronvideos = '<version>([^<]+)</version>'

        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        remote_version = int(matches[0])
    except:
        remote_version = 0

    logger.info("pelisalacarta.core.updater remote_version=%d" %
                remote_version)

    # Local version
    if os.path.exists(local_version_path):
        infile = open(local_version_path)
        data = infile.read()
        infile.close()
        logger.info("pelisalacarta.core.updater local_data=" + data)

        if "<tag>" in data:
            patronvideos = '<tag>([^<]+)</tag>'
        elif "<version>" in data:
            patronvideos = '<version>([^<]+)</version>'

        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        local_version = int(matches[0])
    else:
        local_version = 0

    logger.info("pelisalacarta.core.updater local_version=%d" % local_version)

    # Check whether it has changed
    updated = remote_version > local_version

    if updated:
        logger.info("pelisalacarta.core.updater updated")
        download_channel(channel_name)

    return updated
Пример #52
0
def search(item):
    logger.info("[tumejortv.py] search")

    buscador.listar_busquedas(params, url, category)
Пример #53
0
# XBMC Plugin
#------------------------------------------------------------

import urlparse, urllib2, urllib, re
import os
import sys
import scrapertools
import time
import config
import logger

# FIXME: This is duplicated in channelselector, it should be moved to config
thumbnail_type = config.get_setting("thumbnail_type")
if thumbnail_type == "":
    thumbnail_type = "2"
logger.info("thumbnail_type=" + thumbnail_type)
if thumbnail_type == "0":
    IMAGES_PATH = 'http://media.tvalacarta.info/pelisalacarta/posters/'
elif thumbnail_type == "1":
    IMAGES_PATH = 'http://media.tvalacarta.info/pelisalacarta/banners/'
elif thumbnail_type == "2":
    IMAGES_PATH = 'http://media.tvalacarta.info/pelisalacarta/squares/'

ROOT_DIR = config.get_runtime_path()

REMOTE_VERSION_FILE = "http://descargas.tvalacarta.info/" + config.PLUGIN_NAME + "-version.xml"
LOCAL_VERSION_FILE = os.path.join(ROOT_DIR, "version.xml")
LOCAL_FILE = os.path.join(ROOT_DIR, config.PLUGIN_NAME + "-")

try:
    # Added to the option: if the xbmcdharma platform is "True", it must not be the xbox platform
Пример #54
0
def checkforupdates(plugin_mode=True):
    logger.info("pelisalacarta.core.updater checkforupdates")

    # Download the file containing the published version from the web
    logger.info("pelisalacarta.core.updater Verificando actualizaciones...")
    logger.info("pelisalacarta.core.updater Version remota: " +
                REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)
    '''    
    <?xml version="1.0" encoding="utf-8" standalone="yes"?>
    <version>
            <name>pelisalacarta</name>
            <tag>4.0     </tag>
            <version>4000</version>
            <date>20/03/2015</date>
            <changes>New release</changes>
    </version>
    '''

    version_publicada = scrapertools.find_single_match(
        data, "<version>([^<]+)</version>").strip()
    tag_publicada = scrapertools.find_single_match(
        data, "<tag>([^<]+)</tag>").strip()
    logger.info("pelisalacarta.core.updater version remota=" + tag_publicada +
                " " + version_publicada)

    # Read the file with the installed version
    localFileName = LOCAL_VERSION_FILE
    logger.info("pelisalacarta.core.updater fichero local version: " +
                localFileName)
    infile = open(localFileName)
    data = infile.read()
    infile.close()
    #logger.info("xml local="+data)

    version_local = scrapertools.find_single_match(
        data, "<version>([^<]+)</version>").strip()
    tag_local = scrapertools.find_single_match(data,
                                               "<tag>([^<]+)</tag>").strip()
    logger.info("pelisalacarta.core.updater version local=" + tag_local + " " +
                version_local)

    try:
        numero_version_publicada = int(version_publicada)
        numero_version_local = int(version_local)
    except:
        import traceback
        logger.info(traceback.format_exc())
        version_publicada = ""
        version_local = ""

    if version_publicada == "" or version_local == "":
        arraydescargada = tag_publicada.split(".")
        arraylocal = tag_local.split(".")

        # local 2.8.0 - downloaded 2.8.0 -> do not download
        # local 2.9.0 - downloaded 2.8.0 -> do not download
        # local 2.8.0 - downloaded 2.9.0 -> download
        if len(arraylocal) == len(arraydescargada):
            logger.info("caso 1")
            hayqueactualizar = False
            for i in range(0, len(arraylocal)):
                print arraylocal[i], arraydescargada[i], int(
                    arraydescargada[i]) > int(arraylocal[i])
                if int(arraydescargada[i]) > int(arraylocal[i]):
                    hayqueactualizar = True
        # local 2.8.0 - downloaded 2.8 -> do not download
        # local 2.9.0 - downloaded 2.8 -> do not download
        # local 2.8.0 - downloaded 2.9 -> download
        if len(arraylocal) > len(arraydescargada):
            logger.info("caso 2")
            hayqueactualizar = False
            for i in range(0, len(arraydescargada)):
                #print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
                if int(arraydescargada[i]) > int(arraylocal[i]):
                    hayqueactualizar = True
        # local 2.8 - downloaded 2.8.8 -> download
        # local 2.9 - downloaded 2.8.8 -> do not download
        # local 2.10 - downloaded 2.9.9 -> do not download
        # local 2.5 - downloaded 3.0.0 -> download
        if len(arraylocal) < len(arraydescargada):
            logger.info("caso 3")
            hayqueactualizar = True
            for i in range(0, len(arraylocal)):
                #print arraylocal[i], arraydescargada[i], int(arraylocal[i])>int(arraydescargada[i])
                if int(arraylocal[i]) > int(arraydescargada[i]):
                    hayqueactualizar = False
                elif int(arraylocal[i]) < int(arraydescargada[i]):
                    hayqueactualizar = True
                    break
    else:
        hayqueactualizar = (numero_version_publicada > numero_version_local)

    if hayqueactualizar:

        if plugin_mode:

            logger.info("pelisalacarta.core.updater actualizacion disponible")

            # Add to the XBMC listing
            import xbmcgui
            thumbnail = IMAGES_PATH + "Crystal_Clear_action_info.png"
            logger.info("thumbnail=" + thumbnail)
            listitem = xbmcgui.ListItem("Descargar version " + tag_publicada,
                                        thumbnailImage=thumbnail)
            itemurl = '%s?action=update&version=%s' % (sys.argv[0],
                                                       tag_publicada)
            import xbmcplugin
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                        url=itemurl,
                                        listitem=listitem,
                                        isFolder=True)

            # Show a popup notification
            dialog = xbmcgui.Dialog()
            dialog.ok(
                "Versión " + tag_publicada + " disponible",
                "Ya puedes descargar la nueva versión del plugin\ndesde el listado principal"
            )

        else:

            import xbmcgui
            yes_pressed = xbmcgui.Dialog().yesno(
                "Versión " + tag_publicada + " disponible",
                "¿Quieres instalarla?")

            if yes_pressed:
                params = {"version": tag_publicada}
                update(params)
    '''
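
The three "caso" branches above implement an element-wise comparison of dotted version tags; a compact sketch of the same idea using zero-padded integer tuples (illustrative only, not part of the updater API):

def needs_update(local_tag, remote_tag):
    # Compare dotted tags element-wise, padding the shorter one with zeros,
    # e.g. needs_update("2.8", "2.8.8") -> True, needs_update("2.9", "2.8.8") -> False
    local = [int(x) for x in local_tag.split(".")]
    remote = [int(x) for x in remote_tag.split(".")]
    size = max(len(local), len(remote))
    local += [0] * (size - len(local))
    remote += [0] * (size - len(remote))
    return tuple(remote) > tuple(local)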
Пример #55
0
if __name__ == '__main__':
    
#     D:\workspace\AppWeb-A-8089\bin
#     D:\workspace\AppWeb-A-8090\bin
# or
#     D:\workspace\AppWeb-B-8085\bin
#     D:\workspace\AppWeb-B-8086\bin

    # Back-end: compile and package
    complie_and_zip()
    time.sleep(2)
    # Deploy server 1
    conn_and_deploy(APP_SERVER1[0], APP_SERVER1[1], APP_SERVER1[2],PATH_INFO['ABZip_path'], PATH_INFO['target_path'])
    exc(APP_SERVER1[0], APP_SERVER1[1], APP_SERVER1[2])
    print(info('server1(17) deployed'))

    conn_and_deploy(APP_SERVER2[0], APP_SERVER2[1], APP_SERVER2[2],PATH_INFO['ABZip_path'], PATH_INFO['target_path'])
    exc(APP_SERVER2[0], APP_SERVER2[1], APP_SERVER2[2])
    print(info('server1(31) deployed'))

    conn_and_deploy(APP_SERVER3[0], APP_SERVER3[1], APP_SERVER3[2],PATH_INFO['ABZip_path'], PATH_INFO['target_path'])
    exc(APP_SERVER3[0], APP_SERVER3[1], APP_SERVER3[2])
    print(info('server1(86) deployed'))

    conn_and_deploy(APP_SERVER4[0], APP_SERVER4[1], APP_SERVER4[2],PATH_INFO['ABZip_path'], PATH_INFO['target_path'])
    exc(APP_SERVER4[0], APP_SERVER4[1], APP_SERVER4[2])
    print(info('server1(87) deployed'))
    # Deploy server 2 (left commented out)
    # conn_and_deploy(APP_SERVER2[0], APP_SERVER2[1], APP_SERVER2[2],PATH_INFO['ABZip_path'], PATH_INFO['target_path'])
    # print(info('server1(31) deployed'))
Пример #56
0
def update(params):
    # Download the ZIP
    logger.info("pelisalacarta.core.updater update")
    remotefilename = REMOTE_FILE + params.get("version") + ".zip"
    localfilename = LOCAL_FILE + params.get("version") + ".zip"
    logger.info("pelisalacarta.core.updater remotefilename=%s" %
                remotefilename)
    logger.info("pelisalacarta.core.updater localfilename=%s" % localfilename)
    logger.info("pelisalacarta.core.updater descarga fichero...")
    inicio = time.clock()

    #urllib.urlretrieve(remotefilename,localfilename)
    from core import downloadtools
    downloadtools.downloadfile(remotefilename, localfilename, continuar=False)

    fin = time.clock()
    logger.info("pelisalacarta.core.updater Descargado en %d segundos " %
                (fin - inicio + 1))

    # Unzip it
    logger.info("pelisalacarta.core.updater descomprime fichero...")
    import ziptools
    unzipper = ziptools.ziptools()
    destpathname = DESTINATION_FOLDER
    logger.info("pelisalacarta.core.updater destpathname=%s" % destpathname)
    unzipper.extract(localfilename, destpathname)

    # Delete the downloaded zip
    logger.info("pelisalacarta.core.updater borra fichero...")
    os.remove(localfilename)
    logger.info("pelisalacarta.core.updater ...fichero borrado")
Пример #57
0
import servertools
import binascii
import xbmctools
import config
import logger

CHANNELNAME = "sevillista"

# This allows running in emulated mode
try:
    pluginhandle = int( sys.argv[ 1 ] )
except:
    pluginhandle = ""

# Log the channel start-up
logger.info("[sevillista.py] init")

DEBUG = True

def mainlist(params,url,category):
    logger.info("[sevillista.py] mainlist")
    xbmctools.addnewfolder( CHANNELNAME , "novedades" , CHANNELNAME , "Películas - Novedades" , "http://www.pelis-sevillista56.org/" , "", "" )
    xbmctools.addnewfolder( CHANNELNAME , "categorias" , CHANNELNAME , "Películas - Por categoría" , "http://www.pelis-sevillista56.org/" , "", "" )
    xbmctools.addnewfolder( CHANNELNAME , "novedades" , CHANNELNAME , "Series - Novedades" , "http://www.pelis-sevillista56.org/search/label/Series" , "", "" )

    # Label (top-right)...
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )

def categorias(params,url,category):
Пример #58
0
 def __init__(self, url, sql):
     self.url = url
     self.url_obj = urlparse(self.url)
     self.sql = sql
     logger.info(True, json.dumps(self.url_obj))
Пример #59
0
def checkforupdates():
    logger.info("[updater.py] checkforupdates")

    # Download the file containing the published version from the web
    logger.info("[updater.py] Verificando actualizaciones...")
    logger.info("[updater.py] Version remota: " + REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)
    #logger.info("xml descargado="+data)
    patronvideos = '<tag>([^<]+)</tag>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    versiondescargada = matches[0]
    logger.info("[updater.py] version descargada=" + versiondescargada)

    # Read the file with the installed version
    localFileName = LOCAL_VERSION_FILE
    logger.info("[updater.py] Version local: " + localFileName)
    infile = open(localFileName)
    data = infile.read()
    infile.close()
    #logger.info("xml local="+data)
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    versionlocal = matches[0]
    logger.info("[updater.py] version local=" + versionlocal)

    arraydescargada = versiondescargada.split(".")
    arraylocal = versionlocal.split(".")

    # local 2.8.0 - downloaded 2.8.0 -> do not download
    # local 2.9.0 - downloaded 2.8.0 -> do not download
    # local 2.8.0 - downloaded 2.9.0 -> download
    if len(arraylocal) == len(arraydescargada):
        #logger.info("caso 1")
        hayqueactualizar = False
        for i in range(0, len(arraylocal)):
            #print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
            if int(arraydescargada[i]) > int(arraylocal[i]):
                hayqueactualizar = True
    # local 2.8.0 - downloaded 2.8 -> do not download
    # local 2.9.0 - downloaded 2.8 -> do not download
    # local 2.8.0 - downloaded 2.9 -> download
    if len(arraylocal) > len(arraydescargada):
        #logger.info("caso 2")
        hayqueactualizar = False
        for i in range(0, len(arraydescargada)):
            #print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
            if int(arraydescargada[i]) > int(arraylocal[i]):
                hayqueactualizar = True
    # local 2.8 - downloaded 2.8.8 -> download
    # local 2.9 - downloaded 2.8.8 -> do not download
    # local 2.10 - downloaded 2.9.9 -> do not download
    # local 2.5 - downloaded 3.0.0 -> download
    if len(arraylocal) < len(arraydescargada):
        #logger.info("caso 3")
        hayqueactualizar = True
        for i in range(0, len(arraylocal)):
            #print arraylocal[i], arraydescargada[i], int(arraylocal[i])>int(arraydescargada[i])
            if int(arraylocal[i]) > int(arraydescargada[i]):
                hayqueactualizar = False
            elif int(arraylocal[i]) < int(arraydescargada[i]):
                hayqueactualizar = True
                break

    if (hayqueactualizar):
        logger.info("[updater.py] actualizacion disponible")

        # Add to the XBMC listing
        import xbmcgui
        listitem = xbmcgui.ListItem(
            "Descargar version " + versiondescargada,
            iconImage=os.path.join(IMAGES_PATH, "poster",
                                   "Crystal_Clear_action_info.png"),
            thumbnailImage=os.path.join(IMAGES_PATH,
                                        "Crystal_Clear_action_info.png"))
        itemurl = '%s?action=update&version=%s' % (sys.argv[0],
                                                   versiondescargada)
        import xbmcplugin
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                    url=itemurl,
                                    listitem=listitem,
                                    isFolder=True)

        # Show a popup notification
        dialog = xbmcgui.Dialog()
        dialog.ok(
            "Versión " + versiondescargada + " disponible",
            "Ya puedes descargar la nueva versión del plugin\ndesde el listado principal"
        )
    '''
Пример #60
0
		return "Erreur Argument !"
	parkingList = functions.closestParking(depLat, depLon)
	for parking in parkingList:
		park_iti = sendRequest(depLat, depLon, parking['posx'], parking['posy'], "toPark")
		dest_iti = sendRequest(parking['posx'], parking['posy'], endLat, endLon)
		itiList.append(functions.merge([park_iti, dest_iti]))
	return jsonify(results=itiList)

@app.route('/direct_iti')
def direct_iti():
	departurePoint = request.args.get('fromPlace', '')
	depLat,depLon = parse_commas(departurePoint)
	endPoint = request.args.get('toPlace', '')
	endLat,endLon = parse_commas(endPoint)
	if 'err' == depLat or 'err' == endLat:
		return "Erreur Argument !"
	itiList = []
	iti = sendRequest(depLat, depLon, endLat, endLon, "toPark")
	itiList.append(functions.merge([iti]))
	return jsonify(results=itiList)


if __name__ == '__main__':
	logger.info("Starting ...")
	if not URL_OPEN_TRIP_PLANNER:
		logger.error("Please specify URL_OPEN_TRIP_PLANNER in config.py")
	if DEBUG:
		app.run(host='0.0.0.0',port=8080,debug=True)
	else:
		app.run(host='0.0.0.0',port=8080,threaded=True)