Example #1
def make_backup(cleanup=False, scheduler=False):
    """ Makes a backup of config file, removes all but the last 5 backups """

    if scheduler:
        backup_file = 'config.backup-%s.sched.ini' % arrow.now().format('YYYYMMDDHHmmss')
    else:
        backup_file = 'config.backup-%s.ini' % arrow.now().format('YYYYMMDDHHmmss')
    backup_folder = plexpy.CONFIG.BACKUP_DIR
    backup_file_fp = os.path.join(backup_folder, backup_file)

    # In case the user has deleted it manually
    if not os.path.exists(backup_folder):
        os.makedirs(backup_folder)

    plexpy.CONFIG.write()
    shutil.copyfile(plexpy.CONFIG_FILE, backup_file_fp)

    if cleanup:
        # Delete all scheduled backup files except for the last 5.
        for root, dirs, files in os.walk(backup_folder):
            db_files = [os.path.join(root, f) for f in files if f.endswith('.sched.ini')]
            if len(db_files) > 5:
                backups_sorted_on_age = sorted(db_files, key=os.path.getctime, reverse=True)
                for file_ in backups_sorted_on_age[5:]:
                    try:
                        os.remove(file_)
                    except OSError as e:
                        logger.error(u"PlexPy Config :: Failed to delete %s from the backup folder: %s" % (file_, e))

    if backup_file in os.listdir(backup_folder):
        logger.debug(u"PlexPy Config :: Successfully backed up %s to %s" % (plexpy.CONFIG_FILE, backup_file))
        return True
    else:
        logger.warn(u"PlexPy Config :: Failed to backup %s to %s" % (plexpy.CONFIG_FILE, backup_file))
        return False
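
A minimal usage sketch (hypothetical call; assumes plexpy's config module is initialized as in the function body):

# Take a scheduled backup and prune old .sched.ini backups:
if not make_backup(cleanup=True, scheduler=True):
    logger.warn(u"PlexPy Config :: Backup did not complete")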
Example #2
def getDirections(routeParams):
    origin = routeParams["departureCoord"]
    destination = routeParams["destinationCoord"]
    #TODO add mode (defaults to driving)
    #TODO add waypoints/vias https://developers.google.com/maps/documentation/directions/#Waypoints
    #     waypoints=optimize:false|Charlestown,MA|via:Lexington,MA
    #TODO add language - en-GB
    #TODO add units - metric
    #TODO add region=nl ??
    url = "http://maps.googleapis.com/maps/api/directions/json?" +\
          "origin=" + origin +\
          "&destination=" + destination +\
          "&alternatives=true" +\
          "&sensor=false"

    logger.dbg("Sending HTTP request for url='" + url + "'")

    response = urllib.urlopen(url)
    jsonResponse = json.loads(response.read())
    #logger.dbg(str(json.dumps(jsonResponse, sort_keys=True, indent=4, separators=(',', ': '))))

    status = jsonResponse.get("status", "UNKNOWN_ERROR")
    routes = jsonResponse.get("routes", [])
    if status != "OK" or len(routes) == 0:
        logger.error("Request failed. status='%s', len(routes)=%d" % (status, len(routes)))
        return "Error"
    else:
        logger.dbg("status=OK, nofRoutes=%d" % len(routes))

    return routes
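
A hedged usage sketch (the coordinate strings are hypothetical; the function only requires the two keys shown):

routeParams = {"departureCoord": "52.3702,4.8952",
               "destinationCoord": "51.9244,4.4777"}
routes = getDirections(routeParams)
if routes != "Error":
    logger.dbg("Got %d alternative routes" % len(routes))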
Example #3
def cache_image(url, image=None):
    """
    Saves an image to the cache directory.
    If no image is provided, tries to return the image from the cache directory.
    """
    # Create image directory if it doesn't exist
    imgdir = os.path.join(plexpy.CONFIG.CACHE_DIR, 'images/')
    if not os.path.exists(imgdir):
        logger.debug(u"PlexPy Helpers :: Creating image cache directory at %s" % imgdir)
        os.makedirs(imgdir)

    # Create a hash of the url to use as the filename
    imghash = hashlib.md5(url).hexdigest()
    imagefile = os.path.join(imgdir, imghash)

    # If an image is provided, save it to the cache directory
    if image:
        try:
            with open(imagefile, 'wb') as cache_file:
                cache_file.write(image)
        except IOError as e:
            logger.error(u"PlexPy Helpers :: Failed to cache image %s: %s" % (imagefile, e))

    # Try to return the image from the cache directory
    if os.path.isfile(imagefile):
        imagetype = 'image/' + imghdr.what(os.path.abspath(imagefile))
    else:
        imagefile = None
        imagetype = 'image/jpeg'

    return imagefile, imagetype
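
A short sketch of both call modes (hypothetical URL; assumes a urllib2-style fetch for the image bytes):

image_url = 'http://example.com/poster.jpg'
image_data = urllib2.urlopen(image_url).read()
cache_image(image_url, image_data)             # save mode
imagefile, imagetype = cache_image(image_url)  # retrieve mode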
Example #4
    def write(self):
        """ Make a copy of the stored config and write it to the configured file """
        new_config = ConfigObj(encoding="UTF-8")
        new_config.filename = self._config_file

        # first copy over everything from the old config, even if it is not
        # correctly defined to keep from losing data
        for key, subkeys in self._config.items():
            if key not in new_config:
                new_config[key] = {}
            for subkey, value in subkeys.items():
                new_config[key][subkey] = value

        # next make sure that everything we expect to have defined is so
        for key in _CONFIG_DEFINITIONS.keys():
            key, definition_type, section, ini_key, default = self._define(key)
            self.check_setting(key)
            if section not in new_config:
                new_config[section] = {}
            new_config[section][ini_key] = self._config[section][ini_key]

        # Write it to file
        logger.info(u"PlexPy Config :: Writing configuration to file")

        try:
            new_config.write()
        except IOError as e:
            logger.error(u"PlexPy Config :: Error writing configuration file: %s", e)

        self._blacklist()
Example #5
def resolveIpFromDnsPortipPortconcepts(Framework, OSHVResult, ipPortconcepts, localShell, dnsServers = None):
    logger.debug('Resolving concepts')
    for ipPortConceptEntry in ipPortconcepts.entrySet():
        conceptName = ipPortConceptEntry.getKey()
        conceptFields = ipPortConceptEntry.getValue()
        logger.debug('processing [', conceptName, ']')
        PROVIDER_IPs = None
        PROVIDER_PORTs = None
        for conceptFieldEntry in conceptFields.entrySet():
            fieldName = conceptFieldEntry.getKey().upper()
            if fieldName == PROVIDER_IP:
                PROVIDER_IPs = conceptFieldEntry.getValue()
            elif fieldName == PROVIDER_PORT:
                PROVIDER_PORTs = conceptFieldEntry.getValue()

        if PROVIDER_IPs is not None:
            logger.debug('for concept [', conceptName, '].[', PROVIDER_IP, '] found [', str(len(PROVIDER_IPs)), '] values')
            if PROVIDER_PORTs is None:
                processIps(Framework, OSHVResult, PROVIDER_IPs, localShell)
            elif len(PROVIDER_IPs) != len(PROVIDER_PORTs):
                errorMessage = 'There is a mismatch between the number of IP addresses and the number of ports that were found. The concept [' + conceptName + '].[' + PROVIDER_IP + '] found [' + str(len(PROVIDER_IPs)) + '] values while for [' + conceptName + '].[' + PROVIDER_PORT + '] found [' + str(len(PROVIDER_PORTs)) + '] values'
                Framework.reportWarning(errorMessage)
                logger.warn(errorMessage)
                processIps(Framework, OSHVResult, PROVIDER_IPs, localShell)
            else:
                for index in range(len(PROVIDER_IPs)):
                    resolvedIp = resolveIpFromDns(Framework, PROVIDER_IPs[index], localShell, dnsServers)
                    if resolvedIp is not None:
                        processIpPort(Framework, OSHVResult, resolvedIp, PROVIDER_PORTs[index], localShell, dnsServers)
        else:
            logger.error('No ' + PROVIDER_IP + ' field returned for concept [', conceptName, ']')
Example #6
def multicast_pings( failover_id, failovers, last_info, timeout ):
    http = httplib2.Http( timeout = timeout )
    for failover in failovers:
        try:
            http.request( "http://" + ":".join( map( str, failover ) ) + "/info?id=" + failover_id + "&data=" + dump_info( last_info ) )
        except Exception as e:
            logger.error( "Multicast pings problem: %s" % str( e ) )
Example #7
def create_https_certificates(ssl_cert, ssl_key):
    """
    Create a self-signed HTTPS certificate and store it in
    'ssl_cert' and 'ssl_key'. This method assumes pyOpenSSL is installed.

    This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).
    """
    from OpenSSL import crypto
    from certgen import createKeyPair, createSelfSignedCertificate, TYPE_RSA

    serial = int(time.time())
    domains = ['DNS:' + d.strip() for d in plexpy.CONFIG.HTTPS_DOMAIN.split(',') if d]
    ips = ['IP:' + d.strip() for d in plexpy.CONFIG.HTTPS_IP.split(',') if d]
    altNames = ','.join(domains + ips)

    # Create the self-signed PlexPy certificate
    logger.debug(u"Generating self-signed SSL certificate.")
    pkey = createKeyPair(TYPE_RSA, 2048)
    cert = createSelfSignedCertificate(("PlexPy", pkey), serial, (0, 60 * 60 * 24 * 365 * 10), altNames) # ten years

    # Save the key and certificate to disk
    try:
        with open(ssl_cert, "w") as fp:
            fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
        with open(ssl_key, "w") as fp:
            fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
    except IOError as e:
        logger.error("Error creating SSL key and certificate: %s", e)
        return False

    return True
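
A minimal call sketch (file paths are hypothetical):

if create_https_certificates('/config/plexpy.crt', '/config/plexpy.key'):
    logger.info(u"Self-signed certificate and key written to disk")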
Example #8
    def run(self):
        logger.info("Starting Command loop ...")
        while True:
            try:
                logger.info("Geting command state")
                resp = requests.get('http://homemonitor.esy.es/api.php?request=get_status&system_id=1234567890')
                if resp.status_code == 200:
                    logger.info("Success")
                   
                    switch_states = resp.json()
                    for item in switch_states:
                        logger.info(item['switch_id'])
                        logger.info(item['status'])
                        logger.info(item['current_cmd'])

                        ###############
                        # Execute command

                        #item['current_cmd'] = 0  # Reset current_cmd to 0

                    #print "Sending:" + json.dumps(switch_states)
                    #resp = requests.post('http://homemonitor.esy.es/api.php?request=set_command&system_id=1234567890', json=switch_states)
                    #if resp.status_code == 200:
                    #    logger.info("Command state updated")
                    #else:
                    #    logger.error("Error updating command state")
                else:
                    logger.error("Error getting command status")
            except requests.exceptions.RequestException as e:
                logger.info(e)
            except Exception:
                logger.info("Unexpected error: %s", sys.exc_info()[0])

            time.sleep(10)
Example #9
def __getHtmlContent(sUrl = None, sSecurityValue = None):
    oParams = ParameterHandler()
    # Test if a url is available and set it
    if sUrl is None and not oParams.exist('sUrl'):
        logger.error("There is no url we can request.")
        return False
    else:
        if sUrl is None:
            sUrl = oParams.getValue('sUrl')
    # Test if a security value is available
    if sSecurityValue is None:
        if oParams.exist("securityCookie"):
            sSecurityValue = oParams.getValue("securityCookie")
    if not sSecurityValue:
        sSecurityValue = ''
    # preferred language
    sPrefLang = __getPreferredLanguage()
    # Make the request
    oRequest = cRequestHandler(sUrl)
    oRequest.addHeaderEntry('Cookie', sPrefLang+sSecurityValue+'ListDisplayYears=Always;')
    oRequest.addHeaderEntry('Referer', URL_MAIN)
    oRequest.addHeaderEntry('Accept', '*/*')
    oRequest.addHeaderEntry('Host', domain)

    return oRequest.request()
Example #10
    def extract(self, file, dir):
        logger.debug("file=%s" % file)
        logger.debug("dir=%s" % dir)
        
        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)

        zf = zipfile.ZipFile(file)
        self._createstructure(file, dir)
        num_files = len(zf.namelist())

        for name in zf.namelist():
            logger.debug("name=%s" % name)
            if not name.endswith('/'):
                logger.debug("continue with file: "+name)
                try:
                    (path,filename) = os.path.split(os.path.join(dir, name))
                    logger.debug("path=%s" % path)
                    logger.debug("name=%s" % name)
                    os.makedirs( path )
                except:
                    pass
                outfilename = os.path.join(dir, name)
                logger.debug("outfilename=%s" % outfilename)
                try:
                    with open(outfilename, 'wb') as outfile:
                        outfile.write(zf.read(name))
                except Exception:
                    logger.error("Failed to extract file: " + name)
Example #11
    def action(self, query, args=None, return_last_id=False):
        if query is None:
            return

        with db_lock:
            sql_result = None
            attempts = 0

            while attempts < 5:
                try:
                    with self.connection as c:
                        if args is None:
                            sql_result = c.execute(query)
                        else:
                            sql_result = c.execute(query, args)
                    # Our transaction was successful, leave the loop
                    break

                except sqlite3.OperationalError as e:
                    if "unable to open database file" in e or "database is locked" in e:
                        logger.warn(u"PlexPy Database :: Database Error: %s", e)
                        attempts += 1
                        time.sleep(1)
                    else:
                        logger.error(u"PlexPy Database :: Database error: %s", e)
                        raise

                except sqlite3.DatabaseError as e:
                    logger.error(u"PlexPy Database :: Fatal Error executing %s :: %s", query, e)
                    raise

            return sql_result
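
A usage sketch for the retry wrapper (the owning class and table are assumptions; any object exposing self.connection works):

db = MonitorDatabase()  # hypothetical class that owns the sqlite3 connection
db.action('INSERT INTO sessions (session_key, title) VALUES (?, ?)',
          args=('abc123', 'Some Movie'))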
Example #12
def mainlist(params,url,category):
    logger.info("[descargadoslist.py] mainlist")

    import xbmctools

    # Create a listing with the entries in the list
    if usingsamba:
        ficheros = samba.get_files(DOWNLOAD_PATH)
    else:
        ficheros = os.listdir(DOWNLOAD_PATH)
    ficheros.sort()
    
    for fichero in ficheros:
        #xbmc.output("fichero="+fichero)
        try:
            # Read the bookmark
            titulo,thumbnail,plot,server,url = readbookmark(fichero)

            # Create the entry
            # The category holds the file name so the entry can be deleted later
            xbmctools.addnewvideo( CHANNELNAME , "play" , os.path.join( DOWNLOAD_PATH, fichero ) , server , titulo , url , thumbnail, plot )
        except:
            logger.info("[downloadall.py] error reading bookmark")
            for line in sys.exc_info():
                logger.error( "%s" % line )

    xbmctools.addnewvideo( CHANNELNAME , "downloadall" , "category" , "server" , "(Empezar la descarga de la lista)" , "" , os.path.join(IMAGES_PATH, "Crystal_Clear_action_db_update.png"), "" )

    # Label (top-right)...
    xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
    xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
Example #13
    def download(self,url, sTitle):
        self.__processIsCanceled = False
        # extract header
        try:
            header = dict([item.split('=') for item in (url.split('|')[1]).split('&')])
        except:
            header = {}
        logger.info('Header for download: %s' % (header))

        url = url.split('|')[0]    
        sTitle = self.__createTitle(url, sTitle)
        self.__sTitle = self.__createDownloadFilename(sTitle)
        
        oGui = cGui()
        self.__sTitle = oGui.showKeyBoard(self.__sTitle)
        if (self.__sTitle != False and len(self.__sTitle) > 0):
            sPath = cConfig().getSetting('download-folder')

            if sPath == '':
                dialog = xbmcgui.Dialog()
                sPath = dialog.browse(3, 'Downloadfolder', 'files', '')

            if (sPath != ''):                
                sDownloadPath = xbmc.translatePath(sPath +  '%s' % (self.__sTitle, ))
                try:
                    logger.info('download file: ' + str(url) + ' to ' + str(sDownloadPath))
                    self.__createProcessDialog()
                    request = urllib2.Request(url, headers=header)
                    self.__download(urllib2.urlopen(request), sDownloadPath)   
                except Exception as e:
                    logger.error(e)

                self.__oDialog.close()
Example #14
def getIPSubnetObjects(api, filters):
	found = 0
	nwMap = {}
	try:
		nwStub = api.getStub(NnmServicesEnum().IPSubnet)
		for filter in filters:
			allSubnetsArray = nwStub.getIPSubnets(filter)
			allSubnets = allSubnetsArray.getItem()
			if allSubnets is not None:
				found = 1
				logger.debug("Retrieved %s IPSubnet Objects" % (len(allSubnets)))
				for i in range(len(allSubnets)):
					if (notNull(allSubnets[i].getId())
							and allSubnets[i].getPrefixLength() >= 0
							and allSubnets[i].getPrefixLength() <= 32
							and notNull(allSubnets[i].getCreated()) and notNull(allSubnets[i].getModified())):
						nwMap[allSubnets[i].getId()] = USubnet(allSubnets[i].getId(), allSubnets[i].getName(), allSubnets[i].getPrefixLength(),
											allSubnets[i].getPrefix(),
											'', '')
			else:
				break
	except:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		errMsg = 'Exception:\n %s' % stacktrace
		logger.error(errMsg)
		api.Framework.reportWarning(errMsg)
	if found:
		logger.debug('Created a dictionary of %d IPSubnet objects' % (len(nwMap)))
	else:
		errMsg = 'Did not find any IPSubnet objects'
		logger.debug(errMsg)
		api.Framework.reportWarning(errMsg)

	return nwMap
Example #15
def getL2ConnectionLinks(api, filters):
	found = 0
	l2Map = {}
	try:
		l2Stub = api.getStub(NnmServicesEnum().L2Connection)
		for filter in filters:
			allL2Array = l2Stub.getL2Connections(filter)
			allL2s = allL2Array.getItem()
			if allL2s is not None:
				found = 1
				logger.debug("Retrieved %s L2Connection Links" % (len(allL2s)))
				for i in range(len(allL2s)):
					if (notNull(allL2s[i].getId())
							and notNull(allL2s[i].getName())
							and notNull(allL2s[i].getCreated()) and notNull(allL2s[i].getModified())):
						l2Map[allL2s[i].getId()] = UL2(allL2s[i].getId(), allL2s[i].getName(), None, '', '')
			else:
				break
	except:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		errMsg = 'Exception:\n %s' % stacktrace
		logger.error(errMsg)
		api.Framework.reportWarning(errMsg)
	if found:
		logger.debug('Created a dictionary of %d L2Connection Links' % (len(l2Map)))
	else:
		errMsg = 'Did not find any L2Connection Links'
		logger.debug(errMsg)
		api.Framework.reportWarning(errMsg)

	return l2Map
Example #16
    def _parse_links(self, dom, container_selector, link_selector, snippet_selector):
        links = []
        # Try to extract all links of non-ad results, including their snippets(descriptions) and titles.
        try:
            li_g_results = dom.xpath(self._xp(container_selector))
            for e in li_g_results:
                try:
                    link_element = e.xpath(self._xp(link_selector))
                    link = link_element[0].get('href')
                    title = link_element[0].text_content()
                except IndexError as err:
                    logger.debug(
                        'Error while parsing link/title element with selector={}: {}'.format(link_selector, err))
                    continue
                try:
                    snippet_element = e.xpath(self._xp(snippet_selector))
                    snippet = snippet_element[0].text_content()
                except IndexError as err:
                    try:
                        previous_element = links[-1]
                    except IndexError as ie:
                        previous_element = None
                    logger.debug('Error in parsing snippet with selector={}. Previous element: {}. Error: {}'.format(
                        snippet_selector, previous_element, err))
                    continue

                links.append(self.Result(link_title=title, link_url=link, link_snippet=snippet))
        # Catch further errors besides parsing errors that take shape as IndexErrors
        except Exception as err:
            logger.error('Error in parsing result links with selector={}: {}'.format(container_selector, err))
            logger.info(li_g_results)

        return links or []
Example #17
def _parseMedia(sUrl,sRootUrl,iPage,sPattern, oGui):
    logger.error("parse %s with pattern %s" % (sUrl, sPattern))

    oRequestHandler = cRequestHandler(sUrl)
    sHtmlContent = oRequestHandler.request()

    _parseMovie(sHtmlContent,sUrl,sRootUrl,iPage,sPattern,oGui)
Example #18
def getSiteCachePath(url):
    # Get the main domain of the URL
    dominio = urlparse.urlparse(url)[1]
    logger.debug("[scrapertools.py] dominio="+dominio)
    nombres = dominio.split(".")
    if len(nombres)>1:
        dominio = nombres[-2]+"."+nombres[-1]
    else:
        dominio = nombres[0]
    logger.debug("[scrapertools.py] dominio="+dominio)
    
    # Create a directory in the cache for addresses from this domain
    siteCachePath = os.path.join( CACHE_PATH , dominio )
    if not os.path.exists(CACHE_PATH):
        try:
            os.mkdir( CACHE_PATH )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+CACHE_PATH)

    if not os.path.exists(siteCachePath):
        try:
            os.mkdir( siteCachePath )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+siteCachePath)
    
    logger.debug("[scrapertools.py] siteCachePath="+siteCachePath)

    return siteCachePath
Example #19
def mainlist(item):
    logger.info("[favoritos.py] mainlist")
    itemlist=[]

    # Create a listing with the favorites entries
    if usingsamba(BOOKMARK_PATH):
        ficheros = samba.get_files(BOOKMARK_PATH)
    else:
        ficheros = os.listdir(BOOKMARK_PATH)
    
    # Sort the listing by file name (order of addition)
    ficheros.sort()
    
    # Populate the listing
    for fichero in ficheros:

        try:
            # Read the bookmark
            canal,titulo,thumbnail,plot,server,url,fulltitle = readbookmark(fichero)
            if canal=="":
                canal="favoritos"

            # Create the entry
            # 'extra' holds the file name so the entry can be deleted later
            ## <-- Add fulltitle with the movie title
            itemlist.append( Item( channel=canal , action="play" , url=url , server=server, title=fulltitle, thumbnail=thumbnail, plot=plot, fanart=thumbnail, extra=os.path.join( BOOKMARK_PATH, fichero ), fulltitle=fulltitle, folder=False ))
        except:
            for line in sys.exc_info():
                logger.error( "%s" % line )
    
    return itemlist
Example #20
    def add(self, text, file):
        """
        Add to collection new voice
        :param text: text for search
        :param file: gif image
        :return: Picture entity
        """

        # Don't save a record without content:
        if file is None:
            return None

        # Check that the text doesn't already exist; otherwise return the found entity
        duplicate = self.get(text)
        if duplicate:
            return duplicate

        ss = ServiceLocator.resolve(ServiceLocator.SESSIONS)

        voice = self.db.Voice()
        voice.text = text

        voice.author = ss.get().login

        try:
            voice.validate()
            voice.save()

            # Save the content:
            voice.fs.content = file

            return voice
        except Exception as ex:
            error(u'Voice.add', ex)
            return None
Example #21
def findDatabases(localClient, procToPortDict, dbInstanceDict, isWindows='true', wmiRegistryClient=None):
    try:
        ## DB2 cannot be discovered through an SNMP/WMI agent
        localClientType = localClient.getClientType()
        if localClientType not in ['telnet', 'ssh', 'ntadmin']:
            logger.error('[' + SCRIPT_NAME + ':findDatabases] DB2 discovery requires SSH/Telnet/NTCMD')
            return

        ## The best approach to find DB2 instances is to make a list of
        ## locations where DB2 may be installed and search through them.
        searchLocations = []
        ## Extract information from process to port dictionary first
        ## For DB2, it is not possible to get database details from this
        ## dictionary. the best approach is to get possible install
        ## locations of DB2 and find databases later
        processProcToPortDict(localClient, isWindows, procToPortDict, searchLocations)

        ## Use the list of possible install locations to identify valid
        ## install locations
        instanceLocations = getInstancePaths(localClient, isWindows, searchLocations)

        # used for debugging purposes only - Daniel La
        for instancePath in instanceLocations:
            logger.debug('***********instance path is: ' + instancePath)

        ## Get databases using instance locations
        if instanceLocations:
            getDatabases(localClient, isWindows, instanceLocations, dbInstanceDict)
    except:
        excInfo = logger.prepareJythonStackTrace('')
        dbconnect_utils.debugPrint('[' + SCRIPT_NAME + ':findDatabases] Exception: <%s>' % excInfo)
Example #22
def module_run(req):
    import rpyc

    put_string = ""
    if not "ModuleID" in req.GET.get():
        Module_Id = ""
    else:
        Module_Id = req.GET.get("ModuleID")
        put_string += Module_Id + "@@"
    if not "hosts" in req.GET.get():
        Hosts = ""
    else:
        Hosts = req.GET.get("hosts")
        put_string = Hosts + "@@"
    """
	module extend params recive
	"""
    if not "sys_param_1" in req.GET.get():
        Sys_param_1 = ""
    else:
        Sys_param_1 = req.GET.get("sys_param_1")
        put_string = Sys_param_1 + "@@"
    if not "sys_param_2" in req.GET.get():
        Sys_param_2 = ""
    else:
        Sys_param_2 = req.GET.get("sys_param_1")
        put_string = Sys_param_2 + "@@"
    try:
        conn = rpyc.connect("127.0.0.1", 11511)
        conn.root.login("sean", "htbenet100")
    except Exception as e:
        logger.error("connect to rpyc server failed" + str(e))
        return HttpResponse("connect to rpyc server failed" + str(e))
Example #23
def discoverOAMEndpoint(shell, configFile):
    """
    Discover OAM endpoint in ObAccessClient.xml
    @types: str -> Endpoint
    """
    logger.debug('find OAM server')
    root = _buildDocumentForXpath(configFile, 0)
    xpath = _getXpath()
    servers = xpath.evaluate('//CompoundList/ValNameList[@ListName="primaryServer1"]', root, XPathConstants.NODESET)
    for i in range(0, servers.getLength()):
        server = servers.item(i)
        host = xpath.evaluate('//NameValPair[@ParamName="host"]/@Value', server, XPathConstants.STRING)
        port = xpath.evaluate('//NameValPair[@ParamName="port"]/@Value', server, XPathConstants.STRING)
        if host and port:
            logger.debug('got OAM server: %s:%s' % (host, port))
            if netutils.isValidIp(host):
                ip = host
            else:
                ip = _resolveHostName(shell, host)
            if ip:
                return netutils.createTcpEndpoint(ip, port)
            else:
                logger.error('Cannot resolve ip from host name "%s"' % host)
        else:
            logger.error('failed to get OAM server')
    return None
Example #24
File: ast.py Project: bcmd/BCMD
def dependency_sort(names, exprs):
    # this machine kills infinite loops (I hope)
    stopper = {}
    ordered_names = []
    ordered_exprs = []
    
    while names:
        name = names[0]
        del names[0]
        expr = exprs[0]
        del exprs[0]
        
        if len(names) >= stopper.get(name, len(names)+1):
            # we're now going around in circles
            logger.error('Unresolved circular dependency in assignments (at symbol ' \
                          + name + '), model may be non-viable')
            ordered_names.append(name)
            ordered_names += names
            ordered_exprs.append(expr)
            ordered_exprs += exprs
            return ordered_names, ordered_exprs
        
        stopper[name] = len(names)
            
        for dep in expr['depends']:
            if dep in names:
                names.append(name)
                exprs.append(expr)
                break
        
        if name not in names:
            ordered_names.append(name)
            ordered_exprs.append(expr)
    
    return ordered_names, ordered_exprs
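
A tiny worked example of the expected input shape (each expr is a dict with a 'depends' list, as the loop above assumes):

names = ['a', 'b']
exprs = [{'depends': ['b']},  # a depends on b
         {'depends': []}]     # b depends on nothing
ordered_names, ordered_exprs = dependency_sort(names, exprs)
# ordered_names == ['b', 'a'] because b must be assigned first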
Example #25
def errorlist(params,url,category):
	logger.info("[descargadoslist.py] errorlist")

	# Create the failed-downloads directory if it doesn't exist
	try:
		os.mkdir(DOWNLOAD_PATH)
	except:
		pass
	try:
		os.mkdir(ERROR_PATH)
	except:
		pass

	# Create a listing with the entries
	logger.info("[downloadall.py] ERROR_PATH="+ERROR_PATH)
	ficheros = os.listdir(ERROR_PATH)
	for fichero in ficheros:
		logger.info("[downloadall.py] fichero="+fichero)
		try:
			# Read the bookmark
			titulo,thumbnail,plot,server,url = readbookmarkfile(fichero,ERROR_PATH)

			# Create the entry
			# The category holds the file name so the entry can be deleted later
			xbmctools.addnewvideo( CHANNELNAME , "playerror" , os.path.join( ERROR_PATH, fichero ) , server , titulo , url , thumbnail, plot )
		except:
			logger.info("[downloadall.py] error reading bookmark")
			for line in sys.exc_info():
				logger.error( "%s" % line )

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
Example #26
    def getConfigsFromHelp(self, fileMonitor):
        mysqlCommandList = []
        for cmd in self.HELP_OPTIONS_LIST:
            if self.shell.isWinOs():
                mysqlCommand = '"%s" %s' % (self.processPath, cmd)
            else:
                mysqlCommand = '%s %s' % (self.processPath, cmd)
            mysqlCommandList.append(mysqlCommand)
        try:
            help = self.shell.execAlternateCmdsList(mysqlCommandList)
            if self.shell.getLastCmdReturnCode() != 0:
                logger.error('Failed to get MySql help info.')
        except:
            logger.error('Failed to get MySql help info. %s' % sys.exc_info()[1])
        else:
            pathsFound = re.search('Default options.*order:.*\n(.*\n)', help)
            if pathsFound:
                filesLine = pathsFound.group(1)
                logger.debug('verifying existence of %s' % filesLine)
                # assume config file names end with an extension (.ext)
                lookupConfPaths = re.findall('((([a-zA-Z]:)|(~?\/)).*?[\\\/]?\.?\w+\.\w+)(\s|$)', filesLine)
                if lookupConfPaths:
                    for lookupConfPath in lookupConfPaths:
                        if fileMonitor.checkPath(lookupConfPath[0]):
                            self.configPath = lookupConfPath[0]
                            return self.configPath
        if not self.configPath:
            logger.warn('MySQL configuration file was not found in mysql help')
        return self.configPath
Example #27
def verifyDB(localDbClient, dbName):
    try:
        returnVal = -1
        dbStateQuery = 'SELECT db_name()'
        debugPrint(4, '[' + SCRIPT_NAME + ':verifyDB] Running query <%s>' % dbStateQuery)
        dbStateResultSet = doQuery(localDbClient, dbStateQuery)

        ## Return if query returns no results
        if dbStateResultSet is None:
            logger.warn('[' + SCRIPT_NAME + ':verifyDB] Unable to get database name!')
            return returnVal

        ## We have query results!
        while dbStateResultSet.next():
            databaseName = dbStateResultSet.getString(1).strip()
            if databaseName.lower().strip() == dbName.lower().strip():
                debugPrint(5, '[' + SCRIPT_NAME + ':verifyDB] Database name <%s> OK' % dbName)
                returnVal = 1
            else:
                logger.error('[' + SCRIPT_NAME + ':verifyDB] Database name mismatch!! Should be <%s>, got <%s>...' % (dbName, databaseName))
                return returnVal

        return returnVal
    except:
        excInfo = logger.prepareJythonStackTrace('')
        logger.warn('[' + SCRIPT_NAME + ':verifyDB] Exception: <%s>' % excInfo)
Example #28
def request_json(url, **kwargs):
    """
    Wrapper for `request_response', which will decode the response as JSON
    object and return the result, if no exceptions are raised.

    As an option, a validator callback can be given, which should return True
    if the result is valid.
    """

    validator = kwargs.pop("validator", None)
    response = request_response(url, **kwargs)

    if response is not None:
        try:
            result = response.json()

            if validator and not validator(result):
                logger.error("JSON validation result failed")
            else:
                return result
        except ValueError:
            logger.error("Response returned invalid JSON data")

            # Debug response
            if plexpy.VERBOSE:
                server_message(response)
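
A usage sketch with the validator callback described in the docstring (URL and key are hypothetical):

result = request_json('http://example.com/api/status',
                      validator=lambda resp: 'version' in resp)
if result:
    logger.info("Server version: %s" % result['version'])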
Example #29
    def log_error(self, plugin, e):
        logger.error(
            "Plugin %s %s failed: %s"
            % (plugin.__module__, sys._getframe(1).f_code.co_name, str(e)),  # plugin name, method name, exception
            self,
            1,
        )
Example #30
def makeTable(tableText):
	retTable = None
	if containsError(tableText):
		logger.error('error listing table:\n', tableText, '\n')
		return retTable
	rows = string.split(tableText, '\n')
	# look for the line of dashes representing the "table line"
	# a row above that is the header row, and data starts a row
	# below the dashed line.
	dashedLineIndex = findDashedLine(rows)
	header = rows[dashedLineIndex-1]
	# TODO: might want to check the headers haven't changed in the future
##    self.verifyServerHeader(header)
	indices = buildHeaderIndices(header)
	# actual data starts on the row after the dashed line
	dataRows = rows[dashedLineIndex+1:]
	retTable = []
	for dataRow in dataRows:
		if string.find(dataRow, STR_TABLE_END) > -1:
			logger.debug('reached end of table:', dataRow)
			break
		if string.strip(dataRow) != '':
			logger.debug('row:', dataRow)
			fields = getFields(dataRow, indices)
			logger.debug('fields:', string.join(fields))
			# add dataRow at the end for error conditions
			retTable += [[fields] + [dataRow]]
		else:
			continue
	logger.debug('finished parsing output table')
	return retTable
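
A sketch of the table layout the parser expects (purely illustrative; real input comes from the listing command, and STR_TABLE_END plus the helper functions are defined elsewhere):

tableText = ('NAME      STATE\n'
             '--------- -----\n'
             'server1   UP\n'
             'server2   DOWN\n')
table = makeTable(tableText)  # each entry is [fields, original_row]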
Example #31
def validate_inputs(config, args, unknown_args):
    error_arr = []
    try:
        if args.configpath:
            if os.path.isfile(args.configpath):
                pil.config_path = args.configpath
            else:
                if os.path.isfile(pil.config_path):
                    logger.warn(
                        "Custom config path is invalid, falling back to default path: {:s}"
                        .format(pil.config_path))
                    logger.separator()
                else:  # Create new config if it doesn't exist
                    logger.banner()
                    helpers.new_config()
                    return False
        pil.config_path = os.path.realpath(pil.config_path)
        config.read(pil.config_path)

        if args.download:
            pil.dl_user = args.download
            if args.downloadfollowing or args.batchfile:
                logger.banner()
                logger.warn(
                    "Please use only one download method. Use -h for more information."
                )
                logger.separator()
                return False
        elif not args.clean and not args.info and not args.assemble and not args.downloadfollowing and not args.batchfile and not args.organize:
            logger.banner()
            logger.error(
                "Please use a download method. Use -h for more information.")
            logger.separator()
            return False

        if helpers.bool_str_parse(config.get('pyinstalive',
                                             'log_to_file')) == "Invalid":
            pil.log_to_file = True
            error_arr.append(['log_to_file', 'True'])
        elif helpers.bool_str_parse(config.get('pyinstalive', 'log_to_file')):
            pil.log_to_file = True
        else:
            pil.log_to_file = False

        logger.banner()

        if args.batchfile:
            if os.path.isfile(args.batchfile):
                pil.dl_batchusers = [
                    user.rstrip('\n') for user in open(args.batchfile)
                ]
                if not pil.dl_batchusers:
                    logger.error("The specified file is empty.")
                    logger.separator()
                    return False
                else:
                    logger.info(
                        "Downloading {:d} users from batch file.".format(
                            len(pil.dl_batchusers)))
                    logger.separator()
            else:
                logger.error('The specified file does not exist.')
                logger.separator()
                return False

        if unknown_args:
            pil.uargs = unknown_args
            logger.warn(
                "The following unknown argument(s) were provided and will be ignored: "
            )
            logger.warn('    ' + ' '.join(unknown_args))
            logger.separator()

        pil.ig_user = config.get('pyinstalive', 'username')
        pil.ig_pass = config.get('pyinstalive', 'password')
        pil.dl_path = config.get('pyinstalive', 'download_path')
        pil.run_at_start = config.get('pyinstalive', 'run_at_start')
        pil.run_at_finish = config.get('pyinstalive', 'run_at_finish')
        pil.ffmpeg_path = config.get('pyinstalive', 'ffmpeg_path')
        pil.verbose = config.get('pyinstalive', 'verbose')
        pil.skip_merge = config.get('pyinstalive', 'skip_merge')
        pil.args = args
        pil.config = config
        pil.proxy = config.get('pyinstalive', 'proxy')

        if args.dlpath:
            pil.dl_path = args.dlpath

        if helpers.bool_str_parse(
                config.get('pyinstalive', 'show_cookie_expiry')) == "Invalid":
            pil.show_cookie_expiry = False
            error_arr.append(['show_cookie_expiry', 'False'])
        elif helpers.bool_str_parse(
                config.get('pyinstalive', 'show_cookie_expiry')):
            pil.show_cookie_expiry = True
        else:
            pil.show_cookie_expiry = False

        if helpers.bool_str_parse(config.get('pyinstalive',
                                             'verbose')) == "Invalid":
            pil.verbose = False
            error_arr.append(['verbose', 'False'])
        elif helpers.bool_str_parse(config.get('pyinstalive', 'verbose')):
            pil.verbose = True
        else:
            pil.verbose = False

        if helpers.bool_str_parse(config.get('pyinstalive',
                                             'skip_merge')) == "Invalid":
            pil.skip_merge = False
            error_arr.append(['skip_merge', 'False'])
        elif helpers.bool_str_parse(config.get('pyinstalive', 'skip_merge')):
            pil.skip_merge = True
        else:
            pil.skip_merge = False

        if helpers.bool_str_parse(config.get('pyinstalive',
                                             'use_locks')) == "Invalid":
            pil.use_locks = False
            error_arr.append(['use_locks', 'False'])
        elif helpers.bool_str_parse(config.get('pyinstalive', 'use_locks')):
            pil.use_locks = True
        else:
            pil.use_locks = False

        if helpers.bool_str_parse(config.get('pyinstalive',
                                             'clear_temp_files')) == "Invalid":
            pil.clear_temp_files = False
            error_arr.append(['clear_temp_files', 'False'])
        elif helpers.bool_str_parse(
                config.get('pyinstalive', 'clear_temp_files')):
            pil.clear_temp_files = True
        else:
            pil.clear_temp_files = False

        if helpers.bool_str_parse(config.get('pyinstalive',
                                             'do_heartbeat')) == "Invalid":
            pil.do_heartbeat = True
            error_arr.append(['do_heartbeat', 'True'])
        if helpers.bool_str_parse(config.get('pyinstalive', 'do_heartbeat')):
            pil.do_heartbeat = True
        if args.noheartbeat or not helpers.bool_str_parse(
                config.get('pyinstalive', 'do_heartbeat')):
            pil.do_heartbeat = False
            logger.warn(
                "Getting livestream heartbeat is disabled, this may cause degraded performance."
            )
            logger.separator()

        if not args.nolives and helpers.bool_str_parse(
                config.get('pyinstalive', 'download_lives')) == "Invalid":
            pil.dl_lives = True
            error_arr.append(['download_lives', 'True'])
        elif helpers.bool_str_parse(config.get('pyinstalive',
                                               'download_lives')):
            pil.dl_lives = True
        else:
            pil.dl_lives = False

        if not args.noreplays and helpers.bool_str_parse(
                config.get('pyinstalive', 'download_replays')) == "Invalid":
            pil.dl_replays = True
            error_arr.append(['download_replays', 'True'])
        elif helpers.bool_str_parse(
                config.get('pyinstalive', 'download_replays')):
            pil.dl_replays = True
        else:
            pil.dl_replays = False

        if helpers.bool_str_parse(
                config.get('pyinstalive', 'download_comments')) == "Invalid":
            pil.dl_comments = True
            error_arr.append(['download_comments', 'True'])
        elif helpers.bool_str_parse(
                config.get('pyinstalive', 'download_comments')):
            pil.dl_comments = True
        else:
            pil.dl_comments = False

        if args.nolives:
            pil.dl_lives = False

        if args.noreplays:
            pil.dl_replays = False

        if args.verbose:
            pil.verbose = True
        if args.skip_merge:
            pil.skip_merge = True

        if not pil.dl_lives and not pil.dl_replays:
            logger.error(
                "You have disabled both livestream and replay downloading.")
            logger.error("Please enable at least one of them and try again.")
            logger.separator()
            return False

        if pil.ffmpeg_path:
            if not os.path.isfile(pil.ffmpeg_path):
                pil.ffmpeg_path = None
                cmd = "where" if platform.system() == "Windows" else "which"
                logger.warn(
                    "Custom FFmpeg binary path is invalid, falling back to environment variable."
                )
            else:
                logger.binfo("Overriding FFmpeg binary path: {:s}".format(
                    pil.ffmpeg_path))
        else:
            if not helpers.command_exists('ffmpeg') and not args.info:
                logger.error("FFmpeg framework not found, exiting.")
                logger.separator()
                return False

        if not pil.ig_user or not len(pil.ig_user):
            raise Exception(
                "Invalid value for 'username'. This value is required.")

        if not pil.ig_pass or not len(pil.ig_pass):
            raise Exception(
                "Invalid value for 'password'. This value is required.")

        if not pil.dl_path.endswith('/'):
            pil.dl_path = pil.dl_path + '/'
        if not pil.dl_path or not os.path.exists(pil.dl_path):
            pil.dl_path = os.getcwd() + "/"
            if not args.dlpath:
                error_arr.append(['download_path', os.getcwd() + "/"])
            else:
                logger.warn(
                    "Custom download path is invalid, falling back to default path: {:s}"
                    .format(pil.dl_path))
                logger.separator()

        if pil.proxy and pil.proxy != '':
            parsed_url = urlparse(pil.proxy)
            if not parsed_url.netloc or not parsed_url.scheme:
                error_arr.append(['proxy', 'None'])
                pil.proxy = None

        if error_arr:
            for error in error_arr:
                logger.warn(
                    "Invalid value for '{:s}'. Using default value: {:s}".
                    format(error[0], error[1]))
                logger.separator()

        if args.info:
            helpers.show_info()
            return False
        elif args.clean:
            helpers.clean_download_dir()
            return False
        elif args.assemble:
            pil.assemble_arg = args.assemble
            assembler.assemble()
            return False
        elif args.organize:
            organize.organize_videos()
            return False

        return True
    except Exception as e:
        logger.error("An error occurred: {:s}".format(str(e)))
        logger.error(
            "Make sure the config file and given arguments are valid and try again."
        )
        logger.separator()
        return False
Example #32
def generate_elements(agents, topics, elemental):
    board = elemental.get_board()
    elements = board.get_elements()
    # list all elements on board
    for element in elements:
        data = element.get_name()
        agent = ""
        try:
            # get tag of current element
            agent = data[data.index(TAG_OPEN):data.index(TAG_CLOSE)] + ": "
        except ValueError as ex:
            logger.exception('element_factory', 'topics exception')

        # if such tag is in our list
        if agent in agents:
            # remove it from our list
            agents.remove(agent)
            # this tag already have some topics
            existing_topics = read_tags_str(agent, element)
            # difference between ours
            diff = set(topics).difference(existing_topics)
            # union with ours
            union = set(existing_topics).union(topics)
            # if there is difference
            if diff:
                # caption is our tag
                caption = agent
                # and all topics
                for i in union:
                    caption += i + ", "
                caption = caption[:len(caption) - 2]
                # update element on board
                board.update_element(element.get_posX(), element.get_posY(),
                                     element.get_id(), element.get_sizeX(),
                                     element.get_sizeY(), caption)
    # if some agents still not added
    if agents:
        used = []
        for agent in agents:
            # get non empty and empty places
            element_matrix = board.get_elements_matrix()

            # find empty position
            pos_x = elemental.get_posX()
            pos_y = elemental.get_posY()
            size_x = 1
            size_y = 1

            k = 1
            should_go = True

            while should_go:
                for j, sublist in enumerate(element_matrix, start=1):
                    for i, element in enumerate(sublist, start=1):
                        if math.fabs(j - pos_y) <= k and math.fabs(i - pos_x) <= k and element == 0 \
                                and not (i, j) in used:
                            pos_y_ = j
                            pos_x_ = i
                            used.append((i, j))
                            should_go = False
                            break
                    if not should_go:
                        break
                k += 1
                if k > 10:
                    logger.error('element_factory', "no free space left")
                    break

            # caption is our tag
            caption = agent
            # and all topics
            topics1 = ', '.join(topics)
            caption = caption + topics1

            # create element on board
            board.add_element(pos_x_, pos_y_, size_x, size_y, caption)
Example #33
import sys
import os
import glob
import copy

import numpy as np
from tqdm import tqdm
import open3d
import logger
import mrob

from logger import get_configured_logger_by_name
logger = get_configured_logger_by_name(__file__)

if __name__ == '__main__':
    traj = np.load("traj.npy")
    print(traj.shape)

    pcds_dir = sys.argv[1]
    if not os.path.exists(pcds_dir):
        logger.error('Folder {0} for result does not exist.'.format(pcds_dir))

    pcds_files = glob.glob(pcds_dir + '/*.pcd')
    pcds_files.sort()
    pcds = []
    for file in tqdm(pcds_files):
        pcds.append(open3d.io.read_point_cloud(file))
    logger.info('Read {0} PCDs from {1}'.format(len(pcds_files), pcds_dir))

    pcd_full = [pcds[0]]
    for i in tqdm(range(traj.shape[0] - 2)):
        source = copy.deepcopy(pcds[i + 1]).transform(
            (mrob.SE3(traj[i + 1]).T()))
        pcd_full.append(source)

    open3d.visualization.draw_geometries(pcd_full)
Example #34
def Buscar(params, url, category):
    '''Searches globally through tvshack and shows results list
    '''
    logger.info("[tvshack.py] Buscar")

    keyboard = xbmc.Keyboard()
    keyboard.doModal()
    if not (keyboard.isConfirmed()):
        return
    text = keyboard.getText()
    if len(text) < 3:
        return

    #Clean search text to avoid web errors
    text = string.capwords(text).replace(" ", "+")
    searchUrl = url + text

    # Get the search results
    data = ""
    try:
        furl = urllib.urlopen(searchUrl)
        newurl = furl.geturl()
        if newurl != searchUrl:
            # This means we got only one result and jumped directly to it.
            # We have to analyze the result page to figure out the category
            dlog('[tvshack] buscar: single result: ' + newurl)
            if newurl.find('/tv/') == 18:  #TV Serie
                data = '<li><a href="%s">Television - <strong>%s</strong></a><a href="%s"><span>0 episodes</span></a></li>' % (
                    newurl, newurl[22:-1], newurl)
            elif newurl.find("/movies/") == 18:  #Film
                data = '<li><a href="%s">Movies - <strong>%s</strong></a><a href="%s"><span>%s</span></a></li>' % (
                    newurl, newurl[26:-8], newurl, newurl[-6:-2])
            elif newurl.find("/music/") == 18:  #Singer
                data = '<li><a href="%s">Music - <strong>%s</strong></a><a href="%s"></a></li>' % (
                    newurl, newurl[25:-1], newurl)
        else:
            # Multiple search results
            data = furl.read()
        furl.close()
    except:
        # Probably Internet connection problems or web changes. Nothing we can do :(
        pass
    if len(data) == 0:
        logger.error("[tvshac.py] Buscar - No search results :" + text)
        error = xbmcgui.Dialog()
        error.ok('pelisalacarta - TVShack',
                 getStr(30907))  #"The search did not find anything"
        return

# E.g. TV Series: <li><a href="http://tvshack.bz/tv/The_Big_Bang_Theory/">Television - <strong>The Big Bang Theory</strong></a><a href="http://tvshack.bz/tv/The_Big_Bang_Theory/"><span>57 episodes</span></a></li>
# E.g. Movie:     <li><a href="http://tvshack.bz/movies/Bang_Bang_You_re_Dead__2002_/">Movies - <strong>Bang Bang You're Dead</strong></a><a href="http://tvshack.bz/movies/Bang_Bang_You_re_Dead__2002_/"><span>2002</span></a></li>
# E.g. Music:     <li><a href="http://tvshack.bz/music/Mr__Big/">Music - <strong>Mr. Big</strong></a><a href="http://tvshack.bz/music/Mr__Big/"></a></li>
    patronvideos = '''(?x)       #      VERBOSE option active
        <li><a\ href="           #      Trash
        (?P<url>[^"]+)">         # $0 = media url
        ([^\ ]+)\ -\             # $1 = media Category: TV, Movie or Music
        <strong>                 #      Trash
        ([^<]+)                  # $2 = Media Name
        </strong></a>            #      Trash
        (?:<a\ href=")           #      Trash
        (?P=url)">               # $0 = media url (again)
        (?:<span>)?              #      Trash
        ([0-9]+)?                # $3 = Number of episodes or Production Year
        (?:\ episodes)?          #      Trash
        (?:</span>)?</a></li>    #      Trash
        '''
    matches = re.findall(patronvideos, data)

    totalmatches = len(matches)
    if totalmatches == 0:
        logger.error("[tvshac.py] Buscar - No matches found: " + text)
        error = xbmcgui.Dialog()
        error.ok('pelisalacarta - TVShack', getStr(30907))  #'No matches found'
        return

    for match in matches:
        if match[1] == 'Television':
            # Add to the directory listing
            if match[3] != '0':
                scrapedtitle = getStr(30908) % (match[2], match[3]
                                                )  #'Serie - %s (%s episodios)'
            else:
                scrapedtitle = getStr(30909) + match[2]  #'Serie - '
            xbmctools.addnewfolder(CHANNELNAME,
                                   "ListaEpisodios",
                                   "Series",
                                   scrapedtitle,
                                   match[0],
                                   "",
                                   "",
                                   Serie=match[2],
                                   totalItems=totalmatches)
        elif match[1] == 'Movies':
            scrapedtitle = getStr(30910) % (match[2], match[3]
                                            )  #'Cine - %s (%s)'
            xbmctools.addnewfolder(CHANNELNAME,
                                   "listaVideosEpisodio",
                                   "Cine",
                                   scrapedtitle,
                                   match[0],
                                   "",
                                   "",
                                   totalItems=totalmatches)
        else:  #Music
            xbmctools.addnewfolder(CHANNELNAME,
                                   "ListaEpisodios",
                                   "Musica",
                                   getStr(30911) + match[2],
                                   match[0],
                                   "",
                                   "",
                                   totalItems=totalmatches)  #"Música - "

    FinalizaPlugin(pluginhandle, category)
Example #35
def verify_directories_created():
    import logger
    import filetools
    from resources.lib.platformcode import xbmc_library

    config_paths = [["librarypath", "library"], ["downloadpath", "downloads"],
                    ["downloadlistpath", "downloads/list"],
                    ["settings_path", "settings_channels"]]

    for path, default in config_paths:
        saved_path = get_setting(path)

        # Library
        if path == "librarypath":
            set_setting("library_version", "v4")
            if not saved_path:
                saved_path = xbmc_library.search_library_path()
                if saved_path:
                    set_setting(path, saved_path)

        if not saved_path:
            saved_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/" + default
            set_setting(path, saved_path)

        if get_setting("library_set_content") == True and path in [
                "librarypath", "downloadpath"
        ]:
            # logger.debug("library_set_content %s" % get_setting("library_set_content"))
            xbmc_library.add_sources(saved_path)

        saved_path = xbmc.translatePath(saved_path)
        if not filetools.exists(saved_path):
            logger.debug("Creating %s: %s" % (path, saved_path))
            filetools.mkdir(saved_path)

    config_paths = [["folder_movies", "CINE"], ["folder_tvshows", "SERIES"]]

    for path, default in config_paths:
        saved_path = get_setting(path)

        if not saved_path:
            saved_path = default
            set_setting(path, saved_path)

        content_path = filetools.join(get_library_path(), saved_path)
        if not filetools.exists(content_path):
            logger.debug("Creating %s: %s" % (path, content_path))
            if filetools.mkdir(content_path) and get_setting(
                    "library_set_content") == True:
                xbmc_library.set_content(default)

        elif get_setting("library_ask_set_content") == 2:
            xbmc_library.set_content(default)

    try:
        import scrapertools
        # Look up the addon.xml file of the active skin
        skindir = filetools.join(xbmc.translatePath("special://home"),
                                 'addons', xbmc.getSkinDir(), 'addon.xml')
        # Extract the name of the default resolution folder
        folder = ""
        data = filetools.read(skindir)
        res = scrapertools.find_multiple_matches(data, '(<res .*?>)')
        for r in res:
            if 'default="true"' in r:
                folder = scrapertools.find_single_match(r, 'folder="([^"]+)"')
                break

        # Check whether it exists in pelisalacarta and create it if not
        default = filetools.join(get_runtime_path(), 'resources', 'skins',
                                 'Default')
        if folder and not filetools.exists(filetools.join(default, folder)):
            filetools.mkdir(filetools.join(default, folder))

        # Copy each file into that folder from the 720p one if it does not exist there or its size differs
        if folder and folder != '720p':
            for root, folders, files in filetools.walk(
                    filetools.join(default, '720p')):
                for f in files:
                    if not filetools.exists(filetools.join(default, folder, f)) or \
                          (filetools.getsize(filetools.join(default, folder, f)) !=
                           filetools.getsize(filetools.join(default, '720p', f))):
                        filetools.copy(filetools.join(default, '720p', f),
                                       filetools.join(default, folder, f),
                                       True)
    except Exception:
        import traceback
        logger.error("Error while checking or creating the resolution folder")
        logger.error(traceback.format_exc())
Пример #36
0
def download_blocks_from_communicator(comm_inst: "OnionrCommunicatorDaemon"):
    """Use communicator instance to download blocks in the comms's queue"""
    blacklist = onionrblacklist.OnionrBlackList()
    storage_counter = storagecounter.StorageCounter()
    LOG_SKIP_COUNT = 50  # how many iterations to skip between logging the queue size
    count: int = 0
    metadata_validation_result: bool = False
    # Iterate the block queue in the communicator
    for blockHash in list(comm_inst.blockQueue):
        count += 1

        triedQueuePeers = [] # List of peers we've tried for a block
        try:
            blockPeers = list(comm_inst.blockQueue[blockHash])
        except KeyError:
            blockPeers = []
        removeFromQueue = True

        if not shoulddownload.should_download(comm_inst, blockHash):
            continue

        if comm_inst.shutdown or not comm_inst.isOnline or storage_counter.is_full():
            # Exit loop if shutting down or offline, or disk allocation reached
            break
        # Do not download blocks being downloaded
        if blockHash in comm_inst.currentDownloading:
            continue

        if len(comm_inst.onlinePeers) == 0:
            break

        comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
        if len(blockPeers) == 0:
            try:
                peerUsed = onlinepeers.pick_online_peer(comm_inst)
            except onionrexceptions.OnlinePeerNeeded:
                continue
        else:
            blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
            peerUsed = blockPeers.pop(0)

        if not comm_inst.shutdown and peerUsed.strip() != '':
            logger.info("Attempting to download %s from %s..." % (blockHash[:12], peerUsed))
        content = peeraction.peer_action(comm_inst, peerUsed, 'getdata/' + blockHash, max_resp_size=3000000) # block content from random peer (includes metadata)

        if content is not False and len(content) > 0:
            try:
                content = content.encode()
            except AttributeError:
                pass

            realHash = onionrcrypto.hashers.sha3_hash(content)
            try:
                realHash = realHash.decode() # bytes on some versions for some reason
            except AttributeError:
                pass
            if realHash == blockHash:
                #content = content.decode() # decode here because sha3Hash needs bytes above
                metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
                metadata = metas[0]
                try:
                    metadata_validation_result = \
                        validatemetadata.validate_metadata(metadata, metas[2])
                except onionrexceptions.DataExists:
                    metadata_validation_result = False
                if metadata_validation_result: # check if metadata is valid, and verify nonce
                    if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
                        logger.info('Attempting to save block %s...' % blockHash[:12])
                        try:
                            onionrstorage.set_data(content)
                        except onionrexceptions.DataExists:
                            logger.warn('Data is already set for %s ' % (blockHash,))
                        except onionrexceptions.DiskAllocationReached:
                            logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash,))
                            removeFromQueue = False
                        else:
                            blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
                            blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
                            spawn(
                                local_command,
                                '/daemon-event/upload_event',
                                post=True,
                                is_json=True,
                                post_data={'block': blockHash}
                            )
                    else:
                        logger.warn('POW failed for block %s.' % (blockHash,))
                else:
                    if blacklist.inBlacklist(realHash):
                        logger.warn('Block %s is blacklisted.' % (realHash,))
                    else:
                        logger.warn('Metadata for block %s is invalid.' % (blockHash,))
                        blacklist.addToDB(blockHash)
            else:
                # if block didn't meet expected hash
                tempHash = onionrcrypto.hashers.sha3_hash(content) # lazy hack, TODO use var
                try:
                    tempHash = tempHash.decode()
                except AttributeError:
                    pass
                # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                    # Dumb hack for a 404 response from the peer. Don't log it, since a 404 is likely not malicious or a critical error.
                    logger.warn(
                        'Block hash validation failed for ' +
                        blockHash + ' got ' + tempHash)
                else:
                    removeFromQueue = False # Don't remove from queue if 404
            if removeFromQueue:
                try:
                    del comm_inst.blockQueue[blockHash] # remove from block queue both if success or false
                    if count == LOG_SKIP_COUNT:
                        logger.info('%s blocks remaining in queue' %
                                    (len(comm_inst.blockQueue),), terminal=True)
                        count = 0
                except KeyError:
                    pass
        comm_inst.currentDownloading.remove(blockHash)
    comm_inst.decrementThreadCount('getBlocks')
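A note on the happy path above: nothing from the peer is trusted until the received bytes re-hash to the exact hash the block was requested by. The same verify-by-digest pattern can be shown with just the standard library; treating sha3_hash as plain SHA3-256 is an assumption made for this sketch, not something the snippet states.

import hashlib

def block_matches(expected_hash, content):
    # Re-hash the received bytes locally and accept only an exact digest match
    return hashlib.sha3_256(content).hexdigest() == expected_hash

data = b"example block content"
good = hashlib.sha3_256(data).hexdigest()
print(block_matches(good, data))         # True  -> block would be saved
print(block_matches(good, b"tampered"))  # False -> peer would be penalized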
Пример #37
0
def run():
    from websocket import create_connection

    if plexpy.CONFIG.PMS_SSL and plexpy.CONFIG.PMS_URL[:5] == 'https':
        uri = plexpy.CONFIG.PMS_URL.replace('https://', 'wss://') + '/:/websockets/notifications'
        secure = 'secure '
    else:
        uri = 'ws://%s:%s/:/websockets/notifications' % (
            plexpy.CONFIG.PMS_IP,
            plexpy.CONFIG.PMS_PORT
        )
        secure = ''

    # Set authentication token (if one is available)
    if plexpy.CONFIG.PMS_TOKEN:
        header = ["X-Plex-Token: %s" % plexpy.CONFIG.PMS_TOKEN]
    else:
        header = []

    global ws_reconnect
    ws_reconnect = False
    reconnects = 0

    # Try to open the websocket connection
    while not plexpy.WS_CONNECTED and reconnects < plexpy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS:
        if reconnects == 0:
            logger.info(u"Tautulli WebSocket :: Opening %swebsocket." % secure)

        reconnects += 1

        # Sleep between connection attempts
        if reconnects > 1:
            time.sleep(plexpy.CONFIG.WEBSOCKET_CONNECTION_TIMEOUT)

        logger.info(u"Tautulli WebSocket :: Connection attempt %s." % str(reconnects))

        try:
            ws = create_connection(uri, header=header)
            logger.info(u"Tautulli WebSocket :: Ready")
            plexpy.WS_CONNECTED = True
        except (websocket.WebSocketException, IOError, Exception) as e:
            logger.error(u"Tautulli WebSocket :: %s." % e)

    if plexpy.WS_CONNECTED:
        on_connect()

    while plexpy.WS_CONNECTED:
        try:
            process(*receive(ws))

            # successfully received data, reset reconnects counter
            reconnects = 0

        except websocket.WebSocketConnectionClosedException:
            if reconnects == 0:
                logger.warn(u"Tautulli WebSocket :: Connection has closed.")

            if not plexpy.CONFIG.PMS_IS_CLOUD and reconnects < plexpy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS:
                reconnects += 1

                # Sleep between connection attempts
                if reconnects > 1:
                    time.sleep(plexpy.CONFIG.WEBSOCKET_CONNECTION_TIMEOUT)

                logger.warn(u"Tautulli WebSocket :: Reconnection attempt %s." % str(reconnects))

                try:
                    ws = create_connection(uri, header=header)
                    logger.info(u"Tautulli WebSocket :: Ready")
                    plexpy.WS_CONNECTED = True
                except (websocket.WebSocketException, IOError, Exception) as e:
                    logger.error(u"Tautulli WebSocket :: %s." % e)

            else:
                ws.shutdown()
                plexpy.WS_CONNECTED = False
                break

        except (websocket.WebSocketException, Exception) as e:
            logger.error(u"Tautulli WebSocket :: %s." % e)
            ws.shutdown()
            plexpy.WS_CONNECTED = False
            break

        # Check if we received a restart notification and close the websocket connection cleanly
        if ws_reconnect:
            logger.info(u"Tautulli WebSocket :: Reconnecting websocket...")
            ws.shutdown()
            plexpy.WS_CONNECTED = False
            start_thread()
    
    if not plexpy.WS_CONNECTED and not ws_reconnect:
        on_disconnect()

    logger.debug(u"Tautulli WebSocket :: Leaving thread.")
Пример #38
0
def mainFn():
    global unreadMessages
    try:
        for item in reddit.inbox.unread():

            #loading our posts analyzed file
            postsFile=open("postsanalyzed.txt","r+")
            pa=str(postsFile.readlines())

            #loading the banned subs file
            bannedSubs=open("bansublist.txt", "r+")
            bs=str(bannedSubs.readlines())

            #appending our unread messages list
            unreadMessages.append(item)
            reddit.inbox.mark_read(unreadMessages)


            #checking the message for a bot mention
            if "u/profanitycounter" in item.body:
                #making the message into a comment
                comment=reddit.comment(id=item.id)
                if comment.id not in pa:

                    #starting a user stats record
                    if "[startstats]" in comment.body.lower():
                        comAuthor=comment.author
                        #testing if the user is already in the stat System
                        if stats.testStat(comment.author) == False:
                            #registering the user
                            stats.startRec(comment.author)

                            #trying to comment normally
                            try:
                                #constructing a response
                                comment.reply(postC.postConstructor3(comment.author,0))
                                print(f"Debug Info: user {comment.author} began stat recording")
                                log.debug(f"{comment.author} began stat recording")

                            #commenting when the bot is banned
                            except Forbidden:
                                comAuthor.message(f"Used me on banned sub: r/{comment.subreddit}", postC.postConstructor3(comment.author,0))
                                print(f"Debug Info: user {comment.author} began stat recording")
                                log.debug(f"{comment.author} began stat recording")
                        #user is already registered
                        else:
                            try:
                                comment.reply(postC.postConstructor3(comment.author,1))
                                print(f"Debug Info: user {comment.author} attempted to register, already registered")
                                log.debug(f"{comment.author} tried registering, already registered")

                            except Forbidden:
                                comAuthor.message(f"Used me on banned sub: r/{comment.subreddit}", postC.postConstructor3(comment.author,1))
                                print(f"Debug Info: user {comment.author} began stat recording")
                                log.debug(f"{comment.author} began stat recording")


                    #retrieve a user's stat record
                    if "[stats]" in comment.body.lower():
                        comAuthor = comment.author
                        if stats.testStat(comment.author) == True:
                            print(f"Debug info: user {comment.author} checked their stats")
                            log.debug(f"Debug info: user {comment.author} checked their stats")
                            try:
                                comment.reply(postC.postConstructor4(str(comment.author),0))

                            except Forbidden:
                                comAuthor.message(f"I\'m banned on r/{comment.subreddit}!",postC.postConstructor4(str(comment.author),0))

                        if stats.testStat(comment.author) == False:
                            try:
                                comment.reply(postC.postConstructor4(str(comment.author),1))

                            except Forbidden:
                                comAuthor.message(f"I\'m banned on r/{comment.subreddit}!",postC.postConstructor4(str(comment.author),1))



                    #to allow a user to check their own profanity usage
                    if "[self]" in comment.body.lower():
                        comAuthor=comment.author

                        if str(comment.subreddit) not in bs:
                            #getting and processing our string using the parent comment author
                            print(f"Debug Info: user:{comment.author}; sub:{comment.subreddit}; parent:{comment.parent_id}")
                            log.debug(f"System Used: user:{comment.author}; sub:{comment.subreddit}; parent:{comment.parent_id}")

                            if stats.testStat(comAuthor) == True:
                                stats.updateStatInc(comAuthor,"called",1)

                            #trying to post normally
                            try:
                                newComment=comment.reply(postC.postConstructor2(str(comment.author),0))
                                print(f"Bot commented at: {comment.parent_id}")
                                log.debug(f"Commented at: {comment.parent_id}")

                            #catching our 'Forbidden' error, meaning that we've been banned at this sub
                            except Forbidden:
                                print(f"Bot now banned at: {comment.subreddit}")
                                log.warning(f"Banned now at: {comment.subreddit}")

                                try:
                                    #sending a message to the user who created the error
                                    comAuthor.message(f"You used me on r/{comment.subreddit}",postC.postConstructor2(str(comment.author),1))

                                except Forbidden:
                                    print(f"{comAuthor}\'s account has been suspended, continuing without action.")
                                    log.warning(f"{comAuthor} used with a suspended account, continuing")

                            if str(comment.subreddit) in bs:
                                print(f"Bot run on know banned sub: {comment.subreddit}. PMing user.")
                                log.debug(f"Bot used on KBS: {comment.subreddit}")

                                try:
                                    comAuthor.message(f"You used me on r/{comment.subreddit}",postC.postConstructor2(str(comment.author),1))
                                    print(f"User {comment.author} was successfully PMed")
                                    log.debug(f"User {comment.author} was successfully PMed")

                                except Forbidden:
                                    print(f"{comAuthor}\'s account has been suspended, continuing without action.")
                                    log.warning(f"{comAuthor} used with a suspended account, continuing")


                    #plain profanity checking
                    if "[startstats]" not in comment.body.lower() and "[stats]" not in comment.body.lower() and "[self]" not in comment.body.lower():
                        #checking if our parent is a comment or post, and then proceeding
                        #parent is comment
                        if comment.parent_id.startswith("t1_"):
                            #getting some info about the parent
                            parentcomment=reddit.comment(id=comment.parent_id.split("_")[1])
                            comAuthor=comment.author
                            parentauthor=parentcomment.author


                            #checking if the bot is being used in a free sub
                            if str(parentcomment.subreddit) not in bs:

                                #getting and processing our string using the parent comment author
                                print(f"Debug Info: user:{comment.author}; sub:{comment.subreddit}; parent:{comment.parent_id}")
                                log.debug(f"System Used: user:{comment.author}; sub:{comment.subreddit}; parent:{comment.parent_id}")

                                #trying to post our comment
                                try:
                                    newComment=comment.reply(postC.postConstructor(str(parentauthor),comment.author,0))
                                    print(f"Bot commented at: {comment.parent_id}")
                                    log.debug(f"Commented at: {comment.parent_id}")


                                #catching our 'Forbidden' error, meaning that we've been banned at this sub
                                except Forbidden:
                                    print(f"Bot now banned at: {comment.subreddit}")
                                    log.warning(f"Banned now at: {comment.subreddit}")

                                    try:
                                        #sending a message to the user who created the error
                                        comAuthor.message(f"You used me on r/{comment.subreddit}",postC.postConstructor(str(parentauthor),comment.author,1))

                                    except Forbidden:
                                        print(f"{comAuthor}\'s account has been suspended, continuing without action.")
                                        log.warning(f"{comAuthor} used with a suspended account, continuing")


                            if str(parentcomment.subreddit) in bs:
                                print(f"Bot run on know banned sub: {comment.subreddit}. PMing user.")
                                log.debug(f"Bot used on KBS: {comment.subreddit}")

                                try:
                                    comAuthor.message(f"You used me on r/{comment.subreddit}",postC.postConstructor(str(parentauthor),comment.author,1))
                                    print(f"User {comment.author} was successfully PMed")
                                    log.debug(f"User {comment.author} was successfully PMed")

                                except Forbidden:
                                    print(f"{comAuthor}\'s account has been suspended, continuing without action.")
                                    log.warning(f"{comAuthor} used with a suspended account, continuing")



                        #parent is post
                        if comment.parent_id.startswith("t3_"):
                            #getting info on the parent post
                            parentPost=reddit.submission(id=comment.parent_id.split("_")[1])
                            comAuthor=comment.author
                            parentauthor=parentPost.author

                            if str(parentPost.subreddit) not in bs:
                                #getting and processing our string using the parent comment author
                                print(f"Debug Info: user:{comment.author}; sub:{comment.subreddit}; parent:{comment.parent_id}")
                                log.debug(f"System Used: user:{comment.author}; sub:{comment.subreddit}; parent:{comment.parent_id}")

                                #trying to post our comment
                                try:
                                    newComment=comment.reply(postC.postConstructor(str(parentauthor),comment.author,0))
                                    print(f"Bot commented at: {comment.parent_id}")
                                    log.debug(f"Commented at: {comment.parent_id}")


                                #catching our 'Forbidden' error, meaning that we've been banned at this sub
                                except Forbidden:
                                    print(f"Bot now banned at: {comment.subreddit}")
                                    log.warning(f"Banned now at: {comment.subreddit}")

                                    try:
                                        #sending a message to the user who created the error
                                        comAuthor.message(f"You used me on r/{comment.subreddit}",postC.postConstructor(str(parentauthor),str(comment.author),1))

                                    except Forbidden:
                                        print(f"{comAuthor}\'s account has been suspended, continuing without action.")
                                        log.warning(f"{comAuthor} used with a suspended account, continuing")


                            #bot is banned
                            if str(parentPost.subreddit) in bs:
                                print(f"Bot run on know banned sub: {comment.subreddit}. PMing user.")
                                log.debug(f"Bot used on KBS: {comment.subreddit}")

                                try:
                                    comAuthor.message(f"You used me on r/{comment.subreddit}",postC.postConstructor(str(parentauthor),str(comment.author),1))
                                    print(f"User {comment.author} was successfully PMed")
                                    log.debug(f"User {comment.author} was successfully PMed")

                                except Forbidden:
                                    print(f"{comAuthor}\'s account has been suspended, continuing without action.")
                                    log.warning(f"{comAuthor} used with a suspended account, continuing")

                        if stats.testStat(str(comment.author)) == True:
                            stats.updateStatInc(str(comment.author),"called",1)





                postsFile.write(f"{comment.id}\n")

                log.debug("System executed successfully.")

    except RequestException:
        print("Connection to the API was dropped - Reconnecting in 30 seconds")
        log.error("Connection to the API was dropped - Reconnecting in 30 seconds")
        time.sleep(30)

    except APIException:
        print("The bot was ratelimited by the API - Reconnecting")
        log.error("The bot was ratelimited by the API - Reconnecting")
        time.sleep(10)

    except ServerError:
        print("Error encountered while communicating with the server - Reconnecting in 1 minute")
        log.error("Error encountered while communicating with the server - Reconnecting in 1 minute")
        time.sleep(60)

    except ClientException:
        log.error("Client Exception encountered - Continuing")
        time.sleep(10)

    except Forbidden:
        print("Out of loop forbidden error - Continuing")
        log.error("Out of loop forbidden error - Continuing")

    #sleeping to prevent api dropouts
    time.sleep(5)

    unreadMessages=[]
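One easy tightening of mainFn, sketched here rather than applied above: postsanalyzed.txt and bansublist.txt are reopened for every inbox item and never closed. Reading them once per run avoids leaking file handles; the file names are the same, but the set-based membership test is stricter than the original substring check, so this is an illustrative refactor, not a drop-in replacement.

def load_lookup_files():
    # Read both lookup files once per run instead of once per inbox item
    with open("postsanalyzed.txt") as posts_file:
        analyzed_ids = set(posts_file.read().split())
    with open("bansublist.txt") as banned_file:
        banned_subs = set(banned_file.read().split())
    return analyzed_ids, banned_subs

# usage at the top of mainFn:
# pa, bs = load_lookup_files()
# if comment.id not in pa: ...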
Пример #39
0
try:
    import json
except ImportError:
    logger.info("json bundled with the interpreter is **NOT** available")

    try:
        logger.info("Trying simplejson bundled with the interpreter")
        import simplejson as json
    except ImportError:
        logger.info("simplejson bundled with the interpreter is **NOT** available")

        try:
            logger.info("Trying simplejson from the lib directory")
            from lib import simplejson as json
        except ImportError:
            logger.info("simplejson from the lib directory is **NOT** available")
            logger.error("No valid JSON parser was found")
            json = None



def load_json(*args, **kwargs):
    if "object_hook" not in kwargs:
        kwargs["object_hook"] = to_utf8

    try:
        value = json.loads(*args, **kwargs)
    except Exception:
        logger.error("The JSON could **NOT** be loaded")
        value = {}

    return value
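load_json assumes a to_utf8 object_hook is already in scope, which the snippet never defines. A minimal Python 2 sketch of what such a hook usually looks like in these addons follows; the recursive walk and the utf-8 encoding are assumptions for illustration, not the original implementation.

def to_utf8(value):
    # Recursively encode unicode keys and values to utf-8 byte strings
    if isinstance(value, dict):
        return dict((to_utf8(k), to_utf8(v)) for k, v in value.items())
    if isinstance(value, list):
        return [to_utf8(item) for item in value]
    if isinstance(value, unicode):  # Python 2 only
        return value.encode("utf-8")
    return value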
Пример #40
0
def run():
    pil.initialize()
    logging.disable(logging.CRITICAL)
    config = configparser.ConfigParser()
    parser = argparse.ArgumentParser(
        description="You are running PyInstaLive {:s} using Python {:s}".
        format(Constants.SCRIPT_VER, Constants.PYTHON_VER))

    parser.add_argument('-u',
                        '--username',
                        dest='username',
                        type=str,
                        required=False,
                        help="Instagram username to login with.")
    parser.add_argument('-p',
                        '--password',
                        dest='password',
                        type=str,
                        required=False,
                        help="Instagram password to login with.")
    parser.add_argument(
        '-d',
        '--download',
        dest='download',
        type=str,
        required=False,
        help=
        "The username of the user whose livestream or replay you want to save."
    )
    parser.add_argument(
        '-b',
        '--batch-file',
        dest='batchfile',
        type=str,
        required=False,
        help=
        "Read a text file of usernames to download livestreams or replays from."
    )
    parser.add_argument('-i',
                        '--info',
                        dest='info',
                        action='store_true',
                        help="View information about PyInstaLive.")
    parser.add_argument(
        '-nr',
        '--no-replays',
        dest='noreplays',
        action='store_true',
        help="When used, do not check for any available replays.")
    parser.add_argument(
        '-nl',
        '--no-lives',
        dest='nolives',
        action='store_true',
        help="When used, do not check for any available livestreams.")
    parser.add_argument(
        '-cl',
        '--clean',
        dest='clean',
        action='store_true',
        help=
        "PyInstaLive will clean the current download folder of all leftover files."
    )
    parser.add_argument('-cp',
                        '--config-path',
                        dest='configpath',
                        type=str,
                        required=False,
                        help="Path to a PyInstaLive configuration file.")
    parser.add_argument(
        '-dp',
        '--download-path',
        dest='dlpath',
        type=str,
        required=False,
        help=
        "Path to folder where PyInstaLive should save livestreams and replays."
    )
    parser.add_argument(
        '-as',
        '--assemble',
        dest='assemble',
        type=str,
        required=False,
        help=
        "Path to json file required by the assembler to generate a video file from the segments."
    )
    parser.add_argument(
        '-df',
        '--download-following',
        dest='downloadfollowing',
        action='store_true',
        help=
        "PyInstaLive will check for available livestreams and replays from users the account "
        "used to login follows.")
    parser.add_argument('-nhb',
                        '--no-heartbeat',
                        dest='noheartbeat',
                        action='store_true',
                        help="Disable heartbeat check for livestreams.")
    parser.add_argument('-v',
                        '--verbose',
                        dest='verbose',
                        action='store_true',
                        help="PyInstaLive will output JSON responses and some misc variables.")
    parser.add_argument(
        '-sm',
        '--skip-merge',
        dest='skip_merge',
        action='store_true',
        help="PyInstaLive will not merge the downloaded livestream files.")
    parser.add_argument(
        '-o',
        '--organize',
        action='store_true',
        help=
        "Create a folder for each user whose livestream(s) you have downloaded. The names of the folders will be their usernames. Then move the video(s) of each user into their associated folder."
    )

    # Workaround to 'disable' argument abbreviations
    parser.add_argument('--usernamx', help=argparse.SUPPRESS, metavar='IGNORE')
    parser.add_argument('--passworx', help=argparse.SUPPRESS, metavar='IGNORE')
    parser.add_argument('--infx', help=argparse.SUPPRESS, metavar='IGNORE')
    parser.add_argument('--noreplayx',
                        help=argparse.SUPPRESS,
                        metavar='IGNORE')
    parser.add_argument('--cleax', help=argparse.SUPPRESS, metavar='IGNORE')
    parser.add_argument('--downloadfollowinx',
                        help=argparse.SUPPRESS,
                        metavar='IGNORE')
    parser.add_argument('--configpatx',
                        help=argparse.SUPPRESS,
                        metavar='IGNORE')
    parser.add_argument('--confix', help=argparse.SUPPRESS, metavar='IGNORE')
    parser.add_argument('--organizx', help=argparse.SUPPRESS, metavar='IGNORE')

    parser.add_argument('-cx', help=argparse.SUPPRESS, metavar='IGNORE')
    parser.add_argument('-nx', help=argparse.SUPPRESS, metavar='IGNORE')
    parser.add_argument('-dx', help=argparse.SUPPRESS, metavar='IGNORE')

    args, unknown_args = parser.parse_known_args()  # Parse arguments

    if validate_inputs(config, args, unknown_args):
        if not args.username and not args.password:
            pil.ig_api = auth.authenticate(username=pil.ig_user,
                                           password=pil.ig_pass)
        elif (args.username and not args.password) or (args.password
                                                       and not args.username):
            logger.warn(
                "Missing --username or --password argument. Falling back to config file."
            )
            logger.separator()
            pil.ig_api = auth.authenticate(username=pil.ig_user,
                                           password=pil.ig_pass)
        elif args.username and args.password:
            pil.ig_api = auth.authenticate(username=args.username,
                                           password=args.password,
                                           force_use_login_args=True)

        if pil.ig_api:
            if pil.dl_user or pil.args.downloadfollowing:
                downloader.start()
            elif pil.dl_batchusers:
                if not helpers.command_exists("pyinstalive"):
                    logger.error(
                        "PyInstaLive must be properly installed when using the -b argument."
                    )
                    logger.separator()
                else:
                    dlfuncs.iterate_users(pil.dl_batchusers)
Пример #41
0
def read_changelog(latest_only=False, since_prev_release=False):
    changelog_file = os.path.join(plexpy.PROG_DIR, 'CHANGELOG.md')

    if not os.path.isfile(changelog_file):
        return '<h4>Missing changelog file</h4>'

    try:
        output = ['']
        prev_level = 0

        latest_version_found = False

        header_pattern = re.compile(r'(^#+)\s(.+)')
        list_pattern = re.compile(r'(^[ \t]*\*\s)(.+)')

        beta_release = False
        prev_release = str(plexpy.PREV_RELEASE)

        with open(changelog_file, "r") as logfile:
            for line in logfile:
                line_header_match = re.search(header_pattern, line)
                line_list_match = re.search(list_pattern, line)

                if line_header_match:
                    header_level = str(len(line_header_match.group(1)))
                    header_text = line_header_match.group(2)

                    if header_text.lower() == 'changelog':
                        continue

                    if latest_version_found:
                        break
                    elif latest_only:
                        latest_version_found = True
                    # Add a space to the end of the release to match tags
                    elif since_prev_release:
                        if prev_release.endswith('-beta') and not beta_release:
                            if prev_release + ' ' in header_text:
                                break
                            elif prev_release.replace('-beta',
                                                      '') + ' ' in header_text:
                                beta_release = True
                        elif prev_release.endswith('-beta') and beta_release:
                            break
                        elif prev_release + ' ' in header_text:
                            break

                    output[-1] += '<h' + header_level + '>' + header_text + '</h' + header_level + '>'

                elif line_list_match:
                    line_level = len(line_list_match.group(1)) // 2
                    line_text = line_list_match.group(2)

                    if line_level > prev_level:
                        output[-1] += '<ul>' * (line_level - prev_level) + '<li>' + line_text + '</li>'
                    elif line_level < prev_level:
                        output[-1] += '</ul>' * (prev_level - line_level) + '<li>' + line_text + '</li>'
                    else:
                        output[-1] += '<li>' + line_text + '</li>'

                    prev_level = line_level

                elif line.strip() == '' and prev_level:
                    output[-1] += '</ul>' * (prev_level)
                    output.append('')
                    prev_level = 0

        if since_prev_release:
            output.reverse()

        return ''.join(output)

    except IOError as e:
        logger.error(
            'Tautulli Version Checker :: Unable to open changelog file. %s' %
            e)
        return '<h4>Unable to open changelog file</h4>'
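For reference, this is how the two patterns in read_changelog classify lines; the sample strings below are invented for the demo, not taken from the changelog.

import re

header_pattern = re.compile(r'(^#+)\s(.+)')
list_pattern = re.compile(r'(^[ \t]*\*\s)(.+)')

m = header_pattern.search('## v2.1.0 (2020-01-01)')
print(len(m.group(1)), m.group(2))        # 2 v2.1.0 (2020-01-01) -> rendered as an <h2> header

m = list_pattern.search('  * Fix: something')
print(len(m.group(1)) // 2, m.group(2))   # 2 Fix: something -> nested <li> at list level 2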
Пример #42
0
# (1) All log levels must be recorded, so the logger's effective level is set to the lowest one -- DEBUG.
# (2) Logs go to two different destinations, so the logger needs two handlers; both destinations are disk files, so both are file handlers.
# (3) all.log must be rotated by time, so it uses logging.handlers.TimedRotatingFileHandler; error.log has no rotation requirement.
# (4) The two log files use different formats, so each handler gets its own formatter.

import logging
import logging.handlers


# Define the logger
logger = logging.getLogger("mylogger")
logger.setLevel(logging.DEBUG)

rf_handler = logging.handlers.TimedRotatingFileHandler("all.log", when="midnight", interval=1, backupCount=7, atTime=None)
rf_handler.setFormatter(logging.Formatter("%(asctime)s-%(levelname)s-%(message)s"))

f_handler = logging.FileHandler("error.log")
f_handler.setLevel(logging.ERROR)
f_handler.setFormatter(logging.Formatter("%(asctime)s-%(levelname)s-%(filename)s[:%(lineno)d]-%(message)s"))

# Attach the handlers to the logger
logger.addHandler(rf_handler)
logger.addHandler(f_handler)

logger.debug("debug message")
logger.info("info message")
logger.warning("warning message")
logger.error("error message")
logger.critical("critical message")
Пример #43
0
def check_recently_added():

    with monitor_lock:
        # add delay to allow for metadata processing
        delay = plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY
        time_threshold = int(time.time()) - delay
        time_interval = plexpy.CONFIG.MONITORING_INTERVAL

        pms_connect = pmsconnect.PmsConnect()
        recently_added_list = pms_connect.get_recently_added_details(
            count='10')

        library_data = libraries.Libraries()
        if recently_added_list:
            recently_added = recently_added_list['recently_added']

            for item in recently_added:
                library_details = library_data.get_details(
                    section_id=item['section_id'])

                if not library_details['do_notify_created']:
                    continue

                metadata = []

                if 0 < time_threshold - int(item['added_at']) <= time_interval:
                    if item['media_type'] == 'movie':
                        metadata_list = pms_connect.get_metadata_details(
                            item['rating_key'])
                        if metadata_list:
                            metadata = [metadata_list['metadata']]
                        else:
                            logger.error(u"PlexPy Monitor :: Unable to retrieve metadata for rating_key %s" \
                                         % str(item['rating_key']))

                    else:
                        metadata_list = pms_connect.get_metadata_children_details(
                            item['rating_key'])
                        if metadata_list:
                            metadata = metadata_list['metadata']
                        else:
                            logger.error(u"PlexPy Monitor :: Unable to retrieve children metadata for rating_key %s" \
                                         % str(item['rating_key']))

                if metadata:

                    if not plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_GRANDPARENT:
                        for item in metadata:

                            library_details = library_data.get_details(
                                section_id=item['section_id'])

                            if 0 < time_threshold - int(
                                    item['added_at']) <= time_interval:
                                logger.debug(
                                    u"PlexPy Monitor :: Library item %s has been added to Plex."
                                    % str(item['rating_key']))

                                # Check if any notification agents have notifications enabled
                                if any(d['on_created'] for d in notifiers.
                                       available_notification_agents()):
                                    # Fire off notifications
                                    threading.Thread(
                                        target=notification_handler.
                                        notify_timeline,
                                        kwargs=dict(
                                            timeline_data=item,
                                            notify_action='created')).start()

                    else:
                        item = max(metadata, key=lambda x: x['added_at'])

                        if 0 < time_threshold - int(
                                item['added_at']) <= time_interval:
                            if item['media_type'] == 'episode' or item[
                                    'media_type'] == 'track':
                                metadata_list = pms_connect.get_metadata_details(
                                    item['grandparent_rating_key'])

                                if metadata_list:
                                    item = metadata_list['metadata']
                                else:
                                    logger.error(u"PlexPy Monitor :: Unable to retrieve grandparent metadata for grandparent_rating_key %s" \
                                                 % str(item['rating_key']))

                            logger.debug(
                                u"PlexPy Monitor :: Library item %s has been added to Plex."
                                % str(item['rating_key']))

                            # Check if any notification agents have notifications enabled
                            if any(d['on_created'] for d in
                                   notifiers.available_notification_agents()):
                                # Fire off notifications
                                threading.Thread(
                                    target=notification_handler.
                                    notify_timeline,
                                    kwargs=dict(
                                        timeline_data=item,
                                        notify_action='created')).start()
Пример #44
0
def get_version():

    if plexpy.FROZEN and common.PLATFORM == 'Windows':
        plexpy.INSTALL_TYPE = 'windows'
        current_version, current_branch = get_version_from_file()
        return current_version, 'origin', current_branch

    elif plexpy.FROZEN and common.PLATFORM == 'Darwin':
        plexpy.INSTALL_TYPE = 'macos'
        current_version, current_branch = get_version_from_file()
        return current_version, 'origin', current_branch

    elif os.path.isdir(os.path.join(plexpy.PROG_DIR, '.git')):
        plexpy.INSTALL_TYPE = 'git'
        output, err = runGit('rev-parse HEAD')

        if not output:
            logger.error('Could not find latest installed version.')
            cur_commit_hash = None
        else:
            cur_commit_hash = str(output)

        if cur_commit_hash and not re.match('^[a-z0-9]+$', cur_commit_hash):
            logger.error('Output does not look like a hash, not using it.')
            cur_commit_hash = None

        if plexpy.CONFIG.DO_NOT_OVERRIDE_GIT_BRANCH and plexpy.CONFIG.GIT_BRANCH:
            remote_name = None
            branch_name = plexpy.CONFIG.GIT_BRANCH

        else:
            remote_branch, err = runGit(
                'rev-parse --abbrev-ref --symbolic-full-name @{u}')
            remote_branch = remote_branch.rsplit('/',
                                                 1) if remote_branch else []
            if len(remote_branch) == 2:
                remote_name, branch_name = remote_branch
            else:
                remote_name = branch_name = None

            if not remote_name and plexpy.CONFIG.GIT_REMOTE:
                logger.error(
                    'Could not retrieve remote name from git. Falling back to %s.'
                    % plexpy.CONFIG.GIT_REMOTE)
                remote_name = plexpy.CONFIG.GIT_REMOTE
            if not remote_name:
                logger.error(
                    'Could not retrieve remote name from git. Defaulting to origin.'
                )
                remote_name = 'origin'

            if not branch_name and plexpy.CONFIG.GIT_BRANCH:
                logger.error(
                    'Could not retrieve branch name from git. Falling back to %s.'
                    % plexpy.CONFIG.GIT_BRANCH)
                branch_name = plexpy.CONFIG.GIT_BRANCH
            if not branch_name:
                logger.error(
                    'Could not retrieve branch name from git. Defaulting to master.'
                )
                branch_name = 'master'

        return cur_commit_hash, remote_name, branch_name

    else:
        if plexpy.DOCKER:
            plexpy.INSTALL_TYPE = 'docker'
        elif plexpy.SNAP:
            plexpy.INSTALL_TYPE = 'snap'
        else:
            plexpy.INSTALL_TYPE = 'source'

        current_version, current_branch = get_version_from_file()
        return current_version, 'origin', current_branch
Пример #45
0
    def get_server_connections(self,
                               pms_identifier='',
                               pms_ip='',
                               pms_port=32400,
                               include_https=True):

        if not pms_identifier:
            logger.error(
                u"Tautulli PlexTV :: Unable to retrieve server connections: no pms_identifier provided."
            )
            return {}

        plextv_resources = self.get_plextv_resources(
            include_https=include_https, output_format='xml')
        try:
            xml_head = plextv_resources.getElementsByTagName('Device')
        except Exception as e:
            logger.warn(
                u"Tautulli PlexTV :: Unable to parse XML for get_server_connections: %s."
                % e)
            return {}

        # Function to get all connections for a device
        def get_connections(device):
            conn = []
            connections = device.getElementsByTagName('Connection')

            server = {
                'pms_identifier':
                helpers.get_xml_attr(device, 'clientIdentifier'),
                'pms_name':
                helpers.get_xml_attr(device, 'name'),
                'pms_version':
                helpers.get_xml_attr(device, 'productVersion'),
                'pms_platform':
                helpers.get_xml_attr(device, 'platform'),
                'pms_presence':
                helpers.get_xml_attr(device, 'presence'),
                'pms_is_cloud':
                1 if helpers.get_xml_attr(device, 'platform') == 'Cloud' else 0
            }

            for c in connections:
                server_details = {
                    'protocol': helpers.get_xml_attr(c, 'protocol'),
                    'address': helpers.get_xml_attr(c, 'address'),
                    'port': helpers.get_xml_attr(c, 'port'),
                    'uri': helpers.get_xml_attr(c, 'uri'),
                    'local': helpers.get_xml_attr(c, 'local')
                }
                conn.append(server_details)

            server['connections'] = conn
            return server

        server = {}

        # Try to match the device
        for a in xml_head:
            if helpers.get_xml_attr(a, 'clientIdentifier') == pms_identifier:
                server = get_connections(a)
                break

        # Else no device match found
        if not server:
            # Try to match the PMS_IP and PMS_PORT
            for a in xml_head:
                if helpers.get_xml_attr(a, 'provides') == 'server':
                    connections = a.getElementsByTagName('Connection')

                    for connection in connections:
                        if helpers.get_xml_attr(connection, 'address') == pms_ip and \
                                helpers.get_xml_attr(connection, 'port') == str(pms_port):
                            server = get_connections(a)
                            break

                    if server.get('connections'):
                        break

        return server
Пример #46
0
def update():
    if plexpy.PYTHON2:
        logger.warn('Tautulli is running using Python 2. Unable to update.')
        return

    if not plexpy.UPDATE_AVAILABLE:
        return

    if plexpy.INSTALL_TYPE in ('docker', 'snap', 'windows', 'macos'):
        return

    elif plexpy.INSTALL_TYPE == 'git':
        output, err = runGit('pull --ff-only {} {}'.format(
            plexpy.CONFIG.GIT_REMOTE, plexpy.CONFIG.GIT_BRANCH))

        if not output:
            logger.error('Unable to download latest version')
            return

        for line in output.split('\n'):
            if 'Already up-to-date.' in line or 'Already up to date.' in line:
                logger.info('No update available, not updating')
            elif line.endswith(('Aborting', 'Aborting.')):
                logger.error('Unable to update from git: ' + line)

    elif plexpy.INSTALL_TYPE == 'source':
        tar_download_url = 'https://github.com/{}/{}/tarball/{}'.format(
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.CONFIG.GIT_BRANCH)
        update_dir = os.path.join(plexpy.DATA_DIR, 'update')
        version_path = os.path.join(plexpy.PROG_DIR, 'version.txt')

        logger.info('Downloading update from: ' + tar_download_url)
        data = request.request_content(tar_download_url)

        if not data:
            logger.error(
                "Unable to retrieve new version from '%s', can't update",
                tar_download_url)
            return

        download_name = plexpy.CONFIG.GIT_BRANCH + '-github'
        tar_download_path = os.path.join(plexpy.DATA_DIR, download_name)

        # Save tar to disk
        with open(tar_download_path, 'wb') as f:
            f.write(data)

        # Extract the tar to update folder
        logger.info('Extracting file: ' + tar_download_path)
        tar = tarfile.open(tar_download_path)
        tar.extractall(update_dir)
        tar.close()

        # Delete the tar.gz
        logger.info('Deleting file: ' + tar_download_path)
        os.remove(tar_download_path)

        # Find update dir name
        update_dir_contents = [
            x for x in os.listdir(update_dir)
            if os.path.isdir(os.path.join(update_dir, x))
        ]
        if len(update_dir_contents) != 1:
            logger.error("Invalid update data, update failed: " +
                         str(update_dir_contents))
            return
        content_dir = os.path.join(update_dir, update_dir_contents[0])

        # walk temp folder and move files to main folder
        for dirname, dirnames, filenames in os.walk(content_dir):
            dirname = dirname[len(content_dir) + 1:]
            for curfile in filenames:
                old_path = os.path.join(content_dir, dirname, curfile)
                new_path = os.path.join(plexpy.PROG_DIR, dirname, curfile)

                if os.path.isfile(new_path):
                    os.remove(new_path)
                os.renames(old_path, new_path)

        # Update version.txt
        try:
            with open(version_path, 'w') as f:
                f.write(str(plexpy.LATEST_VERSION))
        except IOError as e:
            logger.error(
                "Unable to write current version to version.txt, update not complete: %s",
                e)
            return
Пример #47
0
                name = songs_json['name'] if (
                    'name' in songs_json) else songs_json['listname']
                addtags(location, song, name)
                print('\n')
        except Exception as e:
            logger.error('Download Error: ' + str(e))


if __name__ == '__main__':
    input_url = input('Enter the url:').strip()
    album_name = "songs"
    try:
        proxies, headers = setProxy()
        res = requests.get(input_url, proxies=proxies, headers=headers)
    except Exception as e:
        logger.error('Error accessing website: ' + str(e))

    soup = BeautifulSoup(res.text, "lxml")

    try:
        getPlayListID = soup.select(".flip-layout")[0]["data-listid"]
        if getPlayListID is not None:
            print("Initiating PlayList Downloading")
            downloadSongs(getPlayList(getPlayListID))
            sys.exit()
    except Exception as e:
        print('...')
    try:
        getAlbumID = soup.select(".play")[0]["onclick"]
        getAlbumID = ast.literal_eval(
            re.search(r"\[(.*?)\]", getAlbumID).group())[1]
Пример #48
0
    def get_plex_downloads(self):
        logger.debug(u"Tautulli PlexTV :: Retrieving current server version.")

        pms_connect = pmsconnect.PmsConnect()
        pms_connect.set_server_version()

        update_channel = pms_connect.get_server_update_channel()

        logger.debug(u"Tautulli PlexTV :: Plex update channel is %s." %
                     update_channel)
        plex_downloads = self.get_plextv_downloads(
            plexpass=(update_channel == 'beta'))

        try:
            available_downloads = json.loads(plex_downloads)
        except Exception as e:
            logger.warn(
                u"Tautulli PlexTV :: Unable to load JSON for get_plex_updates: %s."
                % e)
            return {}

        # Get the updates for the platform
        pms_platform = common.PMS_PLATFORM_NAME_OVERRIDES.get(
            plexpy.CONFIG.PMS_PLATFORM, plexpy.CONFIG.PMS_PLATFORM)
        platform_downloads = available_downloads.get('computer').get(pms_platform) or \
            available_downloads.get('nas').get(pms_platform)

        if not platform_downloads:
            logger.error(
                u"Tautulli PlexTV :: Unable to retrieve Plex updates: Could not match server platform: %s."
                % pms_platform)
            return {}

        v_old = helpers.cast_to_int("".join(
            v.zfill(4)
            for v in plexpy.CONFIG.PMS_VERSION.split('-')[0].split('.')[:4]))
        v_new = helpers.cast_to_int("".join(
            v.zfill(4) for v in platform_downloads.get('version', '').split(
                '-')[0].split('.')[:4]))

        if not v_old:
            logger.error(
                u"Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid current server version: %s."
                % plexpy.CONFIG.PMS_VERSION)
            return {}
        if not v_new:
            logger.error(
                u"Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid new server version: %s."
                % platform_downloads.get('version'))
            return {}

        # Get proper download
        releases = platform_downloads.get('releases', [{}])
        release = next(
            (r for r in releases
             if r['distro'] == plexpy.CONFIG.PMS_UPDATE_DISTRO
             and r['build'] == plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD),
            releases[0])

        download_info = {
            'update_available': v_new > v_old,
            'platform': platform_downloads.get('name'),
            'release_date': platform_downloads.get('release_date'),
            'version': platform_downloads.get('version'),
            'requirements': platform_downloads.get('requirements'),
            'extra_info': platform_downloads.get('extra_info'),
            'changelog_added': platform_downloads.get('items_added'),
            'changelog_fixed': platform_downloads.get('items_fixed'),
            'label': release.get('label'),
            'distro': release.get('distro'),
            'distro_build': release.get('build'),
            'download_url': release.get('url'),
        }

        return download_info
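The update check above compares versions by zero-padding each of the first four dotted components to four digits and concatenating, so the strings compare correctly as plain integers. A standalone illustration of the same trick (the sample version strings are invented, and plain int() stands in for helpers.cast_to_int):

def version_key(version):
    # '1.19.5.3112-abc123' -> int('0001001900053112')
    return int("".join(part.zfill(4)
                       for part in version.split('-')[0].split('.')[:4]))

print(version_key('1.19.5.3112-0'))                                 # 1001900053112
print(version_key('1.20.0.3133-0') > version_key('1.19.5.3112-0'))  # True -> update available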
Пример #49
0
import time
import logger
logger(time.time(), logger.INFO)

# last commit would make it easy to understand

try:
    import pip.req.internal
except ImportError:
    import pip._internal

logger.info("pip version will set to latest")

# exceptions are added to the version parsing
text = parse_requirement("/home/vivonk/aws/parsing_config.txt")
logger.warn("parsing have following requirements\n%s" % text)
logger.info("info will pushed to jenkins")

from time import time

logger.error("time version was not set to latest path")
Пример #50
0
def import_from_plexivity(database=None, table_name=None, import_ignore_interval=0):

    try:
        connection = sqlite3.connect(database, timeout=20)
        connection.row_factory = sqlite3.Row
    except (sqlite3.OperationalError, ValueError):
        logger.error(u"PlexPy Importer :: Invalid filename.")
        return None

    try:
        connection.execute('SELECT xml from %s' % table_name)
    except sqlite3.OperationalError:
        logger.error(u"PlexPy Importer :: Database specified does not contain the required fields.")
        return None

    logger.debug(u"PlexPy Importer :: Plexivity data import in progress...")

    logger.debug(u"PlexPy Importer :: Disabling monitoring while import in progress.")
    plexpy.schedule_job(activity_pinger.check_active_sessions, 'Check for active sessions',
                        hours=0, minutes=0, seconds=0)
    plexpy.schedule_job(activity_pinger.check_recently_added, 'Check for recently added items',
                        hours=0, minutes=0, seconds=0)
    plexpy.schedule_job(activity_pinger.check_server_response, 'Check for Plex remote access',
                        hours=0, minutes=0, seconds=0)

    ap = activity_processor.ActivityProcessor()
    user_data = users.Users()

    # Get the latest friends list so we can pull user id's
    try:
        plextv.refresh_users()
    except Exception:
        logger.debug(u"PlexPy Importer :: Unable to refresh the users list. Aborting import.")
        return None

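    # Map Plexivity's columns onto PlexPy's session_history schema; when an
    # episode title ("orig_title_ep") is set it becomes the title and the
    # original title becomes the grandparent (show) title.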
    query = 'SELECT id AS id, ' \
            'time AS started, ' \
            'stopped, ' \
            'null AS user_id, ' \
            'user, ' \
            'ip_address, ' \
            'paused_counter, ' \
            'platform AS player, ' \
            'null AS platform, ' \
            'null AS machine_id, ' \
            'null AS media_type, ' \
            'null AS view_offset, ' \
            'xml, ' \
            'rating AS content_rating, ' \
            'summary, ' \
            'title AS full_title, ' \
            '(CASE WHEN orig_title_ep = "n/a" THEN orig_title ' \
            'ELSE orig_title_ep END) AS title, ' \
            '(CASE WHEN orig_title_ep != "n/a" THEN orig_title ' \
            'ELSE null END) AS grandparent_title ' \
            'FROM ' + table_name + ' ORDER BY id'

    result = connection.execute(query)

    for row in result:
        # Extract the xml from the Plexivity db xml field.
        extracted_xml = extract_plexivity_xml(row['xml'])

        # If we get back None from our xml extractor skip over the record and log error.
        if not extracted_xml:
            logger.error(u"PlexPy Importer :: Skipping record with id %s due to malformed xml."
                         % str(row['id']))
            continue

        # Skip line if we don't have a ratingKey to work with
        #if not row['rating_key']:
        #    logger.error(u"PlexPy Importer :: Skipping record due to null ratingKey.")
        #    continue

        # If the user no longer exists in the friends list, pull the id from the xml.
        user_id = user_data.get_user_id(user=row['user']) or extracted_xml['user_id']

        session_history = {'started': arrow.get(row['started']).timestamp,
                           'stopped': arrow.get(row['stopped']).timestamp,
                           'rating_key': extracted_xml['rating_key'],
                           'title': row['title'],
                           'parent_title': extracted_xml['parent_title'],
                           'grandparent_title': row['grandparent_title'],
                           'full_title': row['full_title'],
                           'user_id': user_id,
                           'user': row['user'],
                           'ip_address': row['ip_address'] if row['ip_address'] else extracted_xml['ip_address'],
                           'paused_counter': row['paused_counter'],
                           'player': row['player'],
                           'platform': extracted_xml['platform'],
                           'machine_id': extracted_xml['machine_id'],
                           'parent_rating_key': extracted_xml['parent_rating_key'],
                           'grandparent_rating_key': extracted_xml['grandparent_rating_key'],
                           'media_type': extracted_xml['media_type'],
                           'view_offset': extracted_xml['view_offset'],
                           'video_decision': extracted_xml['video_decision'],
                           'audio_decision': extracted_xml['audio_decision'],
                           'transcode_decision': extracted_xml['transcode_decision'],
                           'duration': extracted_xml['duration'],
                           'width': extracted_xml['width'],
                           'height': extracted_xml['height'],
                           'container': extracted_xml['container'],
                           'video_codec': extracted_xml['video_codec'],
                           'audio_codec': extracted_xml['audio_codec'],
                           'bitrate': extracted_xml['bitrate'],
                           'video_resolution': extracted_xml['video_resolution'],
                           'video_framerate': extracted_xml['video_framerate'],
                           'aspect_ratio': extracted_xml['aspect_ratio'],
                           'audio_channels': extracted_xml['audio_channels'],
                           'transcode_protocol': extracted_xml['transcode_protocol'],
                           'transcode_container': extracted_xml['transcode_container'],
                           'transcode_video_codec': extracted_xml['transcode_video_codec'],
                           'transcode_audio_codec': extracted_xml['transcode_audio_codec'],
                           'transcode_audio_channels': extracted_xml['transcode_audio_channels'],
                           'transcode_width': extracted_xml['transcode_width'],
                           'transcode_height': extracted_xml['transcode_height']
                           }

        session_history_metadata = {'rating_key': extracted_xml['rating_key'],
                                    'parent_rating_key': extracted_xml['parent_rating_key'],
                                    'grandparent_rating_key': extracted_xml['grandparent_rating_key'],
                                    'title': row['title'],
                                    'parent_title': extracted_xml['parent_title'],
                                    'grandparent_title': row['grandparent_title'],
                                    'media_index': extracted_xml['media_index'],
                                    'parent_media_index': extracted_xml['parent_media_index'],
                                    'thumb': extracted_xml['thumb'],
                                    'parent_thumb': extracted_xml['parent_thumb'],
                                    'grandparent_thumb': extracted_xml['grandparent_thumb'],
                                    'art': extracted_xml['art'],
                                    'media_type': extracted_xml['media_type'],
                                    'year': extracted_xml['year'],
                                    'originally_available_at': extracted_xml['originally_available_at'],
                                    'added_at': extracted_xml['added_at'],
                                    'updated_at': extracted_xml['updated_at'],
                                    'last_viewed_at': extracted_xml['last_viewed_at'],
                                    'content_rating': row['content_rating'],
                                    'summary': row['summary'],
                                    'tagline': extracted_xml['tagline'],
                                    'rating': extracted_xml['rating'],
                                    'duration': extracted_xml['duration'],
                                    'guid': extracted_xml['guid'],
                                    'section_id': extracted_xml['section_id'],
                                    'directors': extracted_xml['directors'],
                                    'writers': extracted_xml['writers'],
                                    'actors': extracted_xml['actors'],
                                    'genres': extracted_xml['genres'],
                                    'studio': extracted_xml['studio'],
                                    'labels': extracted_xml['labels'],
                                    'full_title': row['full_title'],
                                    'width': extracted_xml['width'],
                                    'height': extracted_xml['height'],
                                    'container': extracted_xml['container'],
                                    'video_codec': extracted_xml['video_codec'],
                                    'audio_codec': extracted_xml['audio_codec'],
                                    'bitrate': extracted_xml['bitrate'],
                                    'video_resolution': extracted_xml['video_resolution'],
                                    'video_framerate': extracted_xml['video_framerate'],
                                    'aspect_ratio': extracted_xml['aspect_ratio'],
                                    'audio_channels': extracted_xml['audio_channels']
                                    }

        # On older versions of PMS, "clip" items were still classified as "movie" and had bad ratingKey values
        # Just make sure that the ratingKey is indeed an integer
        if session_history_metadata['rating_key'].isdigit():
            ap.write_session_history(session=session_history,
                                     import_metadata=session_history_metadata,
                                     is_import=True,
                                     import_ignore_interval=import_ignore_interval)
        else:
            logger.debug(u"PlexPy Importer :: Item has bad rating_key: %s" % session_history_metadata['rating_key'])

    logger.debug(u"PlexPy Importer :: Plexivity data import complete.")
    import_users()

    logger.debug(u"PlexPy Importer :: Re-enabling monitoring.")
    plexpy.initialize_scheduler()
Example #51
0
    def updateVehicle(self, shareJob):
        publicAccount = self.getAccount(shareJob)
        if len(publicAccount) == 0:
            return errorcode.AUTH_ERROR, errormsg.LOGIN_FAIL

        externalSpec = shareJob.get("external_vehicle_spec", None)
        if externalSpec is None:
            logger.error("external spec missing")
            return errorcode.SPEC_ERROR, errormsg.EXTERNAL_SPEC_EMPTY

        vehicle = shareJob.get("vehicle", None)
        if vehicle is None:
            logger.error("vehicle missing")
            return errorcode.DATA_ERROR, errormsg.VEHICLE_EMPTY

        price = vehicle.get('price', None)
        if price is None:
            logger.error('price missing')
            return errorcode.DATA_ERROR, errormsg.PRICE_EMPTY

        user = vehicle.get("user", None)
        if user is None:
            logger.error("user missing")
            return errorcode.DATA_ERROR, errormsg.USER_EMPTY

        '''
        address = user.get("address", None)
        if address is None:
            logger.error("address missing")
            return errorcode.DATA_ERROR, errormsg.ADDRESS_EMPTY
        '''
        merchant = vehicle.get('merchant', None)
        if merchant is None:
            logger.error("merchant missing")
            return errorcode.DATA_ERROR, errormsg.VEHICLE_EMPTY
        address = merchant.get('address', None)

        url = shareJob.get("url", None)
        if url is None:
            logger.error("url missing")
            return errorcode.DATA_ERROR, errormsg.VEHICLE_URL_EMPTY

        ## Assemble the vehicle listing fields
        carinfo = {}
        # Unique vehicle id (0 when first listing the car): "carid": 0,
        pRule = r'\d{7,}'
        carid = re.compile(pRule).findall(url)
        if len(carid) == 0:
            return errorcode.DATA_ERROR, errormsg.VEHICLE_URL_EMPTY
        carinfo['carid'] = int(carid[0])

        carinfo['view'] = 0

        # Brand id: "brandid": 33,
        brandid = externalSpec['brand']['id']
        carinfo['brandid'] = int(brandid)

        # Series id: "seriesid": 2951,
        seriesid = externalSpec['series']['id']
        carinfo['seriesid'] = int(seriesid)

        # Model id: "productid": 14411,
        productid = externalSpec['model']['id']
        carinfo['productid'] = int(productid)

        # Custom vehicle name: "carname": ""
        carname = externalSpec['series']['name'] + ' ' + externalSpec['model']['name']
        carinfo['carname'] = urllib.quote(urllib.quote(carname.encode('utf-8')))

        # TODO: there is no displacement API; with a model id it is detected
        # automatically, otherwise it has to be filled in by hand.
        # Displacement in litres, numeric string such as "1.2": "displacement": "",
        carinfo['displacement'] = "1.4"

        # Gearbox (manual or automatic): "gearbox": "",
        spec_details = shareJob['vehicle_spec_detail']['details']
        gearbox = "手动"  # "manual"
        if spec_details[42].count(u"自") > 0:  # spec field mentions "automatic"
            gearbox = "自动"  # "automatic"
        carinfo['gearbox'] = urllib.quote(gearbox.encode('utf-8'))

        # Whether the transfer fee is included: "isincludetransferfee": true,
        quoted_price_include_transfer_fee = price.get('quoted_price_include_transfer_fee', True)
        carinfo['isincludetransferfee'] = quoted_price_include_transfer_fee

        # Asking price (unit: 10,000 CNY): "bookprice": 15.5,
        bookprice = 0.0
        quotedPrice = price.get("quoted_price", None)
        if quotedPrice is not None:
            bookprice = int(quotedPrice) * 1.0 / 10000
        carinfo['bookprice'] = bookprice

        # Whether it is a fixed (no-haggle) price: "isfixprice": false,
        carinfo['isfixprice'] = False

        # Province id: "provinceid": 1,
        provinceid = address.get("province_code", None)
        if provinceid is None:
            return errorcode.DATA_ERROR, errormsg.PROVINCE_EMPTY
        carinfo['provinceid'] = int(provinceid)

        # City id: "cityid": 1,
        cityid = address.get("city_code", None)
        if cityid is None:
            return errorcode.DATA_ERROR, errormsg.CITY_EMPTY
        carinfo['cityid'] = int(cityid)

        # Mileage (unit: 10,000 km): "drivemileage": 3.5,
        summary = vehicle.get("summary", None)
        if summary is None:
            return errorcode.DATA_ERROR, errormsg.VEHICLE_SUMMARY_EMPTY
        drivemileage = summary.get("mileage", None)
        if drivemileage is None:
            return errorcode.DATA_ERROR, errormsg.VEHICLE_SUMMARY_EMPTY
        carinfo['drivemileage'] = int(drivemileage) * 1.0 / 10000

        # Vehicle purpose id: "purposeid": 1,
        purpose = summary.get("purpose", None)
        carinfo['purposeid'] = int(self.getPurposeID(purpose))

        # Vehicle color id: "colorid": 0,
        color = summary.get("color", None)
        carinfo['colorid'] = int(self.getColorID(color))

        # First registration date: "firstregtime": "2012-1",
        reg_date = self.getDate("registration_date", shareJob)
        carinfo['firstregtime'] = "%d-%d" % (reg_date.year, reg_date.month)

        # Annual inspection date: "verifytime": "2012-2",
        inspection_date = self.getDate("inspection_date", shareJob)
        carinfo['verifytime'] = "%d-%d" % (inspection_date.year, inspection_date.month)

        # Vehicle and vessel tax valid-until date: "veticaltaxtime": "2012-3",
        carinfo['veticaltaxtime'] = carinfo['verifytime']

        # Compulsory traffic insurance date: "insurancedate": "2012-4"
        carinfo['insurancedate'] = carinfo['firstregtime']
        '''
        vehicle_date = vehicle.get("vehicle_date", None)
        if vehicle_date is not None:
            compulsory_insurance_expire_date = vehicle_date.get("compulsory_insurance_expire_date", None)
            if compulsory_insurance_expire_date is not None:
                carinfo['insurancedate'] = compulsory_insurance_expire_date
        '''
        QualityAssDate = 0
        QualityAssMile = 0
        MerchantSubstituteConfig = shareJob.get('merchant_substitute_config', None)
        if MerchantSubstituteConfig is not None:
            merchant_summary = MerchantSubstituteConfig.get('summary', None)
            if merchant_summary is not None:
                QualityAssDate = MerchantSubstituteConfig.get('quality_assurance_time', 6)
                QualityAssMile = MerchantSubstituteConfig.get('quality_assurance_mile', 20000.0)
        # Extended warranty period: "qualityassdate": 0 (in months)
        carinfo['qualityassdate'] = int(QualityAssDate)
        # Extended warranty mileage: "qualityassmile": 0 (in 10,000 km)
        carinfo['qualityassmile'] = QualityAssMile / 10000.0


        # TODO: note: the VIN code and the registration-document photo URL must
        # either both be present or both be absent -> the registration photo is
        # replaced by the VIN photo.
        # One photo of the vehicle registration document: "driverlicenseimage":
        # "http://www.autoimg.cn/2scimg/2013/8/19/u_505895724323298.jpg",
        # summary = externalSpec.get("summary", None)
        # drivingLicenseUrl = summary.get('drivingLicensePicture', None)
        vin_picture_url = summary.get("vin_picture", None)

        #"vincode": "aaaacxadfadf11334"
        vincode = vehicle.get("vin", None)
        print "vin_picture:", vin_picture_url
        print 'vin:', vincode
        if (vin_picture_url is None) or (vincode is None):
            carinfo['vincode'] = ""
            carinfo['driverlicenseimage'] = ""
        else:
            carinfo['vincode'] = str(vincode)
            carinfo['driverlicenseimage'] = self.uploadLicensePic(vin_picture_url, publicAccount)


        # Image URLs, separated by commas:
        # "http://www.autoimg.cn/2scimg/2013/8/19/u_50589572323423174598.jpg",
        # Option 1: upload the image streams


        # Option 2: upload the URLs (to fix: the images have to be uploaded first)
        '''
        # Image and URL bookkeeping lives in session.py
        imgurls = self.getUrlList(vehicle, publicAccount)
        if imgurls is None:
            return errorcode.DATA_ERROR, u"上传图片url出错,请重试"  # "Failed to upload image URLs, please retry"
        carinfo['imgurls'] = imgurls
        #carinfo['imgurls'] = "http://www.autoimg.cn/2scimg/2013/8/19/u_505895724323298.jpg,http://www.autoimg.cn/2scimg/2013/8/19/u_50589572323423174598.jpg "
        '''
        gallery = vehicle.get("gallery", None)
        if gallery is None:
            logger.error("gallery missing")
            return errorcode.DATA_ERROR, errormsg.PHOTO_NOT_ENOUGH
        photoList = self.uploadPics(gallery.get("photos", []), publicAccount)
        carinfo['imgurls'] = photoList

        # Dealer note: "usercomment": "test data"
        Symbol = "\r\n"
        lateral = "——" * 23
        carinfo['usercomment'] = urllib.quote(self.getContentVal(shareJob, Symbol, lateral).encode('utf-8'))

        # Salesperson entity: salesperson
        salesperson = self.getSiteContact(publicAccount)
        if salesperson is None:
            return errorcode.SPEC_ERROR, errormsg.SALEID_EMPTY
        carinfo['salesperson'] = salesperson

        carinfoJson = json.dumps(carinfo)
        logger.debug("carinfoJson:" + str(carinfoJson))

        #form = {"_appid": publicAccount["_appid"], "dealerid": publicAccount["dealerid"], "key": publicAccount["key"], "carinfo": carinfo}
        form = "_appid="+publicAccount["_appid"]+"&dealerid=%d"%(publicAccount["dealerid"])+"&key="+publicAccount["key"]+"&carinfo="+str(carinfoJson)

        # Submit the price update
        (success, msg) = self.postUpdateVehicle(form, publicAccount)
        if success:
            logger.debug("post success for che168public")
            return errorcode.SUCCESS, msg
        return errorcode.SITE_ERROR, msg
Example #52
0
def error(update, context):
    """Log Errors caused by Updates."""
    logger.error('Update "%s" caused error "%s"', update, context.error)
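
A minimal sketch of how a handler like this is typically registered with the python-telegram-bot v12-style API; the token and the polling setup here are illustrative assumptions, not part of the original example:

from telegram.ext import Updater

# Hypothetical bot token; replace with a real one.
updater = Updater(token='YOUR-BOT-TOKEN', use_context=True)
# Register the handler so exceptions raised while processing updates are logged.
updater.dispatcher.add_error_handler(error)
updater.start_polling()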
Example #53
0
def run():
    """
    Scan given directory for new files.
    """
    while True:
        try:
            active = pacvert.thequeue.getActive()
            current = pacvert.thequeue.getPending()
            if active is None and current is not None:
                pacvert.thequeue.addActive(current)
                active = current

                try:
                    # setting up codec specific settings
                    video = {
                        'codec': pacvert.CONFIG.DEFAULT_CODEC_VIDEO
                    }  # set the targets codec
                    if pacvert.CONFIG.DEFAULT_CODEC_VIDEO_CROP:  # check if cropping is enabled
                        video['width'] = active.crop[0]  # set width
                        video['height'] = active.crop[1]  # set height
                        video['mode'] = 'crop'  # set crop mode

                    if pacvert.CONFIG.DEFAULT_CODEC_VIDEO == "h264":  # if target codec is h264
                        video[
                            'preset'] = pacvert.CONFIG.CODEC_AVC_PRESET  # set preset
                        video[
                            'profile'] = pacvert.CONFIG.CODEC_AVC_PROFILE  # set profile
                        video[
                            'quality'] = pacvert.CONFIG.CODEC_AVC_QUALITY  # set quality
                        video[
                            'tune'] = pacvert.CONFIG.CODEC_AVC_TUNE  # set tune
                        if pacvert.CONFIG.CODEC_AVC_AUTOMAXRATE:  # if automatic maxrate is enabled
                            if pacvert.CONFIG.CODEC_AVC_BUFSIZE < 0 or pacvert.CONFIG.CODEC_H264_MAXRATE < 0:
                                if 'bit_rate' in active.mediainfo['Video']:
                                    video['maxrate'] = cast_to_int(
                                        active.mediainfo['Video']['bit_rate']
                                    )  # set maxrate to video track bitrate
                                    video['bufsize'] = cast_to_int(
                                        active.mediainfo['Video']['bit_rate'] *
                                        3
                                    )  # set bufsize to three times the video bitrate
                            else:
                                video[
                                    'maxrate'] = pacvert.CONFIG.CODEC_AVC_MAXRATE  # set maxrate to given value
                                video[
                                    'bufsize'] = pacvert.CONFIG.CODEC_AVC_BUFSIZE  # set bufsize to given value
                        for anotheropt in pacvert.CONFIG.CODEC_AVC_ADDITIONALOPT:  # if additional options are specified
                            video[anotheropt] = pacvert.CONFIG.CODEC_AVC_ADDITIONALOPT[
                                anotheropt]  # add options to out encoding list
                    elif pacvert.CONFIG.DEFAULT_CODEC_VIDEO == "hevc":  # if target codec is hevc
                        video[
                            'preset'] = pacvert.CONFIG.CODEC_HEVC_PRESET  # set preset
                        video[
                            'quality'] = pacvert.CONFIG.CODEC_HEVC_QUALITY  # set quality
                        video[
                            'tune'] = pacvert.CONFIG.CODEC_HEVC_TUNE  # set tune
                        if pacvert.CONFIG.CODEC_HEVC_AUTOMAXRATE:  # set max rate
                            if pacvert.CONFIG.CODEC_HEVC_BUFSIZE < 0 or pacvert.CONFIG.CODEC_HEVC_MAXRATE < 0:
                                if 'bit_rate' in active.mediainfo['Video']:
                                    video['maxrate'] = cast_to_int(
                                        active.mediainfo['Video']['bit_rate']
                                    )  # set maxrate to video track bitrate
                                    video['bufsize'] = cast_to_int(
                                        active.mediainfo['Video']['bit_rate'] *
                                        3
                                    )  # set bufsize to three times the video bitrate
                            else:
                                video[
                                    'maxrate'] = pacvert.CONFIG.CODEC_HEVC_MAXRATE  # set maxrate to given value
                                video[
                                    'bufsize'] = pacvert.CONFIG.CODEC_HEVC_BUFSIZE  # set bufsize to given value
                        for anotheropt in pacvert.CONFIG.CODEC_HEVC_ADDITIONALOPT:  # if additional options are specified
                            video[anotheropt] = pacvert.CONFIG.CODEC_HEVC_ADDITIONALOPT[
                                anotheropt]  # add options to out encoding list
                    elif pacvert.CONFIG.DEFAULT_CODEC_VIDEO == "vp8":  # if target codec is vp8
                        video[
                            'quality'] = pacvert.CONFIG.CODEC_VP8_QUALITY  # set video quality
                        video[
                            'threads'] = pacvert.CONFIG.CODEC_VP8_THREADS  # set no of real cores
                    else:
                        logger.error("Codec not yet implemented")

                    conv = c.convert(
                        active.fullpath, active.outputfilename, {
                            'format': 'mkv',
                            'video': video,
                            'audio': {
                                'codec': pacvert.CONFIG.DEFAULT_CODEC_AUDIO,
                            },
                            'subtitle': {
                                'codec': pacvert.CONFIG.DEFAULT_CODEC_SUBTITLE,
                            },
                            'map': 0,
                        })
                    for timecode in conv:
                        logger.debug("Converting (" + str(timecode) + ")...")
                        active.progress = timecode
                    logger.info("Finished File: '" + active.fullpath + "'")
                    active.finished = now()
                    pacvert.thequeue.addFinished(
                        pacvert.thequeue.getActive())  # set status to finished
                except FFMpegConvertError as e:
                    logger.error("ffmpeg: " + e.message + " with command: " +
                                 e.cmd)

                    pacvert.thequeue.addFailed(
                        pacvert.thequeue.getActive())  # set status to failed
                time.sleep(1)
        except Exception as e:
            logger.error(e)
Example #54
0
def import_tautulli_db(database=None, method=None, backup=False):
    if IS_IMPORTING:
        logger.warn(
            "Tautulli Database :: Another Tautulli database is currently being imported. "
            "Please wait until it is complete before importing another database."
        )
        return False

    db_validate = validate_database(database=database)
    if db_validate != 'success':
        logger.error(
            "Tautulli Database :: Failed to import Tautulli database: %s",
            db_validate)
        return False

    if method not in ('merge', 'overwrite'):
        logger.error(
            "Tautulli Database :: Failed to import Tautulli database: invalid import method '%s'",
            method)
        return False

    if backup:
        # Make a backup of the current database first
        logger.info(
            "Tautulli Database :: Creating a database backup before importing."
        )
        if not make_backup():
            logger.error(
                "Tautulli Database :: Failed to import Tautulli database: failed to create database backup"
            )
            return False

    logger.info(
        "Tautulli Database :: Importing Tautulli database '%s' with import method '%s'...",
        database, method)
    set_is_importing(True)

    db = MonitorDatabase()
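    # BEGIN IMMEDIATE acquires the write lock up front so no other writer can
    # interleave with the import transaction.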
    db.connection.execute('BEGIN IMMEDIATE')
    db.connection.execute('ATTACH ? AS import_db', [database])

    # Get the current number of used ids in the session_history table
    session_history_seq = db.select_single(
        'SELECT seq FROM sqlite_sequence WHERE name = "session_history"')
    session_history_rows = session_history_seq.get('seq', 0)

    session_history_tables = ('session_history', 'session_history_metadata',
                              'session_history_media_info')

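    # For a merge, copy the imported session history into temporary tables and
    # offset the ids (and reference_ids) by the current row count so they
    # cannot collide with existing rows.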
    if method == 'merge':
        logger.info(
            "Tautulli Database :: Creating temporary database tables to re-index grouped session history."
        )
        for table_name in session_history_tables:
            db.action(
                'CREATE TABLE {table}_copy AS SELECT * FROM import_db.{table}'.
                format(table=table_name))
            db.action(
                'UPDATE {table}_copy SET id = id + ?'.format(table=table_name),
                [session_history_rows])
            if table_name == 'session_history':
                db.action(
                    'UPDATE {table}_copy SET reference_id = reference_id + ?'.
                    format(table=table_name), [session_history_rows])

    # Keep track of all table columns so that duplicates can be removed after importing
    table_columns = {}

    tables = db.select('SELECT name FROM import_db.sqlite_master '
                       'WHERE type = "table" AND name NOT LIKE "sqlite_%" '
                       'ORDER BY name')
    for table in tables:
        table_name = table['name']
        if table_name == 'sessions':
            # Skip temporary sessions table
            continue

        current_table = db.select(
            'PRAGMA main.table_info({table})'.format(table=table_name))
        if not current_table:
            # Skip tables that do not exist in the current database
            continue

        logger.info("Tautulli Database :: Importing database table '%s'.",
                    table_name)

        if method == 'overwrite':
            # Clear the table and reset the autoincrement ids
            db.action('DELETE FROM {table}'.format(table=table_name))
            db.action('DELETE FROM sqlite_sequence WHERE name = ?',
                      [table_name])

        if method == 'merge' and table_name in session_history_tables:
            from_db_name = 'main'
            from_table_name = table_name + '_copy'
        else:
            from_db_name = 'import_db'
            from_table_name = table_name

        # Get the list of columns to import
        current_columns = [c['name'] for c in current_table]
        import_table = db.select(
            'PRAGMA {from_db}.table_info({from_table})'.format(
                from_db=from_db_name, from_table=from_table_name))

        if method == 'merge' and table_name not in session_history_tables:
            import_columns = [
                c['name'] for c in import_table
                if c['name'] in current_columns and not c['pk']
            ]
        else:
            import_columns = [
                c['name'] for c in import_table if c['name'] in current_columns
            ]

        table_columns[table_name] = import_columns
        insert_columns = ', '.join(import_columns)

        # Insert the data with ignore instead of replace to be safe
        db.action('INSERT OR IGNORE INTO {table} ({columns}) '
                  'SELECT {columns} FROM {from_db}.{from_table}'.format(
                      table=table_name,
                      columns=insert_columns,
                      from_db=from_db_name,
                      from_table=from_table_name))

    db.connection.execute('DETACH import_db')

    if method == 'merge':
        for table_name, columns in sorted(table_columns.items()):
            duplicate_columns = ', '.join(
                [c for c in columns if c not in ('id', 'reference_id')])
            logger.info(
                "Tautulli Database :: Removing duplicate rows from database table '%s'.",
                table_name)
            if table_name in session_history_tables[1:]:
                db.action('DELETE FROM {table} WHERE id NOT IN '
                          '(SELECT id FROM session_history)'.format(
                              table=table_name))
            else:
                db.action(
                    'DELETE FROM {table} WHERE id NOT IN '
                    '(SELECT MIN(id) FROM {table} GROUP BY {columns})'.format(
                        table=table_name, columns=duplicate_columns))

        logger.info("Tautulli Database :: Deleting temporary database tables.")
        for table_name in session_history_tables:
            db.action('DROP TABLE {table}_copy'.format(table=table_name))

    vacuum()

    logger.info("Tautulli Database :: Tautulli database import complete.")
    set_is_importing(False)
Example #55
0
    def build_text(self):
        from plexpy.notification_handler import CustomFormatter
        custom_formatter = CustomFormatter()

        try:
            subject = custom_formatter.format(str(self.subject), **self.parameters)
        except LookupError as e:
            logger.error("Tautulli Newsletter :: Unable to parse parameter %s in newsletter subject. Using fallback." % e)
            subject = str(self._DEFAULT_SUBJECT).format(**self.parameters)
        except Exception as e:
            logger.error("Tautulli Newsletter :: Unable to parse custom newsletter subject: %s. Using fallback." % e)
            subject = str(self._DEFAULT_SUBJECT).format(**self.parameters)

        try:
            body = custom_formatter.format(str(self.body), **self.parameters)
        except LookupError as e:
            logger.error("Tautulli Newsletter :: Unable to parse parameter %s in newsletter body. Using fallback." % e)
            body = str(self._DEFAULT_BODY).format(**self.parameters)
        except Exception as e:
            logger.error("Tautulli Newsletter :: Unable to parse custom newsletter body: %s. Using fallback." % e)
            body = str(self._DEFAULT_BODY).format(**self.parameters)

        try:
            message = custom_formatter.format(str(self.message), **self.parameters)
        except LookupError as e:
            logger.error("Tautulli Newsletter :: Unable to parse parameter %s in newsletter message. Using fallback." % e)
            message = str(self._DEFAULT_MESSAGE).format(**self.parameters)
        except Exception as e:
            logger.error("Tautulli Newsletter :: Unable to parse custom newsletter message: %s. Using fallback." % e)
            message = str(self._DEFAULT_MESSAGE).format(**self.parameters)

        return subject, body, message
Example #56
0
    def create_isl(self):
        path = self.payload['path']
        latency = int(self.payload['latency_ns'])
        a_switch = path[0]['switch_id']
        a_port = int(path[0]['port_no'])
        b_switch = path[1]['switch_id']
        b_port = int(path[1]['port_no'])
        speed = int(self.payload['speed'])
        available_bandwidth = int(self.payload['available_bandwidth'])

        a_switch_node = graph.find_one('switch',
                                       property_key='name',
                                       property_value='{}'.format(a_switch))
        if not a_switch_node:
            logger.error('Isl source was not found: %s', a_switch)
            return False

        b_switch_node = graph.find_one('switch',
                                       property_key='name',
                                       property_value='{}'.format(b_switch))
        if not b_switch_node:
            logger.error('Isl destination was not found: %s', b_switch)
            return False

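        # Lock this (switch, port) pair so concurrent events cannot race
        # between the existence check and the MERGE below.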
        with guard.get_isl_lock(a_switch, a_port):
            try:
                isl_exists_query = ("MATCH (a:switch)-[r:isl {{"
                                    "src_switch: '{}', "
                                    "src_port: {}, "
                                    "dst_switch: '{}', "
                                    "dst_port: {}}}]->(b:switch) return r")
                isl_exists = graph.run(
                    isl_exists_query.format(a_switch, a_port, b_switch,
                                            b_port)).data()

                if not isl_exists:
                    logger.info('Isl %s_%d creation request: timestamp=%s',
                                a_switch, a_port, self.timestamp)

                    isl_query = ("MATCH (u:switch {{name:'{}'}}), "
                                 "(r:switch {{name:'{}'}}) "
                                 "MERGE (u)-[:isl {{"
                                 "src_port: {}, "
                                 "dst_port: {}, "
                                 "src_switch: '{}', "
                                 "dst_switch: '{}', "
                                 "latency: {}, "
                                 "speed: {}, "
                                 "status: 'active', "
                                 "available_bandwidth: {}}}]->(r)")
                    graph.run(
                        isl_query.format(a_switch_node['name'],
                                         b_switch_node['name'], a_port, b_port,
                                         a_switch, b_switch, latency, speed,
                                         available_bandwidth))

                    logger.info('ISL between %s and %s created',
                                a_switch_node['name'], b_switch_node['name'])
                else:
                    logger.debug('Isl %s_%d update request: timestamp=%s',
                                 a_switch, a_port, self.timestamp)

                    isl_update_query = ("MATCH (a:switch)-[r:isl {{"
                                        "src_switch: '{}', "
                                        "src_port: {}, "
                                        "dst_switch: '{}', "
                                        "dst_port: {}}}]->(b:switch) "
                                        "set r.latency = {} return r")
                    graph.run(
                        isl_update_query.format(a_switch, a_port, b_switch,
                                                b_port, latency)).data()

                    logger.debug('ISL between %s and %s updated',
                                 a_switch_node['name'], b_switch_node['name'])

            except Exception as e:
                logger.exception('ISL between %s and %s creation error: %s',
                                 a_switch_node['name'], b_switch_node['name'],
                                 e.message)

        return True
Example #57
0
    def start(self):
        logger.info("Launching Windows system tray icon.")
        try:
            self.tray_icon.start()
        except Exception as e:
            logger.error("Unable to launch system tray icon: %s." % e)
Example #58
0
                try:
                    dbClient = Framework.createClient(protocol)
                    sid = getDbSid(dbClient, hostname)
                    if sid is None:
                        continue
                    if sid in sidList:
                        logger.debug('Database : ' + sid + ' already reported.')
                        continue
                    databaseServer = createDatabaseOSH(
                        hostOSH, dbClient, sid, dbClient.getDbVersion(),
                        dbClient.getAppVersion())
                    OSHVResult.add(databaseServer)
                    sidList.append(sid)
                except SQLException, sqlex:
                    logger.debug(sqlex.getMessage())
                except:
                    msg = logger.prepareFullStackTrace('')
                    errormessages.resolveAndReport(
                        msg, ClientsConsts.SQL_PROTOCOL_NAME, Framework)
            finally:
                if dbClient is not None:
                    dbClient.close()
        else:
            logger.debug('Protocol ', protocol, ' is of different type than ',
                         protocolType)
    if OSHVResult.size() == 0:
        Framework.reportWarning('Failed to connect using all protocols')
        logger.error('Failed to connect using all protocols')
    return OSHVResult
Example #59
0
def set_startup():
    if plexpy.WIN_SYS_TRAY_ICON:
        plexpy.WIN_SYS_TRAY_ICON.change_tray_icons()

    startup_reg_path = "Software\\Microsoft\\Windows\\CurrentVersion\\Run"

    exe = sys.executable
    run_args = [arg for arg in plexpy.ARGS if arg != '--nolaunch']
    if plexpy.FROZEN:
        args = [exe] + run_args
    else:
        args = [exe, plexpy.FULL_PATH] + run_args

    registry_key_name = '{}_{}'.format(common.PRODUCT, plexpy.CONFIG.PMS_UUID)

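    # Use pythonw.exe so Windows does not open a console window at login.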
    cmd = ' '.join(cmd_quote(arg)
                   for arg in args).replace('python.exe',
                                            'pythonw.exe').replace("'", '"')

    if plexpy.CONFIG.LAUNCH_STARTUP:
        # Rename old Tautulli registry key
        try:
            registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                                          startup_reg_path, 0,
                                          winreg.KEY_ALL_ACCESS)
            winreg.QueryValueEx(registry_key, common.PRODUCT)
            reg_value_exists = True
        except WindowsError:
            reg_value_exists = False

        if reg_value_exists:
            try:
                winreg.DeleteValue(registry_key, common.PRODUCT)
                winreg.CloseKey(registry_key)
            except WindowsError:
                pass

        try:
            winreg.CreateKey(winreg.HKEY_CURRENT_USER, startup_reg_path)
            registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                                          startup_reg_path, 0,
                                          winreg.KEY_WRITE)
            winreg.SetValueEx(registry_key, registry_key_name, 0,
                              winreg.REG_SZ, cmd)
            winreg.CloseKey(registry_key)
            logger.info(
                "Added Tautulli to Windows system startup registry key.")
            return True
        except WindowsError as e:
            logger.error(
                "Failed to create Windows system startup registry key: %s", e)
            return False

    else:
        # Check if registry value exists
        try:
            registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                                          startup_reg_path, 0,
                                          winreg.KEY_ALL_ACCESS)
            winreg.QueryValueEx(registry_key, registry_key_name)
            reg_value_exists = True
        except WindowsError:
            reg_value_exists = False

        if reg_value_exists:
            try:
                winreg.DeleteValue(registry_key, registry_key_name)
                winreg.CloseKey(registry_key)
                logger.info(
                    "Removed Tautulli from Windows system startup registry key."
                )
                return True
            except WindowsError as e:
                logger.error(
                    "Failed to delete Windows system startup registry key: %s",
                    e)
                return False

        # Nothing to remove; treat as success.
        return True
Example #60
0
def set_newsletter_config(newsletter_id=None, agent_id=None, **kwargs):
    if str(agent_id).isdigit():
        agent_id = int(agent_id)
    else:
        logger.error("Tautulli Newsletters :: Unable to set existing newsletter: invalid agent_id %s."
                     % agent_id)
        return False

    agent = next((a for a in available_newsletter_agents() if a['id'] == agent_id), None)

    if not agent:
        logger.error("Tautulli Newsletters :: Unable to retrieve existing newsletter agent: invalid agent_id %s."
                     % agent_id)
        return False

    config_prefix = 'newsletter_config_'
    email_config_prefix = 'newsletter_email_'

    newsletter_config = {k[len(config_prefix):]: kwargs.pop(k)
                         for k in list(kwargs.keys()) if k.startswith(config_prefix)}
    email_config = {k[len(email_config_prefix):]: kwargs.pop(k)
                    for k in list(kwargs.keys()) if k.startswith(email_config_prefix)}

    for cfg, val in email_config.items():
        # Check for password config keys with a blank password from the HTML form
        if 'password' in cfg and val == '    ':
            # Get the previous password so we don't overwrite it with a blank value
            old_newsletter_config = get_newsletter_config(newsletter_id=newsletter_id)
            email_config[cfg] = old_newsletter_config['email_config'][cfg]

    subject = kwargs.pop('subject')
    body = kwargs.pop('body')
    message = kwargs.pop('message')

    agent_class = get_agent_class(agent_id=agent['id'],
                                  config=newsletter_config, email_config=email_config,
                                  subject=subject, body=body, message=message)

    keys = {'id': newsletter_id}
    values = {'agent_id': agent['id'],
              'agent_name': agent['name'],
              'agent_label': agent['label'],
              'id_name': kwargs.get('id_name', ''),
              'friendly_name': kwargs.get('friendly_name', ''),
              'newsletter_config': json.dumps(agent_class.config),
              'email_config': json.dumps(agent_class.email_config),
              'subject': agent_class.subject,
              'body': agent_class.body,
              'message': agent_class.message,
              'cron': kwargs.get('cron'),
              'active': kwargs.get('active')
              }

    db = database.MonitorDatabase()
    try:
        db.upsert(table_name='newsletters', key_dict=keys, value_dict=values)
        logger.info("Tautulli Newsletters :: Updated newsletter agent: %s (newsletter_id %s)."
                    % (agent['label'], newsletter_id))
        newsletter_handler.schedule_newsletters(newsletter_id=newsletter_id)
        blacklist_logger()
        return True
    except Exception as e:
        logger.warn("Tautulli Newsletters :: Unable to update newsletter agent: %s." % e)
        return False