Example #1
    def get_user_stats(self, section_id=None):
        if not session.allow_session_library(section_id):
            return []

        monitor_db = database.MonitorDatabase()

        user_stats = []

        try:
            if str(section_id).isdigit():
                query = 'SELECT (CASE WHEN users.friendly_name IS NULL THEN users.username ' \
                        'ELSE users.friendly_name END) AS friendly_name, users.user_id, users.thumb, COUNT(user) AS user_count ' \
                        'FROM session_history ' \
                        'JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        'JOIN users ON users.user_id = session_history.user_id ' \
                        'WHERE section_id = ? ' \
                        'GROUP BY users.user_id ' \
                        'ORDER BY user_count DESC'
                result = monitor_db.select(query, args=[section_id])
            else:
                result = []
        except Exception as e:
            logger.warn(u"PlexPy Libraries :: Unable to execute database query for get_user_stats: %s." % e)
            result = []
        
        for item in result:
            row = {'friendly_name': item['friendly_name'],
                   'user_id': item['user_id'],
                   'user_thumb': item['thumb'],
                   'total_plays': item['user_count']
                   }
            user_stats.append(row)
        
        return session.mask_session_info(user_stats, mask_metadata=False)
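The CASE expression simply falls back to username when friendly_name is unset. A self-contained sketch against an in-memory SQLite database (schema reduced to the columns involved; table contents invented for illustration):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE users (user_id INTEGER, username TEXT, friendly_name TEXT)')
conn.executemany('INSERT INTO users VALUES (?, ?, ?)',
                 [(1, 'alice', None), (2, 'bob', 'Bobby')])
rows = conn.execute('SELECT (CASE WHEN friendly_name IS NULL THEN username '
                    'ELSE friendly_name END) AS friendly_name FROM users').fetchall()
print([r[0] for r in rows])  # ['alice', 'Bobby']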
Example #2
 def discover(self):
     try:
         ips = self.filterValidIps(self.__getDnsServerIPs())
         logger.info("Found %s dns servers used by host" % (len(ips)))
         self.serversIpList.extend(ips)
     except Exception, ex:
         logger.warn('Failed to get DNS Servers information. %s' % ex)
Example #3
def verifyDB(localDbClient, dbName):
    try:
        returnVal = -1
        dbStateQuery = 'SELECT db_name()'
        debugPrint(4, '[' + SCRIPT_NAME + ':verifyDB] Running query <%s>' % dbStateQuery)
        dbStateResultSet = doQuery(localDbClient, dbStateQuery)

        ## Return if query returns no results
        if dbStateResultSet is None:
            logger.warn('[' + SCRIPT_NAME + ':verifyDB] Unable to get database name!')
            return returnVal

        ## We have query results!
        while dbStateResultSet.next():
            databaseName = dbStateResultSet.getString(1).strip()
            if databaseName.lower().strip() == dbName.lower().strip():
                debugPrint(5, '[' + SCRIPT_NAME + ':verifyDB] Database name <%s> OK' % dbName)
                returnVal = 1
            else:
                logger.error('[' + SCRIPT_NAME + ':verifyDB] Database name mismatch!! Should be <%s>, got <%s>...' % (dbName, databaseName))
                return returnVal

        return returnVal
    except:
        excInfo = logger.prepareJythonStackTrace('')
        logger.warn('[' + SCRIPT_NAME + ':verifyDB] Exception: <%s>' % excInfo)
        pass
Example #4
    def discoverReplication(self, mysqlOsh):
        """
        Tries to find config variables related to mysql replication 
        @param ObjectStateHolder mysqlOsh mysql osh
        @return list list of OSHs
        """
        masterHostIp = self.getProperty('master-host')
        if not masterHostIp:
            return 
        if not netutils.isValidIp(masterHostIp):
            try:
                resolver = netutils.DnsResolverByShell(self.shell)
                masterHostIp = resolver.resolveIpsByHostname(masterHostIp)[0]
            except netutils.ResolveException:
                logger.warn('Failed to resolve Master Host into IP')
                return
        masterPort = self.getProperty('master-port')
        mysqlReplicationOsh = ObjectStateHolder('mysql_replication')
        mysqlReplicationOsh.setAttribute('data_name', 'MySQL Replication')
        mysqlReplicationOsh.setContainer(mysqlOsh)
        self.setAttribute(mysqlReplicationOsh, 'master_user', self.REPL_ARGS_MAPPING)
        self.setAttribute(mysqlReplicationOsh, 'master_connect_retry', self.REPL_ARGS_MAPPING)
        masterHostOsh = modeling.createHostOSH(masterHostIp)
        serviceAddressOsh = modeling.createServiceAddressOsh(masterHostOsh, masterHostIp, masterPort, modeling.SERVICEADDRESS_TYPE_TCP)
        clientServerLink = modeling.createLinkOSH('client_server', mysqlReplicationOsh, serviceAddressOsh)
        clientServerLink.setStringAttribute('clientserver_protocol', 'TCP')
        clientServerLink.setLongAttribute('clientserver_destport', int(masterPort))
#        masterMysqlOsh = modeling.createDatabaseOSH('mysql', 'MySQL. Port ' + masterPort, masterPort, masterHostIp, masterHostOsh)
#        useLink = modeling.createLinkOSH('use', masterHostOsh, serviceAddressOsh)
        return [masterHostOsh, serviceAddressOsh, clientServerLink, mysqlReplicationOsh]
Example #5
def resolveIpFromDns(Framework, ipOrDnsOrAlias, localShell, dnsServers = None):
    normalizedIp = str(ipOrDnsOrAlias).strip()

    if not normalizedIp or normalizedIp == "localhost" or (ip_addr.isValidIpAddress(normalizedIp) and (ip_addr.IPAddress(normalizedIp).is_loopback or ip_addr.IPAddress(normalizedIp).is_multicast)):
        logger.debug('Skipped ip [', normalizedIp, '] for next hop, because it is empty or loopback or not a valid ip address')
        return None

    if dnsServers is not None:
        logger.debug('Trying to resolve ip using provided dnsServers names')
        dnsResolver = netutils.DNSResolver(localShell)
        for dnsServer in dnsServers:
            logger.debug('Trying to resolve ip using DNS Server [', dnsServer, ']')
            try:
                resolvedIp = dnsResolver.resolveHostIp(normalizedIp, dnsServer)
                if resolvedIp is not None:
                    logger.debug('Resolved ip [', resolvedIp, '] from [', normalizedIp, '] using DNS Server [', dnsServer, ']')
                    return resolvedIp
            except:
                Framework.reportWarning(logger.prepareJythonStackTrace(''))
                logger.debug('Failed to resolve [', normalizedIp, ']')

    resolvedIp = None
    errorMessage = 'Failed to resolve ip from [' + normalizedIp + '] using configured local DNS Server or hosts file'
    try:
        logger.debug('Trying to resolve ip using local DNS server')
        resolvedIp = netutils.resolveIP(localShell, normalizedIp)
        if resolvedIp is not None:
            logger.debug('Resolved ip [', resolvedIp, '] from [', normalizedIp, '] using configured local DNS Server or hosts file')
            return resolvedIp
        else:
            Framework.reportWarning(errorMessage)
            logger.warn(errorMessage)
    except:
        # errorMessage and resolvedIp are initialized up front so this handler
        # cannot hit a NameError when netutils.resolveIP itself raises
        Framework.reportWarning(logger.prepareJythonStackTrace(''))
        logger.warn(errorMessage)
    return resolvedIp
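The guard at the top of resolveIpFromDns skips empty values, "localhost", loopback and multicast addresses before any DNS work is attempted. With the standard library ipaddress module the equivalent check can be sketched as:

import ipaddress

def should_skip_for_next_hop(candidate):
    # Sketch of the same guard using stdlib ipaddress instead of ip_addr.
    candidate = str(candidate).strip()
    if not candidate or candidate == 'localhost':
        return True
    try:
        ip = ipaddress.ip_address(u'%s' % candidate)
    except ValueError:
        return False  # not an IP literal; let DNS resolution try it
    return ip.is_loopback or ip.is_multicast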
Example #6
File: ast.py Project: bcmd/BCMD
def process_flux(term, work):
    tag = term[1]    
    logger.detail('Processing ' + tag + ' reaction')
    label = term[2][1]
    if label == '': label = default_label(tag + '__')
    while label in work['reactions'].keys():
        newlabel = default_label(tag + '__')
        logger.warn("Duplicate reaction label '" + label
                     + "', substituting '" + newlabel + "'")
        label = newlabel
    
    # I have no idea whether this is reasonable, but:
    # outflux reactions have an LHS and can (if necessary)
    # be given MA rates; influx reactions don't and can't
    if tag == 'outflux':
        terms = process_chemterm(term[3], work, -1)
        rate = process_rateterm(term[4], work, terms)
        lhs = terms
        rhs = []
    else:
        terms = process_chemterm(term[3], work, 1)
        rate = process_rateterm(term[4], work, None)
        lhs = []
        rhs = terms

    work['reactions'][label] = { 'type' : tag,
                                 'terms' : terms,
                                 'rate' : rate,
                                 'lhs' : lhs,
                                 'rhs': rhs,
                                 'ratespec': term[4] }
    consolidate_chems(work['reactions'][label])
Example #7
File: ast.py Project: bcmd/BCMD
def process_explicit_rate(term, work, lhs):
    logger.detail("Processing explicit rateterm")
    if len(term[2]) > 2:
        logger.warn("More than 1 rate term supplied, ignoring excess")
    
    i_expr, expr, deps = process_mathterm(term[2][1][1], work)
    return { 'i_expr':i_expr, 'expr':expr, 'depends':deps, 'mathterm':term[2][1][1] }
Example #8
File: ast.py Project: bcmd/BCMD
def finalise_outputs(work):
    logger.warn(work['outputs'], True)
    present = [ x for x in work['outputs'] if x in work['symbols']]
    if present:
        work['outputs'] = present
    else:
        work['outputs'] = work['roots']
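The pattern here is "filter, but fall back to a default when the filter would remove everything". In isolation:

def keep_known(outputs, symbols, roots):
    # Sketch of the same filter-or-fallback logic with generic names.
    present = [x for x in outputs if x in symbols]
    return present if present else roots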
Example #9
def connectToDb(localFramework, ipAddress, dbPort):
    try:
        theDbClient = None
        ## Get protocols
        protocols = localFramework.getAvailableProtocols(ipAddress, ClientsConsts.SQL_PROTOCOL_NAME)
        for protocolID in protocols:
            ## If this protocol entry is not for a Sybase DB, ignore it
            if localFramework.getProtocolProperty(protocolID, CollectorsConstants.SQL_PROTOCOL_ATTRIBUTE_DBTYPE) != 'Sybase':
                debugPrint(5, '[' + SCRIPT_NAME + ':DiscoveryMain] Ignoring non Sybase protocol entry...')
                continue
            ## Don't bother reconnecting if a connection has already been established
            if not theDbClient:
                ## Set DB properties
                dbConnectionProperties = Properties()
                dbConnectionProperties.setProperty(CollectorsConstants.PROTOCOL_ATTRIBUTE_PORT, dbPort)
                # Establish JDBC connection
                debugPrint(5, '[' + SCRIPT_NAME + ':connectToDb] Attempting connection to CiscoWorks database at port <%s>...' % dbPort)
                try:
                    theDbClient = localFramework.createClient(protocolID, dbConnectionProperties)
                except:
                    theDbClient and theDbClient.close()
        return theDbClient
    except:
        excInfo = logger.prepareJythonStackTrace('')
        logger.warn('[' + SCRIPT_NAME + ':connectToDb] Exception: <%s>' % excInfo)
        pass
Example #10
 def getConfigsFromHelp(self, fileMonitor):
     mysqlCommandList = []
     for cmd in self.HELP_OPTIONS_LIST:
         if self.shell.isWinOs():
             mysqlCommand = '"%s" %s' % (self.processPath, cmd)
         else:
             mysqlCommand = '%s %s' % (self.processPath, cmd)
         mysqlCommandList.append(mysqlCommand)
     try:
         help = self.shell.execAlternateCmdsList(mysqlCommandList)
         if self.shell.getLastCmdReturnCode() != 0:
             logger.error('Failed to get MySql help info.')            
     except:
         logger.error('Failed to get MySql help info. %s' % sys.exc_info()[1])
     else: 
         pathsFound = re.search('Default options.*order:.*\n(.*\n)', help)
         if pathsFound:
             filesLine = pathsFound.group(1)
             logger.debug('verifying existence of %s' % filesLine)
             # assume the file name has to end with an extension (.ext)
             lookupConfPaths = re.findall('((([a-zA-Z]:)|(~?\/)).*?[\\\/]?\.?\w+\.\w+)(\s|$)', filesLine)
             if lookupConfPaths:
                 for lookupConfPath in lookupConfPaths:
                     if fileMonitor.checkPath(lookupConfPath[0]):
                         self.configPath = lookupConfPath[0]
                         return self.configPath
     if not self.configPath:
         logger.warn('MySQL configuration file was not found in mysql help')
     return self.configPath
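The two regular expressions above do the heavy lifting: the first grabs the line that follows the "Default options ... order:" banner in the help output, the second pulls Windows- or Unix-style file paths out of that line. A standalone sketch with the patterns rewritten as raw strings (the sample help text is invented for illustration):

import re

help_text = ('Default options are read from the following files in the given order:\n'
             '/etc/my.cnf /etc/mysql/my.cnf ~/.my.cnf\n')

paths_found = re.search(r'Default options.*order:.*\n(.*\n)', help_text)
if paths_found:
    files_line = paths_found.group(1)
    # Each findall hit is a tuple of the five groups; [0] is the full path.
    candidates = re.findall(r'((([a-zA-Z]:)|(~?/)).*?[\\/]?\.?\w+\.\w+)(\s|$)', files_line)
    print([c[0] for c in candidates])  # ['/etc/my.cnf', '/etc/mysql/my.cnf', '~/.my.cnf']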
Example #11
def preparePatterns(excludePatternsList):
    result = []
    if excludePatternsList:
        patternList = excludePatternsList.split(";")

        wildcardValidationPattern = Pattern.compile("[\d*?.]+")

        wildcardSubstitutions = [(Pattern.compile("\."), "\\\\."),
                                 (Pattern.compile("\*+"), ".*"),
                                 (Pattern.compile("\?"), ".")
                                 ]

        for patternStr in patternList:
            if patternStr:
                patternStr = patternStr.strip()
                wildcardValidationMatcher = wildcardValidationPattern.matcher(String(patternStr))
                if wildcardValidationMatcher.matches():

                    for (rPattern, rStr) in wildcardSubstitutions:
                        rMatcher = rPattern.matcher(String(patternStr))
                        patternStr = rMatcher.replaceAll(rStr)

                    try:
                        pattern = Pattern.compile(patternStr)
                        result.append(pattern)
                    except:
                        logger.warn("Exception '%s' when compiling pattern '%s', pattern is ignored" % (sys.exc_info()[0], patternStr))

                else:
                    logger.warn("Ignoring invalid wildcard pattern '%s'" % patternStr)

    return result
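preparePatterns converts simple IP wildcards ("10.1.*", "192.168.?.1") into anchored regular expressions via Java's Pattern class (the script runs under Jython). The same conversion can be sketched in pure Python; substitution order matters, since literal dots must be escaped before '*' and '?' are expanded:

import re

def prepare_patterns(exclude_patterns):
    # Pure-Python sketch of the Jython logic above; assumed equivalent.
    result = []
    for pattern_str in exclude_patterns.split(';'):
        pattern_str = pattern_str.strip()
        if not pattern_str or not re.match(r'^[\d*?.]+$', pattern_str):
            continue  # accept only digits, '.', '*' and '?'
        pattern_str = re.sub(r'\.', r'\\.', pattern_str)   # escape dots first
        pattern_str = re.sub(r'\*+', '.*', pattern_str)    # '*' -> '.*'
        pattern_str = re.sub(r'\?', '.', pattern_str)      # '?' -> '.'
        result.append(re.compile(pattern_str))
    return result

# prepare_patterns('10.1.*;192.168.?.1') yields regexes matching
# '10.1.' plus anything, and '192.168.<one char>.1'.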
Example #12
def getLanguage(framework):

    language = None
    try:
        defaultClient = None
        try:
            factory = hyperv.WmiClientFactory(framework)
            defaultClient = factory.createClient()
            if defaultClient is not None:
                wmiProvider = wmiutils.WmiAgentProvider(defaultClient)
                languageDiscoverer = wmiutils.LanguageDiscoverer(wmiProvider)
                language = languageDiscoverer.getLanguage()
        finally:
            if defaultClient is not None:
                try:
                    defaultClient.close()
                except:
                    pass
    except:
        logger.warnException("Exception while determining OS language")

    if language is None:
        logger.warn("Failed to determine language of target system, default language is used")
        language = wmiutils.DEFAULT_LANGUAGE
    
    return language
Example #13
    def getTopology(self):
        self.linksMap = defaultdict(list)
        for (linkMapping, link) in self.__links:
            targetType = linkMapping.getTargetType()
            sourceType1 = linkMapping.getSourceEnd1Type()
            sourceType2 = linkMapping.getSourceEnd2Type()
            sourceId1 = link.getEnd1Id()
            sourceId2 = link.getEnd2Id()
            targetType1 = linkMapping.getTargetEnd1Type()
            targetType2 = linkMapping.getTargetEnd2Type()
            isContainer = linkMapping.isContainer()

            targetEnd1Id = self.__createComplexId(sourceId1, sourceType1, targetType1)
            targetEnd2Id = self.__createComplexId(sourceId2, sourceType2, targetType2)

            msg = "%s -- %s --> %s" % (targetEnd1Id, targetType, targetEnd2Id)
            logger.warn(msg)
            if self.__hasOsh(targetEnd1Id) and self.__hasOsh(targetEnd2Id):
                logger.info(msg)

                (osh1, osh2) = (self.__getOsh(targetEnd1Id), self.__getOsh(targetEnd2Id))
                if linkMapping.isReverse():
                    (osh1, osh2) = (osh2, osh1)

                link_osh = modeling.createLinkOSH(targetType, osh1, osh2)
                self.__vector.add(link_osh)
                self.linksMap[osh1].append(link_osh)
                self.linksMap[osh2].append(link_osh)
                if targetType == 'composition' or isContainer:
                    osh2.setContainer(osh1)

        self.addValidCis()

        return self.__vector
Example #14
File: dbfile.py Project: ctb/pygr
def open_index(filename, flag='r', useHash=False, mode=0666):
    if bsddb is None:
        d = open_anydbm(filename, flag)
        if not useHash:
            logger.warn('Falling back to hash index: unable to import bsddb')
        return d
    return open_bsddb(filename, flag, useHash, mode)
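The guard clause depends on the module-level import having degraded gracefully. A sketch of the import pattern this code assumes (module name as used above):

try:
    import bsddb  # preferred backend
except ImportError:
    bsddb = None  # open_index() will fall back to anydbm and warn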
Example #15
File: plextv.py Project: MikeDawg/plexpy
    def get_devices_list(self):
        devices = self.get_plextv_devices_list(output_format='xml')

        try:
            xml_head = devices.getElementsByTagName('Device')
        except Exception as e:
            logger.warn(u"PlexPy PlexTV :: Unable to parse XML for get_devices_list: %s." % e)
            return []

        devices_list = []
        for a in xml_head:
            device = {"device_name": helpers.get_xml_attr(a, 'name'),
                      "product": helpers.get_xml_attr(a, 'product'),
                      "product_version": helpers.get_xml_attr(a, 'productVersion'),
                      "platform": helpers.get_xml_attr(a, 'platform'),
                      "platform_version": helpers.get_xml_attr(a, 'platformVersion'),
                      "device": helpers.get_xml_attr(a, 'device'),
                      "model": helpers.get_xml_attr(a, 'model'),
                      "vendor": helpers.get_xml_attr(a, 'vendor'),
                      "provides": helpers.get_xml_attr(a, 'provides'),
                      "device_identifier": helpers.get_xml_attr(a, 'clientIdentifier'),
                      "device_id": helpers.get_xml_attr(a, 'id'),
                      "token": helpers.get_xml_attr(a, 'token')
                      }
            devices_list.append(device)

        return devices_list
Example #16
    def action(self, query, args=None, return_last_id=False):
        if query is None:
            return

        with db_lock:
            sql_result = None
            attempts = 0

            while attempts < 5:
                try:
                    with self.connection as c:
                        if args is None:
                            sql_result = c.execute(query)
                        else:
                            sql_result = c.execute(query, args)
                    # Our transaction was successful, leave the loop
                    break

                except sqlite3.OperationalError as e:
                    if "unable to open database file" in e or "database is locked" in e:
                        logger.warn(u"PlexPy Database :: Database Error: %s", e)
                        attempts += 1
                        time.sleep(1)
                    else:
                        logger.error(u"PlexPy Database :: Database error: %s", e)
                        raise

                except sqlite3.DatabaseError as e:
                    logger.error(u"PlexPy Database :: Fatal Error executing %s :: %s", query, e)
                    raise

            return sql_result
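Stripped of the PlexPy specifics, the loop above is a bounded retry on transient SQLite errors: locked-database failures are retried up to five times with a one-second pause, anything else is re-raised immediately. A minimal sketch (function and parameter names invented):

import sqlite3
import time

def execute_with_retry(connection, query, args=None, attempts=5, delay=1):
    # Retry transient failures; the connection context manager commits
    # on success and rolls back on error.
    for _ in range(attempts):
        try:
            with connection:
                return connection.execute(query, args or [])
        except sqlite3.OperationalError as e:
            if 'database is locked' in str(e) or 'unable to open database file' in str(e):
                time.sleep(delay)
            else:
                raise
    return None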
Example #17
def cvapi_check(web=None):
    import logger
    if web is None: logger.fdebug('[ComicVine API] ComicVine API Check Running...')
    if mylar.CVAPI_TIME is None or mylar.CVAPI_TIME == '':
        c_date = now()
        c_obj_date = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S")
        mylar.CVAPI_TIME = c_obj_date
    else:
        if isinstance(mylar.CVAPI_TIME, unicode):
            c_obj_date = datetime.datetime.strptime(mylar.CVAPI_TIME,"%Y-%m-%d %H:%M:%S")
        else:
            c_obj_date = mylar.CVAPI_TIME
    if web is None: logger.fdebug('[ComicVine API] API Start Monitoring Time (~15mins): ' + str(mylar.CVAPI_TIME))
    now_date = now()
    n_date = datetime.datetime.strptime(now_date,"%Y-%m-%d %H:%M:%S")
    if web is None: logger.fdebug('[ComicVine API] Time now: ' + str(n_date))
    absdiff = abs(n_date - c_obj_date)
    mins = round(((absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0),2)
    if mins < 15:
        if web is None: logger.info('[ComicVine API] Comicvine API count now at : ' + str(mylar.CVAPI_COUNT) + ' / ' + str(mylar.CVAPI_MAX) + ' in ' + str(mins) + ' minutes.')
        if mylar.CVAPI_COUNT > mylar.CVAPI_MAX:
            cvleft = 15 - mins
        if web is None: logger.warn('[ComicVine API] You have already hit your API limit (' + str(mylar.CVAPI_MAX) + ') with ' + str(cvleft) + ' minutes remaining. Best be slowing down, cowboy.')
    elif mins > 15:
        mylar.CVAPI_COUNT = 0
        c_date = now()
        mylar.CVAPI_TIME = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S")
        if web is None: logger.info('[ComicVine API] 15 minute API interval resetting [' + str(mylar.CVAPI_TIME) + ']. Resetting API count to : ' + str(mylar.CVAPI_COUNT))

    if web is None:
        return        
    else:
        line = str(mylar.CVAPI_COUNT) + ' hits / ' + str(mins) + ' minutes'
        return line
Example #18
def getProcessorsByUnitaryComputerSystem(client, unitaryComputerSystem):
    '''
    CimClient, UnitaryComputerSystem -> list(Processor)
    Get Processors by UnitaryComputerSystem
    '''
    processorInstances = client.getInstances("OMC_Processor")
    
    processors = []
    ignoredProcessorsCount = 0
    for processorInstance in processorInstances:
        systemName = cim_discover.cleanString(_getCimInstanceProperty(processorInstance, 'SystemName'))
        # uuid of cpu should match uuid of ESX, safe check
        if unitaryComputerSystem.name == systemName:
            processor = Processor()
            processor.setObjectPath(processorInstance.getObjectPath())
            processor.systemName = systemName
            processor.cpuStatus = cim_discover.getIntFromCimInt(_getCimInstanceProperty(processorInstance, 'CPUStatus'))
            processor.elementName = cim_discover.cleanString(_getCimInstanceProperty(processorInstance, 'ElementName'))
            modelName = cim_discover.cleanString(_getCimInstanceProperty(processorInstance, 'ModelName'))
            processor.modelName = modelName and re.sub(r"\s+", " ", modelName)
            processor.currentClockSpeed = cim_discover.getIntFromCimInt(_getCimInstanceProperty(processorInstance, 'CurrentClockSpeed'))
            processor.numberOfEnabledCores = cim_discover.getIntFromCimInt(_getCimInstanceProperty(processorInstance, 'NumberOfEnabledCores'))
            processor.enabledState = cim_discover.getIntFromCimInt(_getCimInstanceProperty(processorInstance, 'EnabledState'))
            
            processors.append(processor)
        else:
            ignoredProcessorsCount += 1
            
    if ignoredProcessorsCount > 0:
        logger.warn("Ignored %s processors due to mismatching UUID or being disabled" % ignoredProcessorsCount)
    
    return processors
Example #19
def getMemoryByUnitaryComputerSystem(client, unitaryComputerSystem):
    '''
    CimClient, UnitaryComputerSystem -> list(Memory)
    Get Memory by UnitaryComputerSystem
    '''
    memoryInstances = client.getInstances("OMC_Memory")
    
    memoryList = []
    ignoredMemoryCount = 0
    for memoryInstance in memoryInstances:
        systemName = cim_discover.cleanString(_getCimInstanceProperty(memoryInstance, 'SystemName'))
        # uuid of memory should match uuid of ESX, safe check
        if unitaryComputerSystem.name == systemName:
            memory = Memory()
            memory.setObjectPath(memoryInstance.getObjectPath())
            memory.systemName = systemName
            memory.numberOfBlocks = cim_discover.getIntFromCimInt(_getCimInstanceProperty(memoryInstance, "NumberOfBlocks"))
            memory.blockSize = cim_discover.getIntFromCimInt(_getCimInstanceProperty(memoryInstance, "BlockSize"))
            memoryList.append(memory)
        else:
            ignoredMemoryCount += 1
            
    if ignoredMemoryCount > 0:
        logger.warn("Ignored %s memory instances due to mismatching UUID")
    
    return memoryList
Example #20
File: users.py Project: MikeDawg/plexpy
    def get_filters(self, user_id=None):
        import urlparse

        if not user_id:
            return {}

        try:
            monitor_db = database.MonitorDatabase()
            query = 'SELECT filter_all, filter_movies, filter_tv, filter_music, filter_photos FROM users ' \
                    'WHERE user_id = ?'
            result = monitor_db.select_single(query, args=[user_id])
        except Exception as e:
            logger.warn(u"PlexPy Users :: Unable to execute database query for get_filters: %s." % e)
            result = {}

        filters_list = {}
        for k, v in result.iteritems():
            filters = {}
                
            for f in v.split('|'):
                if 'contentRating=' in f or 'label=' in f:
                    filters.update(dict(urlparse.parse_qsl(f)))
                        
            filters['content_rating'] = tuple(f for f in filters.pop('contentRating', '').split(',') if f)
            filters['labels'] = tuple(f for f in filters.pop('label', '').split(',') if f)

            filters_list[k] = filters

        return filters_list
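Each users row stores filters as a pipe-separated list of query-string fragments; only the contentRating and label fragments are unpacked. A standalone sketch of the parsing step (Python 2 urlparse, to match the code above; the sample value is invented):

import urlparse

sample = 'contentRating=G,PG|label=kids,family'
filters = {}
for f in sample.split('|'):
    if 'contentRating=' in f or 'label=' in f:
        filters.update(dict(urlparse.parse_qsl(f)))

content_rating = tuple(x for x in filters.pop('contentRating', '').split(',') if x)
labels = tuple(x for x in filters.pop('label', '').split(',') if x)
# content_rating == ('G', 'PG'); labels == ('kids', 'family')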
Example #21
    def onConnection(self, context):
        '''
        Method handles successful connection described by context
        '''
        
        self.connected = True
        
        try:

            vector = self.discoveryFunction(context, self.framework)

            if vector is not None:
                
                logger.debug(" -- Sending vector of %s objects" % vector.size())
                if self._logVector:
                    logger.debug(vector.toXmlString())
                
                self.framework.sendObjects(vector)
                self.framework.flushObjects()
            
            else:
                logger.warn("Discovery function returned result vector that is None")
        
        except JException, ex:
            msg = ex.getMessage()
            msg = cim_discover.translateErrorMessage(msg)
            logger.debug(msg)
            errormessages.resolveAndReport(msg, cim.Protocol.DISPLAY, self.framework)
Example #22
def resolveIpFromDnsPortipPortconcepts(Framework, OSHVResult, ipPortconcepts, localShell, dnsServers = None):
    logger.debug('Resolving concepts')
    for ipPortConceptEntry in ipPortconcepts.entrySet():
        conceptName = ipPortConceptEntry.getKey()
        conceptFields = ipPortConceptEntry.getValue()
        logger.debug('processing [', conceptName, ']')
        PROVIDER_IPs = None
        PROVIDER_PORTs = None
        for conceptFieldEntry in conceptFields.entrySet():
            fieldName = conceptFieldEntry.getKey().upper()
            if fieldName == PROVIDER_IP:
                PROVIDER_IPs = conceptFieldEntry.getValue()
            elif fieldName == PROVIDER_PORT:
                PROVIDER_PORTs = conceptFieldEntry.getValue()

        if PROVIDER_IPs is not None:
            logger.debug('for concept [', conceptName, '].[', PROVIDER_IP, '] found [', str(len(PROVIDER_IPs)), '] values')
            if PROVIDER_PORTs is None:
                processIps(Framework, OSHVResult, PROVIDER_IPs, localShell)
            elif len(PROVIDER_IPs) != len(PROVIDER_PORTs):
                errorMessage = 'There is a mismatch between the number of IP addresses and the number of ports that were found. The concept [' + conceptName + '].[' + PROVIDER_IP + '] found [' + str(len(PROVIDER_IPs)) + '] values while for [' + conceptName + '].[' + PROVIDER_PORT + '] found [' + str(len(PROVIDER_PORTs)) + '] values'
                Framework.reportWarning(errorMessage)
                logger.warn(errorMessage)
                processIps(Framework, OSHVResult, PROVIDER_IPs, localShell)
            else:
                for index in range(len(PROVIDER_IPs)):
                    resolvedIp = resolveIpFromDns(Framework, PROVIDER_IPs[index], localShell, dnsServers)
                    if resolvedIp is not None:
                        processIpPort(Framework, OSHVResult, resolvedIp, PROVIDER_PORTs[index], localShell, dnsServers)
        else:
            logger.error('No ' + PROVIDER_IP + ' field returned for concept [', conceptName, ']')
Example #23
 def getSystemInfo(self):
     ''' Get general system info (manufacturer, name, model and domain)
     If host is not in domain, WORKGROUP name will be returned instead of
     domain.
     @note: Host name can be shorter than original due to AD restrictions,
     only 15 symbols. Recommended to use discoverHostInfo method
     @types: -> HostDo
     @raise Exception: WMI query failed
     '''
     queryBuilder = self._wmiProvider.getBuilder('Win32_ComputerSystem')
     queryBuilder.addWmiObjectProperties('Manufacturer', 'Name', 'Model',
                                         'Domain', 'NumberOfProcessors')
     items = self._wmiProvider.getAgent().getWmiData(queryBuilder)
     if not items:
         raise Exception("WMI query failed. No data returned")
     host = HostDo()
     system = items[0]
     # make sure that host name will be in lower case to prevent
     # from false history changes
     host.hostName = system.Name.lower()
     host.hostManufacturer = system.Manufacturer
     host.hostModel = system.Model
     if system.Domain:
         host.osDomain = system.Domain
     if system.NumberOfProcessors:
         try:
             cpuNumber = int(system.NumberOfProcessors.strip())
             host.winProcessorsNumber = cpuNumber
         except:
             logger.warn('Number of processors value is not an integer'
                         ' type: %s' % system.NumberOfProcessors)
     return host
Example #24
File: users.py Project: MikeDawg/plexpy
    def delete_all_history(self, user_id=None):
        monitor_db = database.MonitorDatabase()

        try:
            if str(user_id).isdigit():
                logger.info(u"PlexPy Users :: Deleting all history for user id %s from database." % user_id)
                session_history_media_info_del = \
                    monitor_db.action('DELETE FROM '
                                      'session_history_media_info '
                                      'WHERE session_history_media_info.id IN (SELECT session_history_media_info.id '
                                      'FROM session_history_media_info '
                                      'JOIN session_history ON session_history_media_info.id = session_history.id '
                                      'WHERE session_history.user_id = ?)', [user_id])
                session_history_metadata_del = \
                    monitor_db.action('DELETE FROM '
                                      'session_history_metadata '
                                      'WHERE session_history_metadata.id IN (SELECT session_history_metadata.id '
                                      'FROM session_history_metadata '
                                      'JOIN session_history ON session_history_metadata.id = session_history.id '
                                      'WHERE session_history.user_id = ?)', [user_id])
                session_history_del = \
                    monitor_db.action('DELETE FROM '
                                      'session_history '
                                      'WHERE session_history.user_id = ?', [user_id])

                return 'Deleted all items for user_id %s.' % user_id
            else:
                return 'Unable to delete items. Input user_id not valid.'
        except Exception as e:
            logger.warn(u"PlexPy Users :: Unable to execute database query for delete_all_history: %s." % e)
Example #25
 def initConnectionConfigurations(self):
     contextsMap = {}
     for ip in self.ips:
         
         credentialsIdList = self.framework.getAvailableProtocols(ip, VcloudProtocol.SHORT)
         if not credentialsIdList:
             logger.warn("No credentials for IP %s found" % ip)
             msg = errormessages.makeErrorMessage(VcloudProtocol.DISPLAY, None, errormessages.ERROR_NO_CREDENTIALS)
             connectionContext = ConnectionContext()
             connectionContext.ipAddress = ip
             connectionContext.warnings.append(msg)
             self.connectionHandler.onFailure(connectionContext)
             continue
         
         contextsByCredentialsId = {}
         for credentialsId in credentialsIdList:
             
             connectionContext = ConnectionContext()
             connectionContext.ipAddress = ip
             connectionContext.credentialsId = credentialsId
             
             contexts = []
             self.urlGenerator.generate(connectionContext)
             for url in self.urlGenerator:
                 connectionContextWithUrl = copy.copy(connectionContext)
                 connectionContextWithUrl.urlString = url
                 contexts.append(connectionContextWithUrl)
             
             if contexts:
                 contextsByCredentialsId[credentialsId] = contexts
         
         if contextsByCredentialsId:
             contextsMap[ip] = contextsByCredentialsId
     
     self.contextsMap = contextsMap
Example #26
    def getModelAndBiosUuid(self):
        '''@types: -> HostDo
        @raise Exception: WMI query failed
        '''
        convertToMicrosoftStandart = GeneralSettingsConfigFile.getInstance().getPropertyStringValue('setBiosUuidToMicrosoftStandart', 'false')

        hostDo = HostDo()
        queryBuilder = self._wmiProvider.getBuilder('win32_ComputerSystemProduct')
        queryBuilder.addWmiObjectProperties('uuid', 'name')
        computerProductList = self._wmiProvider.getAgent().getWmiData(queryBuilder)
        for computerProduct in computerProductList:
            if computerProduct.uuid:
                if (re.match(r"(0{8}-0{4}-0{4}-0{4}-0{12})", computerProduct.uuid) or
                     re.match(r"([fF]{8}-[fF]{4}-[fF]{4}-[fF]{4}-[fF]{12})", computerProduct.uuid)):
                    logger.debug('Invalid UUID was received. Skipping.')
                    continue
                if convertToMicrosoftStandart.lower() == 'false':
                    #returned 00010203-0405-0607-0809-0a0b0c0d0e0f
                    #should be 03020100-0504-0706-0809-0a0b0c0d0e0f
                    byteStyle = re.match(r"(\w{2})(\w{2})(\w{2})(\w{2})\-(\w{2})(\w{2})-(\w{2})(\w{2})(.*)", computerProduct.uuid)
                    if byteStyle:
                        group1 = byteStyle.group(4) + byteStyle.group(3) + byteStyle.group(2) + byteStyle.group(1)
                        group2 = byteStyle.group(6) + byteStyle.group(5)
                        group3 = byteStyle.group(8) + byteStyle.group(7)
                        uuidFormated = group1 + '-' + group2 + '-' + group3 + byteStyle.group(9)
                        hostDo.biosUUID = uuidFormated
                    else:
                        logger.warn('UUID is not in proper format.')
                else:
                    hostDo.biosUUID = computerProduct.uuid
                    logger.warn('BIOS UUID is reported according to Microsoft definitions since parameter setBiosUuidToMicrosoftStandart is set to True.')

            hostDo.hostModel = computerProduct.name

        return hostDo
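The regex groups above byte-swap the first three UUID fields, which SMBIOS stores little-endian, while the last two fields keep their byte order. A compact sketch of the same conversion:

import re

def swap_uuid_endianness(uuid_str):
    # '00010203-0405-0607-...' -> '03020100-0504-0706-...' (sketch)
    m = re.match(r'(\w{2})(\w{2})(\w{2})(\w{2})-(\w{2})(\w{2})-(\w{2})(\w{2})(.*)', uuid_str)
    if not m:
        return uuid_str
    g = m.groups()
    return (g[3] + g[2] + g[1] + g[0] + '-' +
            g[5] + g[4] + '-' + g[7] + g[6] + g[8])

# swap_uuid_endianness('00010203-0405-0607-0809-0a0b0c0d0e0f')
# -> '03020100-0504-0706-0809-0a0b0c0d0e0f'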
Example #27
    def getVappsForVdc(self, vdcInstance, vcloudClient):
        from com.vmware.vcloud.sdk import Vapp
        from com.vmware.vcloud.sdk import VCloudException
        vappByName = {}

        _vappRefsByName = vdcInstance.getVappRefsByName()
        if _vappRefsByName.isEmpty():
            return vappByName
        
        for vappName in _vappRefsByName.keySet():
            reference = _vappRefsByName.get(vappName)
            if reference:
                try:
                    vappInstance = Vapp.getVappByReference(vcloudClient, reference)
                    vappResource = vappInstance.getResource()
                    
                    vapp = vcloud.Vapp(vappName)
                    vapp.setInstance(vappInstance)
                    vapp.description = vappResource.getDescription()
                    vapp.isDeployed = vappResource.isDeployed()
                    vapp.status.set(vappResource.getStatus())
                    
                    vappByName[vappName] = vapp
                except VCloudException, vex:
                    logger.warn("Failed to retrieve vApp by reference, name = '%s', message = '%s'" % (reference.getName(), vex.getMessage()))
        return vappByName
Example #28
 def getCatalogEntriesByTypeForCatalog(self, catalogInstance, vcloudClient):
     from com.vmware.vcloud.sdk import CatalogItem 
     from com.vmware.vcloud.sdk import VCloudException
     
     catalogEntriesByType = {}
     catalogEntriesByType[VcloudReferenceType.MEDIA] = {}
     catalogEntriesByType[VcloudReferenceType.VAPP_TEMPLATE] = {}
     
     _methodByEntityType = {
         VcloudReferenceType.MEDIA : self.getMediaByReference,
         VcloudReferenceType.VAPP_TEMPLATE : self.getVappTemplateByReference
     }
     
     _catalogItemRefs = catalogInstance.getCatalogItemReferences()
     if _catalogItemRefs.isEmpty():
         return catalogEntriesByType
     
     for catalogItemReference in _catalogItemRefs:
         catalogItemInstance = CatalogItem.getCatalogItemByReference(vcloudClient, catalogItemReference)
         entityReference = catalogItemInstance.getEntityReference()
         entityType = entityReference.getType()
         
         method = _methodByEntityType.get(entityType)
         if method is not None and entityReference is not None:
             try:
                 entity = method(entityReference, vcloudClient)
                 entityName = entity.getName()
                 catalogEntriesByType[entityType][entityName] = entity
             except VCloudException, vex:
                 logger.warn("Failed to retrieve catalog entry by reference, name = '%s', message = '%s'" % (entityReference.getName(), vex.getMessage()))
     return catalogEntriesByType
Example #29
 def getProcesses(self):
     ''' Get information about all non-system processes
     -> list(Process)
     @command: wmic path Win32_Process get CommandLine, CreationDate, ExecutablePath, Name, ProcessId
     @raise Exception: if WMI query failed
     '''
     queryBuilder = self._provider.getBuilder('Win32_Process')
     queryBuilder.addWmiObjectProperties('Name', 'ProcessId', 'CommandLine', 'ExecutablePath', 'CreationDate')
     processes = []
     for info in self._provider.getAgent().getWmiData(queryBuilder):
         name = info.Name
         pid = info.ProcessId
         if pid == '-1' or not str(pid).isdigit():
             logger.debug("Skip process '%s'. It is system process or has non numeric PID" % name)
             continue
         startupTime = info.CreationDate
         try:
             startupTime = modeling.getDateFromUtcString(startupTime)
         except ValueError, ve:
             logger.warn(str(ve))
             startupTime = None
         cmdline = self.__getCommandLineWithProcessName(info.CommandLine, name)
         # process argument list
         argsMatch = re.match('("[^"]+"|[^"]\S+)\s+(.+)$', cmdline)
         parameters = argsMatch and argsMatch.group(2) or None
         process = Process(name, cmdline)
         process.setPid(pid)
         process.parameters = parameters
         process.path = info.ExecutablePath
         processes.append(process)
     return processes
Example #30
 def __getDnsServerIPs(self):
     '''
     @types: -> list[str]
     @raise Exception: WMI query failed
     '''
     ips = []
     clazz = 'Win32_NetworkAdapterConfiguration'
     queryBuilder = self._wmiProvider.getBuilder(clazz)
     queryBuilder.addWmiObjectProperties('dnsServerSearchOrder')
     queryBuilder.addWhereClause('domainDnsRegistrationEnabled <> NULL')
     agent = self._wmiProvider.getAgent()
     dnsServersConfigurationList = agent.getWmiData(queryBuilder)
     for dnsServersConfiguration in dnsServersConfigurationList:
         dnsIps = dnsServersConfiguration.dnsServerSearchOrder
         # depending on protocol this field represented as CSV string
         # or list of values
         if not isinstance(dnsIps, types.ListType):
             dnsIps = map(string.strip, str(dnsIps).split(','))
         for ip in dnsIps:
             if ip:
                 try:
                     if ip_addr.isValidIpAddressNotZero(ip):
                         ips.append(ip_addr.IPAddress(ip))
                 except:
                     logger.warn('Failed to parse to IP value "%s"' % ip)
     return ips
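The isinstance check above exists because, depending on the protocol, WMI hands back dnsServerSearchOrder either as a real list or as one comma-separated string. The normalization in isolation:

def normalize_dns_ips(dns_ips):
    # Sketch: accept a list or a CSV string, return a clean list of strings.
    if not isinstance(dns_ips, list):
        dns_ips = [part.strip() for part in str(dns_ips).split(',')]
    return [ip for ip in dns_ips if ip]

# normalize_dns_ips('8.8.8.8, 8.8.4.4') == normalize_dns_ips(['8.8.8.8', '8.8.4.4'])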
Example #31
def parseResourceGroups(cluster, scrgadmOutput):
    resourceGroupsByName = {}

    results = re.findall(r"Res Group name:\s+([\w.-]+)", scrgadmOutput)
    for groupName in results:
        group = ResourceGroup(groupName)
        logger.debug("Found resource group '%s'" % groupName)

        attributesMap = {}
        groupAttributeLines = re.findall(
            r"\(%s\) Res Group([^\n]+)" % re.escape(groupName), scrgadmOutput)
        for attributeLine in groupAttributeLines:
            attributeLine = attributeLine.strip()
            elements = re.split(r":", attributeLine, 1)
            if len(elements) == 2:
                attributeName = elements[0] and elements[0].strip()
                attributeValue = elements[1] and elements[1].strip()
                if attributeName and attributeValue and attributeValue.lower(
                ) != '<null>':
                    attributesMap[attributeName] = attributeValue
            else:
                logger.warn(
                    "Ignoring invalid resource group attribute line: '%s'" %
                    attributeLine)

        mode = attributesMap.get('mode')
        if mode in (ResourceGroup.MODE_FAILOVER, ResourceGroup.MODE_SCALABLE):
            group.mode = mode

        maxPrimaries = attributesMap.get('Maximum_primaries')
        if maxPrimaries:
            try:
                group.maxPrimaries = int(maxPrimaries)
            except:
                logger.warn(
                    "Failed to convert maximum_primaries value '%s' to integer"
                    % maxPrimaries)

        desiredPrimaries = attributesMap.get('Desired_primaries')
        if desiredPrimaries:
            try:
                group.desiredPrimaries = int(desiredPrimaries)
            except:
                logger.warn(
                    "Failed to convert desired_primaries value '%s' to integer"
                    % desiredPrimaries)

        managementState = attributesMap.get('management state')
        if managementState:
            group.isManaged = managementState.lower() == 'managed'

        failbackMode = attributesMap.get('Failback')
        if failbackMode:
            group.isFailback = failbackMode.lower() == 'true'

        systemGroup = attributesMap.get('system')
        if systemGroup:
            group.isSystem = systemGroup.lower() == 'true'

        autoStart = attributesMap.get('Auto_start_on_new_cluster')
        if autoStart:
            group.autoStartOnNewCluster = autoStart.lower() == 'true'

        nodeList = attributesMap.get('Nodelist')
        if nodeList:
            nodes = re.split(r"\s+", nodeList)
            for node in nodes:
                group.configuredNodes.add(node)
        else:
            logger.debug(
                "Resource group '%s' does not have nodes list configured" %
                groupName)
            # we assume all nodes in the cluster can run this group
            for nodeName in cluster.nodesByName.keys():
                group.configuredNodes.add(nodeName)

        resourceGroupsByName[groupName] = group

    cluster.resourceGroupsByName = resourceGroupsByName
Example #32
def GetComicInfo(comicid,dom):

    #comicvine isn't as up-to-date with issue counts..
    #so this can get really buggered, really fast.
    tracks = dom.getElementsByTagName('issue')
    try:
        cntit = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
    except:
        cntit = len(tracks)
    trackcnt = len(tracks)
    logger.fdebug("number of issues I counted: " + str(trackcnt))
    logger.fdebug("number of issues CV says it has: " + str(cntit))
    # if the two don't match, use trackcnt as count_of_issues might not be up-to-date for some reason
    if int(trackcnt) != int(cntit):
        cntit = trackcnt
        vari = "yes"
    else: vari = "no"
    logger.fdebug("vari is set to: " + str(vari))
    #if str(trackcnt) != str(int(cntit)+2):
    #    cntit = int(cntit) + 1
    comic = {}
    comicchoice = []
    cntit = int(cntit)
    #retrieve the first xml tag (<tag>data</tag>)
    #that the parser finds with name tagName:
    # to return the parent name of the <name> node : dom.getElementsByTagName('name')[0].parentNode.nodeName
    # where [0] denotes the number of the name field(s)
    # where nodeName denotes the parentNode : ComicName = results, publisher = publisher, issues = issue
    try:
        names = len( dom.getElementsByTagName('name') )
        n = 0
        while ( n < names ):
            if dom.getElementsByTagName('name')[n].parentNode.nodeName == 'results':
                try:
                    comic['ComicName'] = dom.getElementsByTagName('name')[n].firstChild.wholeText
                    comic['ComicName'] = comic['ComicName'].rstrip() 
                except:
                    logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible AND that you have provided your OWN ComicVine API key.')
                    return

            elif dom.getElementsByTagName('name')[n].parentNode.nodeName == 'publisher':
                try:
                    comic['ComicPublisher'] = dom.getElementsByTagName('name')[n].firstChild.wholeText
                except:
                    comic['ComicPublisher'] = "Unknown"

            n+=1  
    except:
        logger.warn('Something went wrong retrieving from ComicVine. Ensure your API is up-to-date and that comicvine is accessible')
        return

    try:
        comic['ComicYear'] = dom.getElementsByTagName('start_year')[0].firstChild.wholeText
    except:
        comic['ComicYear'] = '0000'
    comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[trackcnt].firstChild.wholeText

    desdeck = 0
    #the description field actually holds the Volume# - so let's grab it
    try:
        descchunk = dom.getElementsByTagName('description')[0].firstChild.wholeText
        comic_desc = drophtml(descchunk)
        desdeck +=1
    except:
        comic_desc = 'None'

    #sometimes the deck has volume labels
    try:
        deckchunk = dom.getElementsByTagName('deck')[0].firstChild.wholeText
        comic_deck = deckchunk
        desdeck +=1
    except:
        comic_deck = 'None'

    try:
        comic['Aliases'] = dom.getElementsByTagName('aliases')[0].firstChild.wholeText
        #logger.fdebug('Aliases: ' + str(aliases))
    except:
        comic['Aliases'] = 'None'

    comic['ComicVersion'] = 'noversion'
    #logger.info('comic_desc:' + comic_desc)
    #logger.info('comic_deck:' + comic_deck)
    #logger.info('desdeck: ' + str(desdeck))
    while (desdeck > 0):
        if desdeck == 1:
            if comic_desc == 'None':
                comicDes = comic_deck[:30]
            else:
                #extract the first 60 characters
                comicDes = comic_desc[:60].replace('New 52', '')
        elif desdeck == 2:
            #extract the characters from the deck
            comicDes = comic_deck[:30].replace('New 52', '')
        else:
            break

        i = 0
        while (i < 2):
            if 'volume' in comicDes.lower():
                #found volume - let's grab it.
                v_find = comicDes.lower().find('volume')
                #arbitrarily grab the next 10 chars (6 for volume + 1 for space + 3 for the actual vol #)
                #increased to 10 to allow for text numbering (+5 max)
                #sometimes it's volume 5 and occasionally it's fifth volume.
                if i == 0:
                    vfind = comicDes[v_find:v_find+15]   #if it's volume 5 format
                    basenums = {'zero':'0','one':'1','two':'2','three':'3','four':'4','five':'5','six':'6','seven':'7','eight':'8','nine':'9','ten':'10','i':'1','ii':'2','iii':'3','iv':'4','v':'5'}
                    logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)
                else:
                    vfind = comicDes[:v_find]   # if it's fifth volume format
                    basenums = {'zero':'0','first':'1','second':'2','third':'3','fourth':'4','fifth':'5','sixth':'6','seventh':'7','eighth':'8','ninth':'9','tenth':'10','i':'1','ii':'2','iii':'3','iv':'4','v':'5'}
                    logger.fdebug('X volume format - ' + str(i) + ': ' + vfind)
                volconv = ''
                for nums in basenums:
                    if nums in vfind.lower():
                        sconv = basenums[nums]
                        vfind = re.sub(nums, sconv, vfind.lower())
                        break        
                #logger.info('volconv: ' + str(volconv))

                #now we attempt to find the character position after the word 'volume'
                if i == 0:
                    volthis = vfind.lower().find('volume')
                    volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit
                    vfind = vfind[volthis:volthis+4] #grab the next 4 characters ;)
                elif i == 1:
                    volthis = vfind.lower().find('volume')
                    vfind = vfind[volthis-4:volthis] #grab the next 4 characters ;)

                if '(' in vfind:
                    #bracket detected in versioning
                    vfindit = re.findall('[^()]+', vfind)
                    vfind = vfindit[0]
                vf = re.findall('[^<>]+', vfind)
                ledigit = re.sub("[^0-9]", "", vf[0])
                if ledigit != '':
                    comic['ComicVersion'] = ledigit
                    logger.fdebug("Volume information found! Adding to series record : volume " + comic['ComicVersion'])
                    break
                i+=1
            else:
                i+=1

        if comic['ComicVersion'] == 'noversion':
            logger.fdebug('comic[ComicVersion]:' + str(comic['ComicVersion']))
            desdeck -=1
        else:
            break

    if vari == "yes": 
        comic['ComicIssues'] = str(cntit)
    else:
        comic['ComicIssues'] = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText

    comic['ComicImage'] = dom.getElementsByTagName('super_url')[0].firstChild.wholeText
    comic['ComicImageALT'] = dom.getElementsByTagName('small_url')[0].firstChild.wholeText

    comic['FirstIssueID'] = dom.getElementsByTagName('id')[0].firstChild.wholeText

#    print ("fistIss:" + str(comic['FirstIssueID']))
#    comicchoice.append({
#        'ComicName':              comic['ComicName'],
#        'ComicYear':              comic['ComicYear'],
#        'Comicid':                comicid,
#        'ComicURL':               comic['ComicURL'],
#        'ComicIssues':            comic['ComicIssues'],
#        'ComicImage':             comic['ComicImage'],
#        'ComicVolume':            ParseVol,
#        'ComicPublisher':         comic['ComicPublisher']
#        })

#    comic['comicchoice'] = comicchoice
    return comic
Example #33
def checkGithub(auto_update=False):
    plexpy.COMMITS_BEHIND = 0

    # Get the latest version available from github
    logger.info('Retrieving latest version information from GitHub')
    url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
        plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
        plexpy.CONFIG.GIT_BRANCH)
    if plexpy.CONFIG.GIT_TOKEN:
        url = url + '?access_token=%s' % plexpy.CONFIG.GIT_TOKEN
    version = request.request_json(url,
                                   timeout=20,
                                   validator=lambda x: type(x) == dict)

    if version is None:
        logger.warn(
            'Could not get the latest version from GitHub. Are you running a local development version?'
        )
        return plexpy.CURRENT_VERSION

    plexpy.LATEST_VERSION = version['sha']
    logger.debug("Latest version is %s", plexpy.LATEST_VERSION)

    # See how many commits behind we are
    if not plexpy.CURRENT_VERSION:
        logger.info(
            'You are running an unknown version of Tautulli. Run the updater to identify your version'
        )
        return plexpy.LATEST_VERSION

    if plexpy.LATEST_VERSION == plexpy.CURRENT_VERSION:
        logger.info('Tautulli is up to date')
        return plexpy.LATEST_VERSION

    logger.info(
        'Comparing currently installed version with latest GitHub version')
    url = 'https://api.github.com/repos/%s/%s/compare/%s...%s' % (
        plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO, plexpy.LATEST_VERSION,
        plexpy.CURRENT_VERSION)
    if plexpy.CONFIG.GIT_TOKEN:
        url = url + '?access_token=%s' % plexpy.CONFIG.GIT_TOKEN
    commits = request.request_json(url,
                                   timeout=20,
                                   whitelist_status_code=404,
                                   validator=lambda x: type(x) == dict)

    if commits is None:
        logger.warn('Could not get commits behind from GitHub.')
        return plexpy.LATEST_VERSION

    try:
        plexpy.COMMITS_BEHIND = int(commits['behind_by'])
        logger.debug("In total, %d commits behind", plexpy.COMMITS_BEHIND)
    except KeyError:
        logger.info(
            'Cannot compare versions. Are you running a local development version?'
        )
        plexpy.COMMITS_BEHIND = 0

    if plexpy.COMMITS_BEHIND > 0:
        logger.info('New version is available. You are %s commits behind' %
                    plexpy.COMMITS_BEHIND)

        url = 'https://api.github.com/repos/%s/%s/releases' % (
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO)
        releases = request.request_json(url,
                                        timeout=20,
                                        whitelist_status_code=404,
                                        validator=lambda x: type(x) == list)

        if releases is None:
            logger.warn('Could not get releases from GitHub.')
            return plexpy.LATEST_VERSION

        if plexpy.CONFIG.GIT_BRANCH == 'master':
            release = next((r for r in releases if not r['prerelease']),
                           releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'beta':
            release = next(
                (r
                 for r in releases if not r['tag_name'].endswith('-nightly')),
                releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'nightly':
            release = next((r for r in releases), releases[0])
        else:
            release = releases[0]

        plexpy.LATEST_RELEASE = release['tag_name']

        plexpy.NOTIFY_QUEUE.put({
            'notify_action': 'on_plexpyupdate',
            'plexpy_download_info': release,
            'plexpy_update_commit': plexpy.LATEST_VERSION,
            'plexpy_update_behind': plexpy.COMMITS_BEHIND
        })

        if auto_update:
            logger.info('Running automatic update.')
            plexpy.shutdown(restart=True, update=True)

    elif plexpy.COMMITS_BEHIND == 0:
        logger.info('Tautulli is up to date')

    return plexpy.LATEST_VERSION
Example #34
    def get_datatables_user_login(self,
                                  user_id=None,
                                  jwt_token=None,
                                  kwargs=None):
        default_return = {
            'recordsFiltered': 0,
            'recordsTotal': 0,
            'draw': 0,
            'data': []
        }

        if not session.allow_session_user(user_id):
            return default_return

        data_tables = datatables.DataTables()

        if session.get_session_user_id():
            custom_where = [[
                'user_login.user_id',
                session.get_session_user_id()
            ]]
        else:
            custom_where = [['user_login.user_id', user_id]] if user_id else []

        columns = [
            'user_login.id AS row_id', 'user_login.timestamp',
            'user_login.user_id', 'user_login.user', 'user_login.user_group',
            'user_login.ip_address', 'user_login.host',
            'user_login.user_agent', 'user_login.success', 'user_login.expiry',
            'user_login.jwt_token',
            '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
                    THEN users.username ELSE users.friendly_name END) AS friendly_name'
        ]

        try:
            query = data_tables.ssp_query(
                table_name='user_login',
                columns=columns,
                custom_where=custom_where,
                group_by=[],
                join_types=['LEFT OUTER JOIN'],
                join_tables=['users'],
                join_evals=[['user_login.user_id', 'users.user_id']],
                kwargs=kwargs)
        except Exception as e:
            logger.warn(
                "Tautulli Users :: Unable to execute database query for get_datatables_user_login: %s."
                % e)
            return default_return

        results = query['result']

        rows = []
        for item in results:
            (os, browser) = httpagentparser.simple_detect(item['user_agent'])

            expiry = None
            current = False
            if item['jwt_token'] and item['expiry']:
                _expiry = helpers.iso_to_datetime(item['expiry'])
                if _expiry > arrow.now():
                    expiry = _expiry.strftime('%Y-%m-%d %H:%M:%S')
                current = (item['jwt_token'] == jwt_token)

            row = {
                'row_id': item['row_id'],
                'timestamp': item['timestamp'],
                'user_id': item['user_id'],
                'user_group': item['user_group'],
                'ip_address': item['ip_address'],
                'host': item['host'],
                'user_agent': item['user_agent'],
                'os': os,
                'browser': browser,
                'success': item['success'],
                'expiry': expiry,
                'current': current,
                'friendly_name': item['friendly_name'] or item['user']
            }

            rows.append(row)

        output = {
            'recordsFiltered': query['filteredCount'],
            'recordsTotal': query['totalCount'],
            'data': session.friendly_name_to_username(rows),
            'draw': query['draw']
        }

        return output
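For reference, the join_types, join_tables, and join_evals arguments passed to ssp_query above pair up positionally. Assuming the builder composes standard SQL (an assumption; the real logic lives in datatables.DataTables), the join portion lines up roughly like this:

# Hypothetical illustration only; the actual SQL is generated inside
# datatables.DataTables.ssp_query.
join_types = ['LEFT OUTER JOIN']
join_tables = ['users']
join_evals = [['user_login.user_id', 'users.user_id']]

clauses = ['%s %s ON %s = %s' % (jt, tbl, ev[0], ev[1])
           for jt, tbl, ev in zip(join_types, join_tables, join_evals)]

print(clauses[0])  # LEFT OUTER JOIN users ON user_login.user_id = users.user_id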
Example #35
def upload_blocks_from_communicator(comm_inst: 'OnionrCommunicatorDaemon'):
    """Accept a communicator instance + upload blocks from its upload queue."""
    """when inserting a block, we try to upload
     it to a few peers to add some deniability & increase functionality"""
    TIMER_NAME = "upload_blocks_from_communicator"

    session_manager: sessionmanager.BlockUploadSessionManager
    session_manager = comm_inst.shared_state.get(
        sessionmanager.BlockUploadSessionManager)
    tried_peers: 'List[UserID]' = []
    finishedUploads = []
    comm_inst.blocksToUpload = onionrcrypto.cryptoutils.random_shuffle(
        comm_inst.blocksToUpload)

    def remove_from_hidden(bl):
        sleep(60)
        try:
            comm_inst.shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(bl)
        except ValueError:
            pass

    if len(comm_inst.blocksToUpload) != 0:
        for bl in comm_inst.blocksToUpload:
            if not stringvalidators.validate_hash(bl):
                logger.warn('Requested to upload invalid block', terminal=True)
                comm_inst.decrementThreadCount(TIMER_NAME)
                return
            session = session_manager.add_session(bl)
            for _ in range(min(len(comm_inst.onlinePeers), 6)):
                try:
                    peer = onlinepeers.pick_online_peer(comm_inst)
                except onionrexceptions.OnlinePeerNeeded:
                    continue
                try:
                    session.peer_exists[peer]
                    continue
                except KeyError:
                    pass
                try:
                    if session.peer_fails[peer] > 3:
                        continue
                except KeyError:
                    pass
                if peer in tried_peers:
                    continue
                tried_peers.append(peer)
                url = f'http://{peer}/upload'
                try:
                    data = block.Block(bl).getRaw()
                except onionrexceptions.NoDataAvailable:
                    finishedUploads.append(bl)
                    break
                proxy_type = proxypicker.pick_proxy(peer)
                logger.info(
                    f"Uploading block {bl[:8]} to {peer}", terminal=True)
                resp = basicrequests.do_post_request(
                    url, data=data, proxyType=proxy_type,
                    content_type='application/octet-stream')
                if resp is not False:
                    if resp == 'success':
                        Thread(target=remove_from_hidden,
                               args=[bl], daemon=True).start()
                        session.success()
                        session.peer_exists[peer] = True
                    elif resp == 'exists':
                        session.success()
                        session.peer_exists[peer] = True
                    else:
                        session.fail()
                        session.fail_peer(peer)
                        comm_inst.getPeerProfileInstance(peer).addScore(-5)
                        logger.warn(
                           f'Failed to upload {bl[:8]}, reason: {resp}',
                           terminal=True)
                else:
                    session.fail()
        session_manager.clean_session()
    for x in finishedUploads:
        try:
            comm_inst.blocksToUpload.remove(x)

            comm_inst.shared_state.get_by_string(
                'PublicAPI').hideBlocks.remove(x)

        except ValueError:
            pass
    comm_inst.decrementThreadCount(TIMER_NAME)
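The try/except KeyError blocks above are membership tests on plain dicts (peer_exists, peer_fails). A minimal sketch of the same bookkeeping written with dict.get, using a hypothetical stand-in for the session object:

class UploadSession:
    """Hypothetical stand-in for the upload session object used above."""

    def __init__(self):
        self.peer_exists = {}  # peer -> True once that peer has the block
        self.peer_fails = {}   # peer -> consecutive upload failure count

    def should_skip(self, peer):
        # Equivalent to the try/except KeyError checks in the loop above.
        if self.peer_exists.get(peer):
            return True
        return self.peer_fails.get(peer, 0) > 3

    def fail_peer(self, peer):
        self.peer_fails[peer] = self.peer_fails.get(peer, 0) + 1

session = UploadSession()
session.fail_peer('peer-a')
assert not session.should_skip('peer-a')

Example #36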
def executeSoftwareQueryByPath(client, reg_path, prefix=''):
    '''
    Shell, str, str = '' -> list(list(str), str)
    @command: <prefix>reg query <reg_path>\Uninstall /S
    @command: reg_mam query <reg_path>\Uninstall /S
    '''
    ntcmdErrStr = 'Remote command returned 1(0x1)'
    non64BitOsErrStr = 'The system was unable to find the specified registry key or value'

    queryStr = ' query ' + reg_path + '\Uninstall /S'
    # First try the default reg.exe (might not work on Win2k or NT)
    if len(prefix) > 0 and (not prefix.endswith('\\')):
        prefix += '\\'
    cmdRemoteAgent = prefix + 'reg' + queryStr
    buffer = client.execCmd(cmdRemoteAgent,
                            120000)  #@@CMD_PERMISION ntcmd protocol execution
    logger.debug('Outputting ', cmdRemoteAgent, ': ...')
    reg_mamRc = client.getLastCmdReturnCode()
    if (reg_mamRc != 0) or (buffer.find(ntcmdErrStr) != -1):
        if (reg_mamRc == 1) and (buffer.find('ERROR: More data is available.')
                                 != -1):
            errMsg = 'reg command returned \'More data is available\' error, not all software might be reported'
            logger.warn(errMsg)
            pass
        else:
            logger.debug(
                'reg query command ended unsuccessfully with return code:%d, error:%s'
                % (reg_mamRc, buffer))
            logger.debug(
                'Failed getting software info using the default reg.exe, trying reg_mam.exe instead'
            )
            cmdRemote = 'reg_mam'
            localFile = CollectorsParameters.BASE_PROBE_MGR_DIR + CollectorsParameters.getDiscoveryResourceFolder(
            ) + CollectorsParameters.FILE_SEPARATOR + 'reg_mam.exe'
            remoteFile = client.copyFileIfNeeded(localFile)
            if not remoteFile:
                logger.warn('Failed copying %s' % cmdRemote)
                return [[], '']

            cmdRemoteAgent = remoteFile + queryStr

            buffer = client.execCmd(
                cmdRemoteAgent,
                120000)  #@@CMD_PERMISION ntcmd protocol execution
            regRc = client.getLastCmdReturnCode()
            if (regRc != 0) or (buffer.find(ntcmdErrStr) != -1):
                if (regRc == 1) and (
                        buffer.find('ERROR: More data is available.') != -1):
                    errMsg = 'reg_mam command returned \'More data is available\' error, not all software might be reported'
                    logger.warn(errMsg)
                    pass
                else:
                    if buffer.find(non64BitOsErrStr) == -1:
                        logger.debug(
                            'Failed getting software info, reg.exe ended with %d, error:%s'
                            % (regRc, buffer))
                    return [[], '']

    logger.debug('got software buffer from remote registry - parsing...')
    keys = buffer.split(reg_path)

    return [keys, buffer]
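The function hands back the raw buffer split on reg_path, so each chunk begins at one uninstall key. Assuming typical reg query /S output (name, type, data columns per line, which is an assumption about the tool's format), a chunk can be reduced to a value dict like this hedged sketch:

# Hedged sketch: parse one registry-key chunk into a dict of values.
# The sample buffer is hypothetical; real output varies by Windows version.
sample_chunk = (
    '\\Some Product\r\n'
    '    DisplayName    REG_SZ    Some Product\r\n'
    '    DisplayVersion    REG_SZ    1.2.3\r\n'
)

def parse_chunk(chunk):
    values = {}
    for line in chunk.splitlines():
        parts = line.strip().split(None, 2)  # name, type, data
        if len(parts) == 3 and parts[1].startswith('REG_'):
            values[parts[0]] = parts[2]
    return values

assert parse_chunk(sample_chunk)['DisplayName'] == 'Some Product'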
Example #37
def install_geoip_db():
    maxmind_url = 'http://geolite.maxmind.com/download/geoip/database/'
    geolite2_gz = 'GeoLite2-City.mmdb.gz'
    geolite2_md5 = 'GeoLite2-City.md5'
    geolite2_db = geolite2_gz[:-3]
    md5_checksum = ''

    temp_gz = os.path.join(plexpy.CONFIG.CACHE_DIR, geolite2_gz)
    geolite2_db = plexpy.CONFIG.GEOIP_DB or os.path.join(
        plexpy.DATA_DIR, geolite2_db)

    # Retrieve the GeoLite2 gzip file
    logger.debug(
        u"Tautulli Helpers :: Downloading GeoLite2 gzip file from MaxMind...")
    try:
        maxmind = urllib.URLopener()
        maxmind.retrieve(maxmind_url + geolite2_gz, temp_gz)
        md5_checksum = urllib2.urlopen(maxmind_url + geolite2_md5).read()
    except Exception as e:
        logger.error(
            u"Tautulli Helpers :: Failed to download GeoLite2 gzip file from MaxMind: %s"
            % e)
        return False

    # Extract the GeoLite2 database file
    logger.debug(u"Tautulli Helpers :: Extracting GeoLite2 database...")
    try:
        with gzip.open(temp_gz, 'rb') as gz:
            with open(geolite2_db, 'wb') as db:
                db.write(gz.read())
    except Exception as e:
        logger.error(
            u"Tautulli Helpers :: Failed to extract the GeoLite2 database: %s"
            % e)
        return False

    # Check MD5 hash for GeoLite2 database file
    logger.debug(
        u"Tautulli Helpers :: Checking MD5 checksum for GeoLite2 database...")
    try:
        hash_md5 = hashlib.md5()
        with open(geolite2_db, 'rb') as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        md5_hash = hash_md5.hexdigest()

        if md5_hash != md5_checksum:
            logger.error(
                u"Tautulli Helpers :: MD5 checksum doesn't match for GeoLite2 database. "
                "Checksum: %s, file hash: %s" % (md5_checksum, md5_hash))
            return False
    except Exception as e:
        logger.error(
            u"Tautulli Helpers :: Failed to generate MD5 checksum for GeoLite2 database: %s"
            % e)
        return False

    # Delete the temporary GeoLite2 gzip file
    logger.debug(
        u"Tautulli Helpers :: Deleting temporary GeoLite2 gzip file...")
    try:
        os.remove(temp_gz)
    except Exception as e:
        logger.warn(
            u"Tautulli Helpers :: Failed to remove temporary GeoLite2 gzip file: %s"
            % e)

    logger.debug(
        u"Tautulli Helpers :: GeoLite2 database installed successfully.")
    plexpy.CONFIG.__setattr__('GEOIP_DB', geolite2_db)
    plexpy.CONFIG.write()

    return True
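The checksum step above streams the database through MD5 in 4 KB chunks, so the file never has to fit in memory. The same pattern as a reusable helper (a sketch; the original inlines it):

import hashlib

def md5_of_file(path, chunk_size=4096):
    """Stream a file through MD5 without loading it whole."""
    hash_md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()

# Usage: md5_of_file('/path/to/GeoLite2-City.mmdb')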
Example #38
def assemble(user_called=True, retry_with_zero_m4v=False):
    try:
        ass_json_file = pil.assemble_arg if pil.assemble_arg.endswith(
            ".json") else pil.assemble_arg + ".json"
        ass_mp4_file = os.path.join(
            pil.dl_path,
            os.path.basename(ass_json_file).replace("_downloads", "").replace(
                ".json", ".mp4"))
        ass_segment_dir = pil.assemble_arg if not pil.assemble_arg.endswith(
            ".json") else pil.assemble_arg.replace(".json", "")

        if pil.verbose:
            logger.plain("{}\n{}\n{}".format(ass_json_file, ass_mp4_file,
                                             ass_segment_dir))

        broadcast_info = {}
        if not os.path.isdir(ass_segment_dir) or not os.listdir(
                ass_segment_dir):
            logger.error(
                'The segment directory does not exist or does not contain any files: %s'
                % ass_segment_dir)
            logger.separator()
            return
        if not os.path.isfile(ass_json_file):
            logger.warn(
                "No matching json file found for the segment directory, trying to continue without it."
            )
            ass_stream_id = os.listdir(ass_segment_dir)[0].split('-')[0]
            broadcast_info['id'] = ass_stream_id
            broadcast_info['broadcast_status'] = "active"
            broadcast_info['segments'] = {}
        else:
            with open(ass_json_file) as info_file:
                try:
                    broadcast_info = json.load(info_file)
                except Exception as e:
                    logger.warn(
                        "Could not decode json file, trying to continue without it."
                    )
                    ass_stream_id = os.listdir(ass_segment_dir)[0].split(
                        '-')[0]
                    broadcast_info['id'] = ass_stream_id
                    broadcast_info['broadcast_status'] = "active"
                    broadcast_info['segments'] = {}

        if broadcast_info.get('broadcast_status', '') == 'post_live':
            logger.error(
                'Video segment files from replay downloads cannot be assembled.'
            )
            return

        stream_id = str(broadcast_info['id'])

        segment_meta = broadcast_info.get('segments', {})
        if segment_meta:
            all_segments = [
                os.path.join(ass_segment_dir, k)
                for k in broadcast_info['segments'].keys()
            ]
        else:
            all_segments = list(
                filter(
                    os.path.isfile,
                    glob.glob(
                        os.path.join(ass_segment_dir,
                                     '%s-*.m4v' % stream_id))))

        all_segments = sorted(all_segments, key=lambda x: _get_file_index(x))
        sources = []
        # temp files accumulate the concatenated audio/video tracks
        audio_stream_format = 'assembled_source_{0}_{1}_m4a.tmp'
        video_stream_format = 'assembled_source_{0}_{1}_mp4.tmp'
        video_stream = ''
        audio_stream = ''
        has_skipped_zero_m4v = False

        if not all_segments:
            logger.error(
                "No video segment files have been found in the specified folder."
            )
            logger.separator()
            return
        else:
            logger.info(
                "Assembling video segment files from specified folder: {}".
                format(ass_segment_dir))

        for segment in all_segments:
            if not os.path.isfile(segment.replace('.m4v', '.m4a')):
                logger.warn('Audio segment not found: {0!s}'.format(
                    segment.replace('.m4v', '.m4a')))
                continue

            if segment.endswith('-init.m4v'):
                logger.info('Replacing %s' % segment)
                segment = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), 'repair',
                    'init.m4v')

            if segment.endswith('-0.m4v') and not retry_with_zero_m4v:
                has_skipped_zero_m4v = True
                continue

            video_stream = os.path.join(
                ass_segment_dir,
                video_stream_format.format(stream_id, len(sources)))
            audio_stream = os.path.join(
                ass_segment_dir,
                audio_stream_format.format(stream_id, len(sources)))

            file_mode = 'ab'

            with open(video_stream,
                      file_mode) as outfile, open(segment, 'rb') as readfile:
                shutil.copyfileobj(readfile, outfile)

            with open(audio_stream, file_mode) as outfile, open(
                    segment.replace('.m4v', '.m4a'), 'rb') as readfile:
                shutil.copyfileobj(readfile, outfile)

        if audio_stream and video_stream:
            sources.append({'video': video_stream, 'audio': audio_stream})

        for n, source in enumerate(sources):
            ffmpeg_binary = os.getenv('FFMPEG_BINARY', 'ffmpeg')
            cmd = [
                ffmpeg_binary, '-loglevel', 'warning', '-y', '-i',
                source['audio'], '-i', source['video'], '-c:v', 'copy', '-c:a',
                'copy', ass_mp4_file
            ]
            #fnull = open(os.devnull, 'w')
            fnull = None
            exit_code = subprocess.call(cmd,
                                        stdout=fnull,
                                        stderr=subprocess.STDOUT)
            if exit_code != 0:
                logger.warn(
                    "FFmpeg exit code not '0' but '{:d}'.".format(exit_code))
                if has_skipped_zero_m4v and not retry_with_zero_m4v:
                    logger.binfo(
                        "*-0.m4v segment was detected but skipped, retrying to assemble video without "
                        "skipping it.")
                    os.remove(source['audio'])
                    os.remove(source['video'])
                    logger.separator()
                    assemble(user_called, retry_with_zero_m4v=True)
                    return
            else:
                logger.info('The video file has been generated: %s' %
                            os.path.basename(ass_mp4_file))
                os.remove(source['audio'])
                os.remove(source['video'])
            if user_called:
                logger.separator()
    except Exception as e:
        logger.error("An error occurred: {:s}".format(str(e)))
Example #39
def authenticate(username, password, force_use_login_args=False):
    ig_api = None
    try:
        if force_use_login_args:
            pil.ig_user = username
            pil.ig_pass = password
            pil.config_login_overridden = True
            logger.binfo(
                "Overriding configuration file login with -u and -p arguments."
            )
            logger.separator()
        cookie_file = os.path.join(os.path.dirname(pil.config_path),
                                   "{}.json".format(username))
        if not os.path.isfile(cookie_file):
            # settings file does not exist
            logger.warn('Unable to find cookie file: {0!s}'.format(
                os.path.basename(cookie_file)))
            logger.info('Creating a new one.')

            # login new
            ig_api = Client(
                username,
                password,
                on_login=lambda x: onlogin_callback(x, cookie_file),
                proxy=pil.proxy)
            login(ig_api)
        else:
            with open(cookie_file) as file_data:
                cached_settings = json.load(file_data, object_hook=from_json)
            logger.info('Using settings file: {0!s}'.format(cookie_file))

            # device_id = cached_settings.get('device_id')
            # reuse auth cached_settings
            try:
                ig_api = Client(username,
                                password,
                                settings=cached_settings,
                                proxy=pil.proxy)

            except (ClientSentryBlockError, ClientChallengeRequiredError,
                    ClientCheckpointRequiredError, ClientCookieExpiredError,
                    ClientLoginError, ClientError) as e:
                logger.separator()
                logger.warn('Some sort of login exception!')
                if pil.verbose:
                    logger.plain(json.dumps(e.error_response))
                logger.error('Could not login: {:s}'.format(e.error_response))
                logger.error('{:s}'.format(
                    json.loads(e.error_response).get("message",
                                                     e.error_response)))
                logger.error('{:s}'.format(e.error_response))
                logger.separator()

                ig_api = Client(
                    username,
                    password,
                    on_login=lambda x: onlogin_callback(x, cookie_file),
                    proxy=pil.proxy)
                login(ig_api)
                logger.warn('Successfully resolved error and logged back in!')

    except (ClientLoginError, ClientError) as e:
        logger.separator()
        if pil.verbose:
            logger.plain(json.dumps(e.error_response))
        logger.error('Could not login: {:s}'.format(e.error_response))
        logger.error('{:s}'.format(
            json.loads(e.error_response).get("message", e.error_response)))
        logger.error('{:s}'.format(e.error_response))
        logger.separator()
    except Exception as e:
        if pil.verbose:
            logger.plain(str(e))
        if str(e).startswith("unsupported pickle protocol"):
            logger.warn(
                "This cookie file is not compatible with Python {}.".format(
                    sys.version.split(' ')[0][0]))
            logger.warn(
                "Please delete your cookie file '{}.json' and try again.".
                format(username))
        else:
            logger.separator()
            logger.error('Unexpected exception: {:s}'.format(e))
        logger.separator()
    except KeyboardInterrupt:
        logger.separator()
        logger.warn("The user authentication has been aborted.")
        logger.separator()

    if ig_api:
        logger.info('Successfully logged into account: {:s}'.format(
            str(ig_api.authenticated_user_name)))
        if pil.show_cookie_expiry and not force_use_login_args:
            try:
                cookie_expiry = ig_api.cookie_jar.auth_expires
                logger.info('Cookie file expiry date: {:s}'.format(
                    datetime.datetime.fromtimestamp(cookie_expiry).strftime(
                        '%Y-%m-%d at %I:%M:%S %p')))
            except Exception as e:
                logger.warn(
                    'An error occurred while getting the cookie file expiry date: {:s}'
                    .format(str(e)))

        logger.separator()
        return ig_api
    else:
        return None
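The on_login callback wired up above persists the client's settings next to the config so later runs can reuse the cached session instead of logging in fresh. A plausible shape for it (hedged: the real helper lives elsewhere in the project and pairs with the from_json hook used when reading the file back):

import codecs
import json

def to_json(obj):
    # Stand-in serializer: encode raw bytes as base64 strings so the
    # settings dict survives a JSON round trip. The project's real
    # serializer may differ.
    if isinstance(obj, bytes):
        return {'__bytes__': codecs.encode(obj, 'base64').decode()}
    return repr(obj)

def onlogin_callback(api, cookie_file):
    # Persist the client's session settings for the next run.
    with open(cookie_file, 'w') as outfile:
        json.dump(api.settings, outfile, default=to_json)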
Example #40
    def startTor(self):
        '''
            Start Tor with onion service on port 80 & socks proxy on random port
        '''

        self.generateTorrc()

        if os.path.exists('./tor'):
            torBinary = './tor'
        elif os.path.exists('/usr/bin/tor'):
            torBinary = '/usr/bin/tor'
        else:
            torBinary = 'tor'

        try:
            tor = subprocess.Popen([torBinary, '-f', self.torConfigLocation],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        except FileNotFoundError:
            logger.fatal(
                "Tor was not found in your path or the Onionr directory. Please install Tor and try again."
            )
            sys.exit(1)
        else:
            # Test Tor Version
            torVersion = subprocess.Popen([torBinary, '--version'],
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
            for line in iter(torVersion.stdout.readline, b''):
                if 'Tor 0.2.' in line.decode():
                    logger.warn(
                        "Running 0.2.x Tor series, no support for v3 onion peers"
                    )
                    break
            torVersion.kill()

        # wait for tor to get to 100% bootstrap
        try:
            for line in iter(tor.stdout.readline, b''):
                if 'Bootstrapped 100%: Done' in line.decode():
                    break
                elif 'Opening Socks listener' in line.decode():
                    logger.debug(line.decode().replace('\n', ''))
            else:
                logger.fatal(
                    'Failed to start Tor. Try killing any other Tor processes owned by this user.'
                )
                return False
        except KeyboardInterrupt:
            logger.fatal("Got keyboard interrupt")
            return False

        logger.info('Finished starting Tor', timestamp=True)
        self.readyState = True

        with open('data/hs/hostname', 'r') as hostFile:
            self.myID = hostFile.read().replace('\n', '')

        with open('data/torPid.txt', 'w') as torPidFile:
            torPidFile.write(str(tor.pid))

        return True
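startTor blocks on Tor's stdout until the bootstrap marker appears, and the for/else means the else branch only runs if the stream drains without ever hitting the marker. The pattern in isolation (a sketch with a stand-in byte stream):

import io

def wait_for_marker(stream, marker):
    """Return True once a line containing marker is seen, else False."""
    for line in iter(stream.readline, b''):
        if marker in line.decode():
            return True
    return False

fake_stdout = io.BytesIO(b'Opening Socks listener\nBootstrapped 100%: Done\n')
assert wait_for_marker(fake_stdout, 'Bootstrapped 100%: Done')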
Example #41
def run():
    from websocket import create_connection

    if plexpy.CONFIG.PMS_SSL and plexpy.CONFIG.PMS_URL[:5] == 'https':
        uri = plexpy.CONFIG.PMS_URL.replace(
            'https://', 'wss://') + '/:/websockets/notifications'
        secure = ' secure'
    else:
        uri = 'ws://%s:%s/:/websockets/notifications' % (
            plexpy.CONFIG.PMS_IP, plexpy.CONFIG.PMS_PORT)
        secure = ''

    # Set authentication token (if one is available)
    if plexpy.CONFIG.PMS_TOKEN:
        header = ["X-Plex-Token: %s" % plexpy.CONFIG.PMS_TOKEN]
    else:
        header = []

    global ws_reconnect
    ws_reconnect = False
    ws_connected = False
    reconnects = 0

    # Try to open the websocket connection - if it fails after 15 retries, fall back to polling
    while not ws_connected and reconnects <= 15:
        try:
            logger.info(
                u"PlexPy WebSocket :: Opening%s websocket, connection attempt %s."
                % (secure, str(reconnects + 1)))
            ws = create_connection(uri, header=header)
            reconnects = 0
            ws_connected = True
            logger.info(u"PlexPy WebSocket :: Ready")
        except IOError as e:
            logger.error(u"PlexPy WebSocket :: %s." % e)
            reconnects += 1
            time.sleep(5)

    while ws_connected:
        try:
            process(*receive(ws))

            # successfully received data, reset reconnects counter
            reconnects = 0
        except (websocket.WebSocketConnectionClosedException, Exception):
            if reconnects <= 15:
                reconnects += 1

                # Sleep 5 between connection attempts
                if reconnects > 1:
                    time.sleep(5)

                logger.warn(
                    u"PlexPy WebSocket :: Connection has closed, reconnection attempt %s."
                    % reconnects)
                try:
                    ws = create_connection(uri, header=header)
                except IOError as e:
                    logger.info(u"PlexPy WebSocket :: %s." % e)

            else:
                ws.shutdown()
                ws_connected = False
                break

        # Check if we received a restart notification and close the websocket connection cleanly
        if ws_reconnect:
            logger.info(u"PlexPy WebSocket :: Reconnecting websocket...")
            ws.shutdown()
            ws_connected = False
            start_thread()

    if not ws_connected and not ws_reconnect:
        logger.error(
            u"PlexPy WebSocket :: Connection unavailable, falling back to polling."
        )
        plexpy.POLLING_FAILOVER = True
        plexpy.initialize_scheduler()

    logger.debug(u"PlexPy WebSocket :: Leaving thread.")
Example #42
    def processor(self):
        sendresponse = self.params['nzo_id']
        try:
            logger.info('sending now to %s' % self.sab_url)
            logger.info('parameters set to %s' % self.params)
            time.sleep(5)  # pause 5 seconds before monitoring just so it hits the queue
            h = requests.get(self.sab_url,
                             params=self.params['queue'],
                             verify=False)
        except Exception as e:
            logger.info('uh-oh: %s' % e)
            return {'status': False}
        else:
            queueresponse = h.json()
            logger.info('successfully queried the queue for status')
            try:
                queueinfo = queueresponse['queue']
                logger.info('queue: %s' % queueresponse)
                logger.info('Queue status : %s' % queueinfo['status'])
                logger.info('Queue mbleft : %s' % queueinfo['mbleft'])
                while any([
                        str(queueinfo['status']) == 'Downloading',
                        str(queueinfo['status']) == 'Idle'
                ]) and float(queueinfo['mbleft']) > 0:
                    logger.info('queue_params: %s' % self.params['queue'])
                    queue_resp = requests.get(self.sab_url,
                                              params=self.params['queue'],
                                              verify=False)
                    queueresp = queue_resp.json()
                    queueinfo = queueresp['queue']
                    logger.info('status: %s' % queueinfo['status'])
                    logger.info('mbleft: %s' % queueinfo['mbleft'])
                    logger.info('timeleft: %s' % queueinfo['timeleft'])
                    logger.info('eta: %s' % queueinfo['eta'])
                    time.sleep(5)
            except Exception as e:
                logger.warn('error: %s' % e)

            logger.info('File has now downloaded!')
            hist_params = {
                'mode': 'history',
                'category': mylar.CONFIG.SAB_CATEGORY,
                'failed': 0,
                'output': 'json',
                'apikey': mylar.CONFIG.SAB_APIKEY
            }
            hist = requests.get(self.sab_url, params=hist_params, verify=False)
            historyresponse = hist.json()
            #logger.info(historyresponse)
            histqueue = historyresponse['history']
            found = {'status': False}
            while found['status'] is False:
                try:
                    for hq in histqueue['slots']:
                        #logger.info('nzo_id: %s --- %s [%s]' % (hq['nzo_id'], sendresponse, hq['status']))
                        if hq['nzo_id'] == sendresponse and hq[
                                'status'] == 'Completed':
                            logger.info(
                                'found matching completed item in history. Job has a status of %s'
                                % hq['status'])
                            if os.path.isfile(hq['storage']):
                                logger.info('location found @ %s' %
                                            hq['storage'])
                                found = {
                                    'status':
                                    True,
                                    'name':
                                    re.sub('.nzb', '', hq['nzb_name']).strip(),
                                    'location':
                                    os.path.abspath(
                                        os.path.join(hq['storage'],
                                                     os.pardir)),
                                    'failed':
                                    False
                                }
                                break
                            else:
                                logger.info(
                                    'no file found where it should be @ %s - is there another script that moves things after completion?'
                                    % hq['storage'])
                                break
                        elif hq['nzo_id'] == sendresponse and hq[
                                'status'] == 'Failed':
                            #get the stage / error message and see what we can do
                            stage = hq['stage_log']
                            for x in stage[0]:
                                if 'Failed' in x['actions'] and any([
                                        x['name'] == 'Unpack', x['name']
                                        == 'Repair'
                                ]):
                                    if 'moving' in x['actions']:
                                        logger.warn(
                                            'There was a failure in SABnzbd during the unpack/repair phase that caused a failure: %s'
                                            % x['actions'])
                                    else:
                                        logger.warn(
                                            'Failure occurred during the Unpack/Repair phase of SABnzbd. This is probably a bad file: %s'
                                            % x['actions'])
                                        if mylar.FAILED_DOWNLOAD_HANDLING is True:
                                            found = {
                                                'status':
                                                True,
                                                'name':
                                                re.sub('.nzb', '',
                                                       hq['nzb_name']).strip(),
                                                'location':
                                                os.path.abspath(
                                                    os.path.join(
                                                        hq['storage'],
                                                        os.pardir)),
                                                'failed':
                                                True
                                            }
                                    break
                            break
                except Exception as e:
                    logger.warn('error %s' % e)
                    break

        return found
Example #43
    def get_datatables_list(self, kwargs=None, grouping=None):
        default_return = {
            'recordsFiltered': 0,
            'recordsTotal': 0,
            'draw': 0,
            'data': []
        }

        data_tables = datatables.DataTables()

        custom_where = [['users.deleted_user', 0]]

        if grouping is None:
            grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES

        if session.get_session_user_id():
            custom_where.append(
                ['users.user_id',
                 session.get_session_user_id()])

        if kwargs.get('user_id'):
            custom_where.append(['users.user_id', kwargs.get('user_id')])

        group_by = 'session_history.reference_id' if grouping else 'session_history.id'

        columns = [
            'users.id AS row_id', 'users.user_id', 'users.username',
            '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
                    THEN users.username ELSE users.friendly_name END) AS friendly_name',
            'users.thumb AS user_thumb',
            'users.custom_avatar_url AS custom_thumb',
            'COUNT(DISTINCT %s) AS plays' % group_by,
            'SUM(CASE WHEN session_history.stopped > 0 THEN (session_history.stopped - session_history.started) \
                    ELSE 0 END) - SUM(CASE WHEN session_history.paused_counter IS NULL THEN 0 ELSE \
                    session_history.paused_counter END) AS duration',
            'MAX(session_history.started) AS last_seen',
            'MAX(session_history.id) AS history_row_id',
            'session_history_metadata.full_title AS last_played',
            'session_history.ip_address', 'session_history.platform',
            'session_history.player', 'session_history.rating_key',
            'session_history_metadata.media_type',
            'session_history_metadata.thumb',
            'session_history_metadata.parent_thumb',
            'session_history_metadata.grandparent_thumb',
            'session_history_metadata.parent_title',
            'session_history_metadata.year',
            'session_history_metadata.media_index',
            'session_history_metadata.parent_media_index',
            'session_history_metadata.live',
            'session_history_metadata.added_at',
            'session_history_metadata.originally_available_at',
            'session_history_metadata.guid',
            'session_history_media_info.transcode_decision',
            'users.do_notify AS do_notify',
            'users.keep_history AS keep_history',
            'users.allow_guest AS allow_guest', 'users.is_active AS is_active'
        ]
        try:
            query = data_tables.ssp_query(
                table_name='users',
                columns=columns,
                custom_where=custom_where,
                group_by=['users.user_id'],
                join_types=[
                    'LEFT OUTER JOIN', 'LEFT OUTER JOIN', 'LEFT OUTER JOIN'
                ],
                join_tables=[
                    'session_history', 'session_history_metadata',
                    'session_history_media_info'
                ],
                join_evals=[
                    ['session_history.user_id', 'users.user_id'],
                    ['session_history.id', 'session_history_metadata.id'],
                    ['session_history.id', 'session_history_media_info.id']
                ],
                kwargs=kwargs)
        except Exception as e:
            logger.warn(
                "Tautulli Users :: Unable to execute database query for get_list: %s."
                % e)
            return default_return

        users = query['result']

        rows = []
        for item in users:
            if item['media_type'] == 'episode' and item['parent_thumb']:
                thumb = item['parent_thumb']
            elif item['media_type'] == 'episode':
                thumb = item['grandparent_thumb']
            else:
                thumb = item['thumb']

            if item['custom_thumb'] and item['custom_thumb'] != item[
                    'user_thumb']:
                user_thumb = item['custom_thumb']
            elif item['user_thumb']:
                user_thumb = item['user_thumb']
            else:
                user_thumb = common.DEFAULT_USER_THUMB

            # Rename Mystery platform names
            platform = common.PLATFORM_NAME_OVERRIDES.get(
                item['platform'], item['platform'])

            row = {
                'row_id': item['row_id'],
                'user_id': item['user_id'],
                'username': item['username'],
                'friendly_name': item['friendly_name'],
                'user_thumb': user_thumb,
                'plays': item['plays'],
                'duration': item['duration'],
                'last_seen': item['last_seen'],
                'last_played': item['last_played'],
                'history_row_id': item['history_row_id'],
                'ip_address': item['ip_address'],
                'platform': platform,
                'player': item['player'],
                'rating_key': item['rating_key'],
                'media_type': item['media_type'],
                'thumb': thumb,
                'parent_title': item['parent_title'],
                'year': item['year'],
                'media_index': item['media_index'],
                'parent_media_index': item['parent_media_index'],
                'live': item['live'],
                'originally_available_at': item['originally_available_at'],
                'guid': item['guid'],
                'transcode_decision': item['transcode_decision'],
                'do_notify': helpers.checked(item['do_notify']),
                'keep_history': helpers.checked(item['keep_history']),
                'allow_guest': helpers.checked(item['allow_guest']),
                'is_active': item['is_active']
            }

            rows.append(row)

        output = {
            'recordsFiltered': query['filteredCount'],
            'recordsTotal': query['totalCount'],
            'data': session.friendly_name_to_username(rows),
            'draw': query['draw']
        }

        return output
Example #44
def _debug_execution_exception(e):
    logger.warn(str(e))
    if e.result and e.result.exception:
        logger.warn(e.result.exception.getMessage())
Example #45
def daemon(o_inst):
    '''
        Starts the Onionr communication daemon
    '''

    # remove runcheck if it exists
    if os.path.isfile('%s/.runcheck' % (o_inst.onionrCore.dataDir,)):
        logger.debug('Runcheck file found on daemon start, deleting in advance.')
        os.remove('%s/.runcheck' % (o_inst.onionrCore.dataDir,))

    Thread(target=api.API, args=(o_inst, o_inst.debug, onionr.API_VERSION)).start()
    Thread(target=api.PublicAPI, args=[o_inst.getClientApi()]).start()
    try:
        time.sleep(0)
    except KeyboardInterrupt:
        logger.debug('Got keyboard interrupt, shutting down...')
        _proper_shutdown(o_inst)

    apiHost = ''
    while apiHost == '':
        try:
            with open(o_inst.onionrCore.publicApiHostFile, 'r') as hostFile:
                apiHost = hostFile.read()
        except FileNotFoundError:
            pass
        time.sleep(0.5)
    #onionr.Onionr.setupConfig('data/', self = o_inst)

    if o_inst._developmentMode:
        logger.warn('DEVELOPMENT MODE ENABLED (NOT RECOMMENDED)', timestamp = False)
    net = NetController(o_inst.onionrCore.config.get('client.public.port', 59497), apiServerIP=apiHost)
    logger.debug('Tor is starting...')
    if not net.startTor():
        o_inst.onionrUtils.localCommand('shutdown')
        sys.exit(1)
    if len(net.myID) > 0 and o_inst.onionrCore.config.get('general.security_level', 1) == 0:
        logger.debug('Started .onion service: %s' % (logger.colors.underline + net.myID))
    else:
        logger.debug('.onion service disabled')
    logger.debug('Using public key: %s' % (logger.colors.underline + o_inst.onionrCore._crypto.pubKey))

    try:
        time.sleep(1)
    except KeyboardInterrupt:
        _proper_shutdown(o_inst)

    o_inst.onionrCore.torPort = net.socksPort
    communicatorThread = Thread(target=communicator.startCommunicator, args=(o_inst, str(net.socksPort)))
    communicatorThread.start()
    
    while o_inst.communicatorInst is None:
        time.sleep(0.1)

    # print nice header thing :)
    if o_inst.onionrCore.config.get('general.display_header', True):
        o_inst.header()

    # print out debug info
    o_inst.version(verbosity = 5, function = logger.debug)
    logger.debug('Python version %s' % platform.python_version())

    logger.debug('Started communicator.')

    events.event('daemon_start', onionr = o_inst)
    while True:
        try:
            time.sleep(3)
        except KeyboardInterrupt:
            o_inst.communicatorInst.shutdown = True
        finally:
            # Debug to print out used FDs (regular and net)
            #proc = psutil.Process()
            #print('api-files:',proc.open_files(), len(psutil.net_connections()))
            # Break if communicator process ends, so we don't have left over processes
            if o_inst.communicatorInst.shutdown:
                break
            if o_inst.killed:
                break # Break out if sigterm for clean exit

    signal.signal(signal.SIGINT, _ignore_sigint)
    o_inst.onionrCore.daemonQueueAdd('shutdown')
    o_inst.onionrUtils.localCommand('shutdown')

    net.killTor()
    time.sleep(3)
    o_inst.deleteRunFiles()
    return
Example #46
File: flux_map.py  Project: CelsoFCF/new
def main():
    parser = argparse.ArgumentParser(description='Script to create flux maps.')
    parser.add_argument(
        'inputfile',
        help='''Simulation results to use as input. '''
        '''Supports retrieving files from EOS via the XRootD protocol.''')
    parser.add_argument(
        '-o',
        '--outputfile',
        default='flux_map.root',
        help='''File to write the flux maps to. '''
        '''Will be recreated if it already exists.''')
    args = parser.parse_args()
    f = r.TFile.Open(args.outputfile, 'recreate')
    f.cd()
    maxpt = 10. * u.GeV
    maxp = 360. * u.GeV
    h = {}

    # Define histograms
    for nplane in range(0, 23):
        ut.bookHist(h, 'NuTauMu_all_{}'.format(nplane),
                    'Rpc_{};x[cm];y[cm]'.format(
                        nplane), 100, -300, +300, 100, -300,
                    300)
        ut.bookHist(h, 'NuTauMu_mu_{}'.format(nplane),
                    'Rpc_{};x[cm];y[cm]'.format(
                        nplane), 100, -300, +300, 100, -300,
                    300)
    for suffix, title in [('mu', '#mu#pm hits'), ('all', 'All hits')]:
        ut.bookHist(h, 'muon_tiles_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 200, -1000, +1000, 90,
                    -900, 900)
        ut.bookHist(h, 'muon_bars_x_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 2, -300, +300, 240, -600,
                    600)
        ut.bookHist(h, 'muon_bars_y_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 120, -300, +300, 4, -600,
                    600)
        ut.bookHist(h, 'timing_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 3, -252, +252, 167, -501,
                    501)
        ut.bookHist(h, 'TargetTracker_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 120, -60, 60, 120, -60,
                    60)
        ut.bookHist(h, 'TargetTracker_yvsz_{}'.format(suffix),
                    '{};z[cm];y[cm]'.format(
                        title), 400, -3300, -2900, 120, -60,
                    60)
        ut.bookHist(h, 'TargetTracker_xvsz_{}'.format(suffix),
                    '{};z[cm];x[cm]'.format(
                        title), 400, -3300, -2900, 120, -60,
                    60)
        ut.bookHist(h, 'NuTauMu_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 100, -300, +300, 100, -300,
                    300)
        ut.bookHist(h, 'NuTauMu_yvsz_{}'.format(suffix),
                    '{};z[cm];y[cm]'.format(
                        title), 200, -2680, -2480, 600, -300,
                    300)
        ut.bookHist(h, 'NuTauMu_xvsz_{}'.format(suffix),
                    '{};z[cm];x[cm]'.format(
                        title), 200, -2680, -2480, 600, -300,
                    300)
        ut.bookHist(h, 'ECAL_TP_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 167, -501, +501, 334,
                    -1002, 1002)
        ut.bookHist(h, 'ECAL_Alt_{}'.format(suffix),
                    '{};x[cm];y[cm]'.format(title), 50, -500, +500, 100, -1000,
                    1000)
        ut.bookHist(h, 'SBT_Liquid_{}'.format(suffix),
                    '{};z[cm];#phi'.format(title), 100, -3000, +3000, 100,
                    -r.TMath.Pi(), r.TMath.Pi())
        ut.bookHist(h, 'SBT_Plastic_{}'.format(suffix),
                    '{};z[cm];#phi'.format(title), 100, -3000, +3000, 100,
                    -r.TMath.Pi(), r.TMath.Pi())
        for station in range(1, 5):
            ut.bookHist(h, 'T{}_{}'.format(station, suffix),
                        '{};x[cm];y[cm]'.format(title), 10, -250, +250, 20,
                        -500, 500)

    ut.bookHist(h, 'NuTauMu_mu_p', '#mu#pm;p[GeV];', 100, 0, maxp)
    ut.bookHist(h, 'NuTauMu_mu_pt', '#mu#pm;p_t[GeV];', 100, 0,
                maxpt)
    ut.bookHist(h, 'NuTauMu_mu_ppt', '#mu#pm;p[GeV];p_t[GeV];',
                100, 0, maxp, 100, 0, maxpt)
    ut.bookHist(h, 'NuTauMu_all_p', '#mu#pm;p[GeV];', 100, 0, maxp)
    ut.bookHist(h, 'NuTauMu_all_pt', '#mu#pm;p_t[GeV];', 100, 0,
                maxpt)
    ut.bookHist(h, 'NuTauMu_all_ppt', '#mu#pm;p[GeV];p_t[GeV];',
                100, 0, maxp, 100, 0, maxpt)

    for suffix in ['', '_original']:
        ut.bookHist(h, 'mu_p{}'.format(suffix), '#mu#pm;p[GeV];', 100, 0, maxp)
        ut.bookHist(h, 'mu_pt{}'.format(suffix), '#mu#pm;p_t[GeV];', 100, 0,
                    maxpt)
        ut.bookHist(h, 'mu_ppt{}'.format(suffix), '#mu#pm;p[GeV];p_t[GeV];',
                    100, 0, maxp, 100, 0, maxpt)
    ut.bookHist(h, 'ECAL_TP_e', 'e#pm with E#geq 250 MeV;x[cm];y[cm]', 167,
                -501, +501, 334, -1002, 1002)
    ut.bookHist(h, 'ECAL_Alt_e', 'e#pm with E#geq 250 MeV;x[cm];y[cm]', 50,
                -500, +500, 100, -1000, 1000)
    ut.bookHist(h, 'ECAL_TP_gamma', '#gamma;x[cm];y[cm]', 167, -501, +501, 334,
                -1002, 1002)
    ut.bookHist(h, 'ECAL_Alt_gamma', '#gamma;x[cm];y[cm]', 50, -500, +500, 100,
                -1000, 1000)
    ut.bookHist(h, 'ECAL_e_E', 'e#pm;E[GeV/c^{2}];', 100, 0, 1)
    ut.bookHist(h, 'ECAL_gamma_E', '#gamma;E[GeV/c^{2}];', 100, 0, 1)
    ch = r.TChain('cbmsim')
    ch.Add(args.inputfile)
    n = ch.GetEntries()
    log.info(n)
    i = 0
    for event in ch:
        if i % 10000 == 0:
            log.info('{}/{}'.format(i, n))
        i += 1
        muon = False
        for hit in event.strawtubesPoint:
            if hit:
                if not hit.GetEnergyLoss() > 0:
                    continue
                trid = hit.GetTrackID()
                assert trid > 0
                weight = event.MCTrack[trid].GetWeight()
                x = hit.GetX()
                y = hit.GetY()
                z = hit.GetZ()
                px = hit.GetPx()
                py = hit.GetPy()
                pz = hit.GetPz()
                pt = np.hypot(px, py)
                P = np.hypot(pz, pt)
                pid = hit.PdgCode()
                assert pid not in [12, -12, 14, -14, 16, -16]
                detector_ID = hit.GetDetectorID()
                station = detector_ID // 10000000
                h['T{}_all'.format(station)].Fill(x, y, weight)
                if abs(pid) == 13:
                    muon = True
                    muonid = trid
                    h['T{}_mu'.format(station)].Fill(x, y, weight)
                    h['mu_p'].Fill(P, weight)
                    h['mu_pt'].Fill(pt, weight)
                    h['mu_ppt'].Fill(P, pt, weight)
        for hit in event.EcalPoint:
            if hit:
                if not hit.GetEnergyLoss() > 0:
                    continue
                trid = hit.GetTrackID()
                assert trid > 0
                weight = event.MCTrack[trid].GetWeight()
                x = hit.GetX()
                y = hit.GetY()
                px = hit.GetPx()
                py = hit.GetPy()
                pz = hit.GetPz()
                pt = np.hypot(px, py)
                P = np.hypot(pz, pt)
                pid = hit.PdgCode()
                if pid in [12, -12, 14, -14, 16, -16]:
                    continue
                h['ECAL_TP_all'].Fill(x, y, weight)
                h['ECAL_Alt_all'].Fill(x, y, weight)
                if abs(pid) == 13:
                    muon = True
                    muonid = trid
                    h['mu_p'].Fill(P, weight)
                    h['mu_pt'].Fill(pt, weight)
                    h['mu_ppt'].Fill(P, pt, weight)
                    h['ECAL_TP_mu'].Fill(x, y, weight)
                    h['ECAL_Alt_mu'].Fill(x, y, weight)
                elif abs(pid) == 11:
                    Esq = px ** 2 + py ** 2 + pz ** 2 + 0.000511 ** 2
                    h['ECAL_e_E'].Fill(np.sqrt(Esq), weight)
                    h['ECAL_TP_e'].Fill(x, y, weight)
                    h['ECAL_Alt_e'].Fill(x, y, weight)
                elif abs(pid) == 22:
                    Esq = px ** 2 + py ** 2 + pz ** 2
                    h['ECAL_gamma_E'].Fill(np.sqrt(Esq), weight)
                    h['ECAL_TP_gamma'].Fill(x, y, weight)
                    h['ECAL_Alt_gamma'].Fill(x, y, weight)
        for hit in event.TTPoint:
            if hit:
                if not hit.GetEnergyLoss() > 0:
                    continue
                trid = hit.GetTrackID()
                assert trid > 0
                detID = hit.GetDetectorID()
                weight = event.MCTrack[trid].GetWeight()
                x = hit.GetX()
                y = hit.GetY()
                z = hit.GetZ()
                px = hit.GetPx()
                py = hit.GetPy()
                pz = hit.GetPz()
                pt = np.hypot(px, py)
                P = np.hypot(pz, pt)
                pid = hit.PdgCode()
                assert pid not in [12, -12, 14, -14, 16, -16]
                if detID == 0:
                    h['TargetTracker_all'].Fill(x, y, weight)
                h['TargetTracker_xvsz_all'].Fill(z, x, weight)
                h['TargetTracker_yvsz_all'].Fill(z, y, weight)
                if abs(pid) == 13:
                    muon = True
                    muonid = trid
                    h['mu_p'].Fill(P, weight)
                    h['mu_pt'].Fill(pt, weight)
                    h['mu_ppt'].Fill(P, pt, weight)
                    if detID == 0:
                        h['TargetTracker_mu'].Fill(x, y, weight)
                    h['TargetTracker_xvsz_mu'].Fill(z, x, weight)
                    h['TargetTracker_yvsz_mu'].Fill(z, y, weight)
        for hit in event.ShipRpcPoint:
            if hit:
                if not hit.GetEnergyLoss() > 0:
                    continue
                trid = hit.GetTrackID()
                assert trid > 0
                detID = hit.GetDetectorID()
                nplane = detID - 10000
                weight = event.MCTrack[trid].GetWeight()
                x = hit.GetX()
                y = hit.GetY()
                z = hit.GetZ()
                px = hit.GetPx()
                py = hit.GetPy()
                pz = hit.GetPz()
                pt = np.hypot(px, py)
                P = np.hypot(pz, pt)
                pid = hit.PdgCode()
                assert pid not in [12, -12, 14, -14, 16, -16]
                h['NuTauMu_all'].Fill(x, y, weight)
                if nplane >= 0:
                    h['NuTauMu_all_{}'.format(nplane)].Fill(x, y, weight)
                h['NuTauMu_xvsz_all'].Fill(z, x, weight)
                h['NuTauMu_yvsz_all'].Fill(z, y, weight)
                if detID == 10000:
                    h['NuTauMu_all_p'].Fill(P, weight)
                    h['NuTauMu_all_pt'].Fill(pt, weight)
                    h['NuTauMu_all_ppt'].Fill(P, pt, weight)
                if abs(pid) == 13:
                    muon = True
                    muonid = trid
                    h['mu_p'].Fill(P, weight)
                    h['mu_pt'].Fill(pt, weight)
                    h['mu_ppt'].Fill(P, pt, weight)
                    h['NuTauMu_mu'].Fill(x, y, weight)
                    if nplane >= 0:
                        # fill the histogram corresponding to nplane
                        h['NuTauMu_mu_{}'.format(nplane)].Fill(x, y, weight)
                    if detID == 10000:
                        h['NuTauMu_mu_p'].Fill(P, weight)
                        h['NuTauMu_mu_pt'].Fill(pt, weight)
                        h['NuTauMu_mu_ppt'].Fill(P, pt, weight)
                    h['NuTauMu_xvsz_mu'].Fill(z, x, weight)
                    h['NuTauMu_yvsz_mu'].Fill(z, y, weight)
        for hit in event.TimeDetPoint:
            if hit:
                if not hit.GetEnergyLoss() > 0:
                    continue
                trid = hit.GetTrackID()
                assert trid > 0
                weight = event.MCTrack[trid].GetWeight()
                x = hit.GetX()
                y = hit.GetY()
                z = hit.GetZ()
                px = hit.GetPx()
                py = hit.GetPy()
                pz = hit.GetPz()
                pt = np.hypot(px, py)
                P = np.hypot(pz, pt)
                pid = hit.PdgCode()
                assert pid not in [12, -12, 14, -14, 16, -16]
                h['timing_all'].Fill(x, y, weight)
                if abs(pid) == 13:
                    muon = True
                    muonid = trid
                    h['mu_p'].Fill(P, weight)
                    h['mu_pt'].Fill(pt, weight)
                    h['mu_ppt'].Fill(P, pt, weight)
                    h['timing_mu'].Fill(x, y, weight)
        for hit in event.muonPoint:
            if hit:
                if not hit.GetEnergyLoss() > 0:
                    continue
                trid = hit.GetTrackID()
                assert trid > 0
                weight = event.MCTrack[trid].GetWeight()
                x = hit.GetX()
                y = hit.GetY()
                px = hit.GetPx()
                py = hit.GetPy()
                pz = hit.GetPz()
                pt = np.hypot(px, py)
                P = np.hypot(pz, pt)
                pid = hit.PdgCode()
                assert pid not in [12, -12, 14, -14, 16, -16]
                h['muon_tiles_all'].Fill(x, y, weight)
                h['muon_bars_x_all'].Fill(x, y, weight)
                h['muon_bars_y_all'].Fill(x, y, weight)
                if abs(pid) == 13:
                    muon = True
                    muonid = trid
                    h['mu_p'].Fill(P, weight)
                    h['mu_pt'].Fill(pt, weight)
                    h['mu_ppt'].Fill(P, pt, weight)
                    h['muon_tiles_mu'].Fill(x, y, weight)
                    h['muon_bars_y_mu'].Fill(x, y, weight)
                    h['muon_bars_x_mu'].Fill(x, y, weight)
        for hit in event.vetoPoint:
            if hit:
                if not hit.GetEnergyLoss() > 0:
                    continue
                trid = hit.GetTrackID()
                assert trid > 0
                weight = event.MCTrack[trid].GetWeight()
                x = hit.GetX()
                y = hit.GetY()
                z = hit.GetZ()
                px = hit.GetPx()
                py = hit.GetPy()
                pz = hit.GetPz()
                pt = np.hypot(px, py)
                P = np.hypot(pz, pt)
                pid = hit.PdgCode()
                detector_ID = hit.GetDetectorID()
                assert pid not in [12, -12, 14, -14, 16, -16]
                phi = r.TMath.ATan2(y, x)
                if 99999 < detector_ID < 999999:
                    h['SBT_Liquid_all'].Fill(z, phi, weight)
                    if abs(pid) == 13:
                        muon = True
                        muonid = trid
                        h['mu_p'].Fill(P, weight)
                        h['mu_pt'].Fill(pt, weight)
                        h['mu_ppt'].Fill(P, pt, weight)
                        h['SBT_Liquid_mu'].Fill(z, phi, weight)
                    continue
                elif detector_ID > 999999:
                    h['SBT_Plastic_all'].Fill(z, phi, weight)
                    if abs(pid) == 13:
                        muon = True
                        muonid = trid
                        h['mu_p'].Fill(P, weight)
                        h['mu_pt'].Fill(pt, weight)
                        h['mu_ppt'].Fill(P, pt, weight)
                        h['SBT_Plastic_mu'].Fill(z, phi, weight)
                    continue
                log.warn('Unidentified vetoPoint.')
        if muon:
            original_muon = event.MCTrack[muonid]
            weight = original_muon.GetWeight()
            h['mu_p_original'].Fill(original_muon.GetP(), weight)
            h['mu_pt_original'].Fill(original_muon.GetPt(), weight)
            h['mu_ppt_original'].Fill(original_muon.GetP(),
                                      original_muon.GetPt(), weight)
            # NOTE: muons are counted several times if they create several hits
            #       But the original muon is only counted once.
    log.info('Event loop done')
    for key in h:
        classname = h[key].Class().GetName()
        if 'TH' in classname or 'TP' in classname:
            h[key].Write()
    f.Close()
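
All of the hit loops above derive the same two kinematic quantities from a hit's momentum components. A minimal standalone sketch of that computation (the helper name is mine, not from the source):

import numpy as np

def hit_kinematics(px, py, pz):
    """Return (pt, P): transverse and total momentum from components."""
    pt = np.hypot(px, py)   # sqrt(px^2 + py^2)
    P = np.hypot(pz, pt)    # sqrt(px^2 + py^2 + pz^2)
    return pt, P

print(hit_kinematics(3.0, 4.0, 12.0))  # (5.0, 13.0)

Using np.hypot instead of an explicit sqrt of squares avoids overflow for large components.
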
Example #47
0
def GetSeriesYears(dom):
    #used by the 'add a story arc' option to individually populate the Series Year for each series within the given arc.
    #series year is required for a lot of functionality.
    series = dom.getElementsByTagName('volume')
    tempseries = {}
    serieslist = []
    for dm in series:
        try:
            totids = len(dm.getElementsByTagName('id'))
            idc = 0
            while (idc < totids):
                if dm.getElementsByTagName(
                        'id')[idc].parentNode.nodeName == 'volume':
                    tempseries['ComicID'] = dm.getElementsByTagName(
                        'id')[idc].firstChild.wholeText
                idc += 1
        except:
            logger.warn(
                'There was a problem retrieving a comicid for a series within the arc. This will most likely have to be corrected manually.'
            )
            tempseries['ComicID'] = 'None'

        tempseries['Series'] = 'None'
        tempseries['Publisher'] = 'None'
        try:
            totnames = len(dm.getElementsByTagName('name'))
            namesc = 0
            while (namesc < totnames):
                if dm.getElementsByTagName(
                        'name')[namesc].parentNode.nodeName == 'volume':
                    tempseries['Series'] = dm.getElementsByTagName(
                        'name')[namesc].firstChild.wholeText
                elif dm.getElementsByTagName(
                        'name')[namesc].parentNode.nodeName == 'publisher':
                    tempseries['Publisher'] = dm.getElementsByTagName(
                        'name')[namesc].firstChild.wholeText
                namesc += 1
        except:
            logger.warn(
                'There was a problem retrieving a Series Name or Publisher for a series within the arc. This will have to be corrected manually.'
            )

        try:
            tempseries['SeriesYear'] = dm.getElementsByTagName(
                'start_year')[0].firstChild.wholeText
        except:
            logger.warn(
                'There was a problem retrieving the start year for a particular series within the story arc.'
            )
            tempseries['SeriesYear'] = '0000'

        #cause you know, dufus'...
        if tempseries['SeriesYear'][-1:] == '-':
            tempseries['SeriesYear'] = tempseries['SeriesYear'][:-1]

        desdeck = 0
        tempseries['Volume'] = 'None'

        #the description field actually holds the Volume# - so let's grab it
        try:
            descchunk = dm.getElementsByTagName(
                'description')[0].firstChild.wholeText
            comic_desc = drophtml(descchunk)
            desdeck += 1
        except:
            comic_desc = 'None'

        #sometimes the deck has volume labels
        try:
            deckchunk = dm.getElementsByTagName('deck')[0].firstChild.wholeText
            comic_deck = deckchunk
            desdeck += 1
        except:
            comic_deck = 'None'

        while (desdeck > 0):
            if desdeck == 1:
                if comic_desc == 'None':
                    comicDes = comic_deck[:30]
                else:
                    #extract the first 60 characters
                    comicDes = comic_desc[:60].replace('New 52', '')
            elif desdeck == 2:
                #extract the characters from the deck
                comicDes = comic_deck[:30].replace('New 52', '')
            else:
                break

            i = 0
            while (i < 2):
                if 'volume' in comicDes.lower():
                    #found volume - let's grab it.
                    v_find = comicDes.lower().find('volume')
                    #grab the next 15 chars (6 for 'volume' + 1 for space + up to 3 for the actual vol #)
                    #the extra slack allows for text numbering
                    #sometimes it's volume 5 and occasionally it's fifth volume.
                    if i == 0:
                        vfind = comicDes[v_find:v_find +
                                         15]  #if it's volume 5 format
                        basenums = {
                            'zero': '0',
                            'one': '1',
                            'two': '2',
                            'three': '3',
                            'four': '4',
                            'five': '5',
                            'six': '6',
                            'seven': '7',
                            'eight': '8',
                            'nine': '9',
                            'ten': '10',
                            'i': '1',
                            'ii': '2',
                            'iii': '3',
                            'iv': '4',
                            'v': '5'
                        }
                        logger.fdebug('volume X format - ' + str(i) + ': ' +
                                      vfind)
                    else:
                        vfind = comicDes[:
                                         v_find]  # if it's fifth volume format
                        basenums = {
                            'zero': '0',
                            'first': '1',
                            'second': '2',
                            'third': '3',
                            'fourth': '4',
                            'fifth': '5',
                            'sixth': '6',
                            'seventh': '7',
                            'eighth': '8',
                            'ninth': '9',
                            'tenth': '10',
                            'i': '1',
                            'ii': '2',
                            'iii': '3',
                            'iv': '4',
                            'v': '5'
                        }
                        logger.fdebug('X volume format - ' + str(i) + ': ' +
                                      vfind)
                    volconv = ''
                    for nums in basenums:
                        if nums in vfind.lower():
                            sconv = basenums[nums]
                            vfind = re.sub(nums, sconv, vfind.lower())
                            break
                    #logger.info('volconv: ' + str(volconv))

                    if i == 0:
                        volthis = vfind.lower().find('volume')
                        volthis = volthis + 6  # add on the actual word to the position so that we can grab the subsequent digit
                        vfind = vfind[volthis:volthis +
                                      4]  # grab the next 4 characters ;)
                    elif i == 1:
                        volthis = vfind.lower().find('volume')
                        vfind = vfind[volthis - 4:
                                      volthis]  # grab the next 4 characters ;)

                    if '(' in vfind:
                        #bracket detected in versioning
                        vfindit = re.findall('[^()]+', vfind)
                        vfind = vfindit[0]
                    vf = re.findall('[^<>]+', vfind)
                    try:
                        ledigit = re.sub("[^0-9]", "", vf[0])
                        if ledigit != '':
                            tempseries['Volume'] = ledigit
                            logger.fdebug(
                                "Volume information found! Adding to series record : volume "
                                + tempseries['Volume'])
                            break
                    except:
                        pass

                    i += 1
                else:
                    i += 1

            if tempseries['Volume'] == 'None':
                logger.fdebug('tempseries[Volume]:' +
                              str(tempseries['Volume']))
                desdeck -= 1
            else:
                break

        serieslist.append({
            "ComicID": tempseries['ComicID'],
            "ComicName": tempseries['Series'],
            "SeriesYear": tempseries['SeriesYear'],
            "Publisher": tempseries['Publisher'],
            "Volume": tempseries['Volume']
        })

    return serieslist
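
The volume-extraction logic above is easier to follow in isolation. Below is a simplified, standalone sketch of the same word-to-digit idea; the function name and the reduced word table are illustrative, not from mylar:

import re

def extract_volume(description):
    """Simplified sketch: find a volume number in free text, covering
    both 'Volume 5' and 'Fifth Volume' style labels."""
    words = {'fourth': '4', 'fifth': '5', 'first': '1', 'second': '2',
             'third': '3', 'one': '1', 'two': '2', 'three': '3',
             'four': '4', 'five': '5'}
    text = description.lower()
    for word, digit in words.items():
        text = text.replace(word, digit)  # naive: replaces anywhere in the text
    match = re.search(r'volume\s*(\d+)|(\d+)\s*volume', text)
    if match:
        return match.group(1) or match.group(2)
    return None

print(extract_volume('Batman Volume 3 (2016)'))   # 3
print(extract_volume('The fifth volume of ...'))  # 5
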
Example #48
0
def UpdateDates(dom):
    issues = dom.getElementsByTagName('issue')
    tempissue = {}
    issuelist = []
    for dm in issues:
        tempissue['ComicID'] = 'None'
        tempissue['IssueID'] = 'None'
        try:
            totids = len(dm.getElementsByTagName('id'))
            idc = 0
            while (idc < totids):
                if dm.getElementsByTagName(
                        'id')[idc].parentNode.nodeName == 'volume':
                    tempissue['ComicID'] = dm.getElementsByTagName(
                        'id')[idc].firstChild.wholeText
                if dm.getElementsByTagName(
                        'id')[idc].parentNode.nodeName == 'issue':
                    tempissue['IssueID'] = dm.getElementsByTagName(
                        'id')[idc].firstChild.wholeText
                idc += 1
        except:
            logger.warn(
                'There was a problem retrieving a comicid/issueid for the given issue. This will most likely have to be corrected manually.'
            )

        tempissue['SeriesTitle'] = 'None'
        tempissue['IssueTitle'] = 'None'
        try:
            totnames = len(dm.getElementsByTagName('name'))
            namesc = 0
            while (namesc < totnames):
                if dm.getElementsByTagName(
                        'name')[namesc].parentNode.nodeName == 'issue':
                    tempissue['IssueTitle'] = dm.getElementsByTagName(
                        'name')[namesc].firstChild.wholeText
                elif dm.getElementsByTagName(
                        'name')[namesc].parentNode.nodeName == 'volume':
                    tempissue['SeriesTitle'] = dm.getElementsByTagName(
                        'name')[namesc].firstChild.wholeText
                namesc += 1
        except:
            logger.warn(
                'There was a problem retrieving the Series Title / Issue Title for a series within the arc. This will have to be corrected manually.'
            )

        try:
            tempissue['CoverDate'] = dm.getElementsByTagName(
                'cover_date')[0].firstChild.wholeText
        except:
            tempissue['CoverDate'] = '0000-00-00'
        try:
            tempissue['StoreDate'] = dm.getElementsByTagName(
                'store_date')[0].firstChild.wholeText
        except:
            tempissue['StoreDate'] = '0000-00-00'
        try:
            tempissue['IssueNumber'] = dm.getElementsByTagName(
                'issue_number')[0].firstChild.wholeText
        except:
            logger.fdebug(
                'No Issue Number available - Trade Paperbacks, Graphic Novels and Compendiums are not supported as of yet.'
            )
            tempissue['IssueNumber'] = 'None'
        try:
            tempissue['date_last_updated'] = dm.getElementsByTagName(
                'date_last_updated')[0].firstChild.wholeText
        except:
            tempissue['date_last_updated'] = '0000-00-00'

        issuelist.append({
            'ComicID': tempissue['ComicID'],
            'IssueID': tempissue['IssueID'],
            'SeriesTitle': tempissue['SeriesTitle'],
            'IssueTitle': tempissue['IssueTitle'],
            'CoverDate': tempissue['CoverDate'],
            'StoreDate': tempissue['StoreDate'],
            'IssueNumber': tempissue['IssueNumber'],
            'Date_Last_Updated': tempissue['date_last_updated']
        })

    return issuelist
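
Both GetSeriesYears and UpdateDates lean on minidom's parentNode to tell volume-level tags apart from issue-level ones. A tiny runnable illustration of that check (the XML is fabricated):

from xml.dom.minidom import parseString

xml = """<issue>
  <id>1001</id>
  <volume><id>4050</id><name>Example Series</name></volume>
</issue>"""

dom = parseString(xml)
for node in dom.getElementsByTagName('id'):
    # parentNode.nodeName distinguishes issue-level ids from volume-level ids
    print(node.parentNode.nodeName, node.firstChild.wholeText)
# issue 1001
# volume 4050
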
Example #49
0
def GetComicInfo(comicid, dom, safechk=None):
    if safechk is None:
        #safetycheck when checking comicvine. If it times out, increment the chk on retry attempts up until 5 tries then abort.
        safechk = 1
    elif safechk > 4:
        logger.error(
            'Unable to add / refresh the series due to inability to retrieve data from ComicVine. You might want to try a bit later and/or make sure ComicVine is up.'
        )
        return
    #comicvine isn't as up-to-date with issue counts..
    #so this can get really buggered, really fast.
    tracks = dom.getElementsByTagName('issue')
    try:
        cntit = dom.getElementsByTagName(
            'count_of_issues')[0].firstChild.wholeText
    except:
        cntit = len(tracks)
    trackcnt = len(tracks)
    logger.fdebug("number of issues I counted: " + str(trackcnt))
    logger.fdebug("number of issues CV says it has: " + str(cntit))
    # if the two don't match, use trackcnt since count_of_issues might not be up-to-date for some reason
    if int(trackcnt) != int(cntit):
        cntit = trackcnt
        vari = "yes"
    else:
        vari = "no"
    logger.fdebug("vari is set to: " + str(vari))
    #if str(trackcnt) != str(int(cntit)+2):
    #    cntit = int(cntit) + 1
    comic = {}
    comicchoice = []
    cntit = int(cntit)
    #retrieve the first xml tag (<tag>data</tag>)
    #that the parser finds with name tagName:
    # to return the parent name of the <name> node : dom.getElementsByTagName('name')[0].parentNode.nodeName
    # where [0] denotes the number of the name field(s)
    # where nodeName denotes the parentNode : ComicName = results, publisher = publisher, issues = issue
    try:
        names = len(dom.getElementsByTagName('name'))
        n = 0
        comic[
            'ComicPublisher'] = 'Unknown'  #set this to a default value here so that it will carry through properly
        while (n < names):
            if dom.getElementsByTagName(
                    'name')[n].parentNode.nodeName == 'results':
                try:
                    comic['ComicName'] = dom.getElementsByTagName(
                        'name')[n].firstChild.wholeText
                    comic['ComicName'] = comic['ComicName'].rstrip()
                except:
                    logger.error(
                        'There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible AND that you have provided your OWN ComicVine API key.'
                    )
                    return

            elif dom.getElementsByTagName(
                    'name')[n].parentNode.nodeName == 'publisher':
                try:
                    comic['ComicPublisher'] = dom.getElementsByTagName(
                        'name')[n].firstChild.wholeText
                except:
                    comic['ComicPublisher'] = "Unknown"

            n += 1
    except:
        logger.warn(
            'Something went wrong retrieving from ComicVine. Ensure your API is up-to-date and that comicvine is accessible'
        )
        return

    try:
        comic['ComicYear'] = dom.getElementsByTagName(
            'start_year')[0].firstChild.wholeText
    except:
        comic['ComicYear'] = '0000'

    #safety check, cause you know, dufus'...
    if comic['ComicYear'][-1:] == '-':
        comic['ComicYear'] = comic['ComicYear'][:-1]

    try:
        comic['ComicURL'] = dom.getElementsByTagName(
            'site_detail_url')[trackcnt].firstChild.wholeText
    except:
        #this should never be an exception. If it is, it's probably due to CV timing out - so let's sleep for a bit then retry.
        logger.warn(
            'Unable to retrieve URL for volume. This is usually due to a timeout to CV, or going over the API. Retrying again in 10s.'
        )
        time.sleep(10)
        safechk += 1
        return GetComicInfo(comicid, dom, safechk)  # propagate the retry's result

    desdeck = 0
    #the description field actually holds the Volume# - so let's grab it
    try:
        descchunk = dom.getElementsByTagName(
            'description')[0].firstChild.wholeText
        comic_desc = drophtml(descchunk)
        desdeck += 1
    except:
        comic_desc = 'None'

    #sometimes the deck has volume labels
    try:
        deckchunk = dom.getElementsByTagName('deck')[0].firstChild.wholeText
        comic_deck = deckchunk
        desdeck += 1
    except:
        comic_deck = 'None'

    #comic['ComicDescription'] = comic_desc

    try:
        comic['Aliases'] = dom.getElementsByTagName(
            'aliases')[0].firstChild.wholeText
        comic['Aliases'] = re.sub('\n', '##', comic['Aliases']).strip()
        if comic['Aliases'][-2:] == '##':
            comic['Aliases'] = comic['Aliases'][:-2]
        #logger.fdebug('Aliases: ' + str(aliases))
    except:
        comic['Aliases'] = 'None'

    comic['ComicVersion'] = 'None'  #noversion
    #logger.info('comic_desc:' + comic_desc)
    #logger.info('comic_deck:' + comic_deck)
    #logger.info('desdeck: ' + str(desdeck))

    #figure out if it's a print / digital edition.
    comic['Type'] = 'None'
    if comic_deck != 'None':
        if any(
            ['print' in comic_deck.lower(), 'digital' in comic_deck.lower()]):
            if 'print' in comic_deck.lower():
                comic['Type'] = 'Print'
            elif 'digital' in comic_deck.lower():
                comic['Type'] = 'Digital'
    if comic_desc != 'None' and comic['Type'] == 'None':
        if 'print' in comic_desc[:60].lower(
        ) and 'print edition can be found' not in comic_desc.lower():
            comic['Type'] = 'Print'
        elif 'digital' in comic_desc[:60].lower(
        ) and 'digital edition can be found' not in comic_desc.lower():
            comic['Type'] = 'Digital'
        else:
            comic['Type'] = 'Print'

    while (desdeck > 0):
        if desdeck == 1:
            if comic_desc == 'None':
                comicDes = comic_deck[:30]
            else:
                #extract the first 60 characters
                comicDes = comic_desc[:60].replace('New 52', '')
        elif desdeck == 2:
            #extract the characters from the deck
            comicDes = comic_deck[:30].replace('New 52', '')
        else:
            break

        i = 0
        while (i < 2):
            if 'volume' in comicDes.lower():
                #found volume - let's grab it.
                v_find = comicDes.lower().find('volume')
                #grab the next 15 chars (6 for 'volume' + 1 for space + up to 3 for the actual vol #)
                #the extra slack allows for text numbering
                #sometimes it's volume 5 and occasionally it's fifth volume.
                if i == 0:
                    vfind = comicDes[v_find:v_find +
                                     15]  #if it's volume 5 format
                    basenums = {
                        'zero': '0',
                        'one': '1',
                        'two': '2',
                        'three': '3',
                        'four': '4',
                        'five': '5',
                        'six': '6',
                        'seven': '7',
                        'eight': '8',
                        'nine': '9',
                        'ten': '10',
                        'i': '1',
                        'ii': '2',
                        'iii': '3',
                        'iv': '4',
                        'v': '5'
                    }
                    logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)
                else:
                    vfind = comicDes[:v_find]  # if it's fifth volume format
                    basenums = {
                        'zero': '0',
                        'first': '1',
                        'second': '2',
                        'third': '3',
                        'fourth': '4',
                        'fifth': '5',
                        'sixth': '6',
                        'seventh': '7',
                        'eighth': '8',
                        'ninth': '9',
                        'tenth': '10',
                        'i': '1',
                        'ii': '2',
                        'iii': '3',
                        'iv': '4',
                        'v': '5'
                    }
                    logger.fdebug('X volume format - ' + str(i) + ': ' + vfind)
                volconv = ''
                for nums in basenums:
                    if nums in vfind.lower():
                        sconv = basenums[nums]
                        vfind = re.sub(nums, sconv, vfind.lower())
                        break
                #logger.info('volconv: ' + str(volconv))

                #now we attempt to find the character position after the word 'volume'
                if i == 0:
                    volthis = vfind.lower().find('volume')
                    volthis = volthis + 6  # add on the actual word to the position so that we can grab the subsequent digit
                    vfind = vfind[volthis:volthis +
                                  4]  # grab the next 4 characters ;)
                elif i == 1:
                    volthis = vfind.lower().find('volume')
                    vfind = vfind[volthis -
                                  4:volthis]  # grab the next 4 characters ;)

                if '(' in vfind:
                    #bracket detected in versioning
                    vfindit = re.findall('[^()]+', vfind)
                    vfind = vfindit[0]
                vf = re.findall('[^<>]+', vfind)
                try:
                    ledigit = re.sub("[^0-9]", "", vf[0])
                    if ledigit != '':
                        comic['ComicVersion'] = ledigit
                        logger.fdebug(
                            "Volume information found! Adding to series record : volume "
                            + comic['ComicVersion'])
                        break
                except:
                    pass

                i += 1
            else:
                i += 1

        if comic['ComicVersion'] == 'None':
            logger.fdebug('comic[ComicVersion]:' + str(comic['ComicVersion']))
            desdeck -= 1
        else:
            break

    if vari == "yes":
        comic['ComicIssues'] = str(cntit)
    else:
        comic['ComicIssues'] = dom.getElementsByTagName(
            'count_of_issues')[0].firstChild.wholeText

    comic['ComicImage'] = dom.getElementsByTagName(
        'super_url')[0].firstChild.wholeText
    comic['ComicImageALT'] = dom.getElementsByTagName(
        'small_url')[0].firstChild.wholeText

    comic['FirstIssueID'] = dom.getElementsByTagName(
        'id')[0].firstChild.wholeText

    #    print ("fistIss:" + str(comic['FirstIssueID']))
    #    comicchoice.append({
    #        'ComicName':              comic['ComicName'],
    #        'ComicYear':              comic['ComicYear'],
    #        'Comicid':                comicid,
    #        'ComicURL':               comic['ComicURL'],
    #        'ComicIssues':            comic['ComicIssues'],
    #        'ComicImage':             comic['ComicImage'],
    #        'ComicVolume':            ParseVol,
    #        'ComicPublisher':         comic['ComicPublisher']
    #        })

    #    comic['comicchoice'] = comicchoice
    return comic
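
GetComicInfo retries itself through the safechk counter when ComicVine times out. The same bounded-retry idea as a generic sketch, detached from mylar (the names and the IOError choice are assumptions):

import time

def fetch_with_retries(fetch, max_tries=5, delay=10):
    """Hypothetical helper: call fetch() up to max_tries times,
    sleeping between attempts; re-raise on the final failure."""
    for attempt in range(1, max_tries + 1):
        try:
            return fetch()
        except IOError:
            if attempt == max_tries:
                raise
            time.sleep(delay)  # back off before the next attempt

An explicit loop like this avoids the recursion in the original, where the retry's result must be returned by hand.
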
Example #50
0
def pulldetails(comicid,
                type,
                issueid=None,
                offset=1,
                arclist=None,
                comicidlist=None):
    #import easy to use xml parser called minidom:
    from xml.dom.minidom import parseString

    if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
        logger.warn(
            'You have not specified your own ComicVine API key - it\'s a requirement. Get your own @ http://api.comicvine.com.'
        )
        return
    else:
        comicapi = mylar.CONFIG.COMICVINE_API

    if type == 'comic':
        if not comicid.startswith('4050-'): comicid = '4050-' + comicid
        PULLURL = mylar.CVURL + 'volume/' + str(comicid) + '/?api_key=' + str(
            comicapi
        ) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases'
    elif type == 'issue':
        if mylar.CONFIG.CV_ONLY:
            cv_type = 'issues'
            if arclist is None:
                searchset = 'filter=volume:' + str(
                    comicid
                ) + '&field_list=cover_date,description,id,image,issue_number,name,date_last_updated,store_date'
            else:
                searchset = 'filter=id:' + (
                    arclist
                ) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume'
        else:
            cv_type = 'volume/' + str(comicid)
            searchset = 'name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,store_date'
        PULLURL = mylar.CVURL + str(cv_type) + '/?api_key=' + str(
            comicapi) + '&format=xml&' + str(searchset) + '&offset=' + str(
                offset)
    elif type == 'firstissue':
        #this is used ONLY for CV_ONLY
        PULLURL = mylar.CVURL + 'issues/?api_key=' + str(
            comicapi) + '&format=xml&filter=id:' + str(
                issueid) + '&field_list=cover_date'
    elif type == 'storyarc':
        PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(
            comicapi) + '&format=xml&filter=name:' + str(
                issueid) + '&field_list=cover_date'
    elif type == 'comicyears':
        PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(
            comicapi
        ) + '&format=xml&filter=id:' + str(
            comicidlist
        ) + '&field_list=name,id,start_year,publisher,description,deck&offset=' + str(
            offset)
    elif type == 'import':
        PULLURL = mylar.CVURL + 'issues/?api_key=' + str(
            comicapi
        ) + '&format=xml&filter=id:' + (
            comicidlist
        ) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume' + '&offset=' + str(
            offset)
    elif type == 'update_dates':
        PULLURL = mylar.CVURL + 'issues/?api_key=' + str(
            comicapi
        ) + '&format=xml&filter=id:' + (
            comicidlist
        ) + '&field_list=date_last_updated, id, issue_number, store_date, cover_date, name, volume ' + '&offset=' + str(
            offset)

    #logger.info('CV.PULLURL: ' + PULLURL)
    #new CV API restriction - one api request / second, so pause at least 2s between requests.
    if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CONFIG.CVAPI_RATE)

    #download the file:
    #set payload to None for now...
    payload = None

    try:
        r = requests.get(PULLURL,
                         params=payload,
                         verify=mylar.CONFIG.CV_VERIFY,
                         headers=mylar.CV_HEADERS)
    except Exception, e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return
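
The sleep before every request enforces ComicVine's one-request-per-second limit. A small, modern-Python sketch of the same throttling that can be reused across calls (the class name is illustrative):

import time

class RateLimiter:
    """Minimal sketch: keep at least min_interval seconds between calls."""
    def __init__(self, min_interval=2.0):
        self.min_interval = min_interval
        self._last = 0.0

    def wait(self):
        elapsed = time.monotonic() - self._last
        if elapsed < self.min_interval:
            time.sleep(self.min_interval - elapsed)
        self._last = time.monotonic()

limiter = RateLimiter(2.0)
limiter.wait()  # call before each API request

Unlike a flat sleep, this only pauses when the previous call was recent.
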
Example #51
0
def updateNNM(cmdbToNnmIds, nnmConnection, framework):
    ''' map(string, string), NnmConnection, Framework -> None '''

    filePath = _getUpdatedIdsFileName(nnmConnection.serverIp)

    persistedUpdatedIds = _readUpdatedIdsFromFile(filePath)
    logger.debug("Have read %s previously updated ID pairs from file" % len(persistedUpdatedIds))

    cmdbIdsCount = len(cmdbToNnmIds)

    # filter out IDs that have not changed
    _obsoletePersistedIds = {}
    for cmdbId, nnmId in persistedUpdatedIds.items():
        if cmdbToNnmIds.get(cmdbId) == nnmId:
            del cmdbToNnmIds[cmdbId]
        else:
            _obsoletePersistedIds[cmdbId] = nnmId

    # filter out persisted IDs that are obsolete
    for obsoleteId in _obsoletePersistedIds.keys():
        del persistedUpdatedIds[obsoleteId]

    stub = _createNnmStub(nnmConnection)

    filteredCmdbIdsCount = len(cmdbToNnmIds)
    logger.debug("Custom Attribute UCMDB_ID on %d NNM nodes in the NNM topology will be updated with UCMDB IDs" % filteredCmdbIdsCount)
    if filteredCmdbIdsCount > 3000:
        logger.warn("Getting ready to update Custom Attribute UCMDB_ID on %d NNM nodes in NNM" % filteredCmdbIdsCount)
        logger.warn("This process may take a while since the UCMDB_ID custom attribute in NNM can only be updated one node at a time. Check probeMgr-patternsDebug.log for status update.")

    updatedIds = {}
    counter = 0

    for cmdbId, nnmId in cmdbToNnmIds.items():

        nodes = _getNnmNodeById(stub, nnmId)
        if not nodes:
            logger.warn("Node with NNM ID %s doesn't exists in NNMi" % nnmId)
            framework.reportWarning("One of nodes was not pushed since it doesn't exist in NNMi")
            continue

        try:
            _updateNnmNode(nnmConnection, stub, cmdbId, nnmId, framework)
        except UnsupportedNnmMethodException:
            nnmConnection.version = 8
            logger.debug('Detected NNMi version 8.x')
            #retry the update
            _updateNnmNode(nnmConnection, stub, cmdbId, nnmId, framework)
        else:
            if nnmConnection.version == -1:
                nnmConnection.version = 9
                logger.debug('Detected NNMi version 9.x')

        logger.debug("(%d) Updated NNM ID: %s with UCMDB ID value %s" % (counter, nnmId, cmdbId))

        counter += 1
        updatedIds[cmdbId] = nnmId

    _saveUpdatedIdsToFile(filePath, persistedUpdatedIds, updatedIds)

    logger.debug("Finished updating %d (out of %d) nodes in NNM" % (filteredCmdbIdsCount, cmdbIdsCount))
Example #52
0
    def discover(self, connectionContext):
        cloud = vcloud.Vcloud()
        cloud.ipAddress = connectionContext.ipAddress
        cloud.urlString = connectionContext.urlString

        client = connectionContext.client
        agent = client.getAgent()
        vcloudClient = agent.getVcloudClient()

        organizationsByName = self.getOrganizations(vcloudClient)
        cloud.organizationsByName = organizationsByName

        for organization in organizationsByName.values():
            logger.debug(organization)

            organizationInstance = organization.getInstance()
            vdcByName = self.getVdcForOrganization(organizationInstance,
                                                   vcloudClient)
            organization.vdcByName = vdcByName

            for vdc in vdcByName.values():
                logger.debug(vdc)

                # additional references for resolving
                vdcInstance = vdc.getInstance()
                vdcReference = vdcInstance.getReference()
                vdcHref = vdcReference.getHref()
                organization._vdcByHref[vdcHref] = vdc

                vappByName = self.getVappsForVdc(vdcInstance, vcloudClient)
                vdc.vappsByName = vappByName

                for vapp in vappByName.values():
                    logger.debug(vapp)
                    vappInstance = vapp.getInstance()
                    vmsByName = self.getVmsForVapp(vappInstance, vcloudClient)
                    vapp.vmsByName = vmsByName

                    for vm in vmsByName.values():
                        vm._findHostKeyAndIps()

            catalogsByName = self.getCatalogsByNameForOrganization(
                organizationInstance, vcloudClient)
            organization.catalogsByName = catalogsByName
            for catalog in catalogsByName.values():
                logger.debug(catalog)

                catalogInstance = catalog.getInstance()
                catalogEntriesByType = self.getCatalogEntriesByTypeForCatalog(
                    catalogInstance, vcloudClient)

                mediaByName = catalogEntriesByType[VcloudReferenceType.MEDIA]
                vappTemplatesByName = catalogEntriesByType[
                    VcloudReferenceType.VAPP_TEMPLATE]

                catalog.mediaByName = mediaByName
                catalog.vappTemplatesByName = vappTemplatesByName

        vcloudAdmin = self.getAdmin(vcloudClient)
        if vcloudAdmin is not None:

            self.getVcloudDirectorVersion(cloud, vcloudAdmin)

            systemOrganization = self.getSystemOrganization(
                vcloudClient, vcloudAdmin)
            cloud.systemOrganization = systemOrganization

            if Configuration.SKIP_DUPLICATING_SYSTEM_ORG:
                self._removeSystemOrganizationDuplicates(cloud)

            adminOrganizationsByName = self.getAdminOrganizations(
                vcloudClient, vcloudAdmin)

            for adminOrganizationName, adminOrganization in adminOrganizationsByName.items(
            ):
                organization = organizationsByName.get(adminOrganizationName)
                if organization is not None:
                    organization.setAdminOrganization(adminOrganization)
                elif cloud.systemOrganization is not None and adminOrganizationName == cloud.systemOrganization.getName(
                ):
                    cloud.systemOrganization.setAdminOrganization(
                        adminOrganization)
                else:
                    logger.warn(
                        "No regular organization for admin organization '%s'" %
                        adminOrganizationName)

            providerVdcByName = self.getProviderVdc(vcloudClient, vcloudAdmin)
            cloud.providerVdcByName = providerVdcByName

            if not providerVdcByName:
                logger.debug("Provider vDC not available")

            for providerVdc in providerVdcByName.values():
                logger.debug(providerVdc)

                providerVdcInstance = providerVdc.getInstance()
                adminVdcByName = self.getAdminVdcForProviderVdc(
                    providerVdcInstance, vcloudClient)
                for adminVdcName, adminVdc in adminVdcByName.items():
                    for organization in cloud.organizationsByName.values():
                        vdc = organization.vdcByName.get(adminVdcName)
                        if vdc is not None:
                            vdc.setAdminVdc(adminVdc)
                            vdc.providerVdcName = providerVdc.getName()
        else:
            logger.debug("vCloud admin not available")

        self.discoverGlobalVcloudSettings(vcloudClient, cloud)

        return cloud
Example #53
0
def isHexadecimalName(dataName):
    if dataName and re.match(r"^[0-9a-fA-F]+$", dataName.strip()):
        logger.warn('Skipping Hex Software: ' + dataName)
        return 1
Example #54
0
def _report_db_host(topology, resolver):
    '@types: _Topology, dns_resolver.Resolver -> osh, list[osh]'
    osh, oshs = None, []
    if topology.db_info and resolver:
        hostname = topology.db_info.hostname
        logger.info("Resolve database address: %s" % hostname)
        try:
            try:
                ips = resolver.resolve_ips(hostname)
            except dns_resolver.ResolveException, e:
                logger.warn("Failed to resolve. %s" % e)
            else:
                osh, oshs = _report_host(sap.Address(hostname, ips))
        except ValueError, e:
            logger.warn("Failed to resolve %s" % e)

    return osh, oshs


def _report_application_ip(shell, topology, resolver):
    '@types: _Topology, dns_resolver.Resolver -> string'
    if topology.inst and resolver:
        hostname = topology.inst.getHostname()
        logger.info("Resolve host address from profile: %s" % hostname)
        if not hostname:
            cmd = "hostname"
            output = shell.execCmd(cmd)
            if not shell.getLastCmdReturnCode():
                hostname = output
            logger.info("Resolve host address from host: %s" % hostname)
Example #55
0
File: nzbget.py Project: sankarara/mylar
    def processor(self, nzbinfo):
        nzbid = nzbinfo['NZBID']
        try:
            logger.fdebug(
                'Now checking the active queue of nzbget for the download')
            queueinfo = self.server.listgroups()
        except Exception as e:
            logger.warn(
                'Error attempting to retrieve active queue listing: %s' % e)
            return {'status': False}
        else:
            logger.fdebug('valid queue result returned. Analyzing...')
            queuedl = [qu for qu in queueinfo if qu['NZBID'] == nzbid]
            if len(queuedl) == 0:
                logger.warn(
                    'Unable to locate item in active queue. Could it be finished already?'
                )
                return {'status': False}

            stat = False
            while stat is False:
                time.sleep(10)
                queueinfo = self.server.listgroups()
                queuedl = [qu for qu in queueinfo if qu['NZBID'] == nzbid]
                if len(queuedl) == 0:
                    logger.fdebug(
                        'Item is no longer in active queue. It should be finished by my calculations'
                    )
                    stat = True
                else:
                    logger.fdebug('status: %s' % queuedl[0]['Status'])
                    logger.fdebug('name: %s' % queuedl[0]['NZBName'])
                    logger.fdebug('FileSize: %sMB' % queuedl[0]['FileSizeMB'])
                    logger.fdebug('Download Left: %sMB' %
                                  queuedl[0]['RemainingSizeMB'])
                    logger.fdebug('health: %s' % (queuedl[0]['Health'] / 10))
                    logger.fdebug('destination: %s' % queuedl[0]['DestDir'])
            logger.fdebug('File has now downloaded!')
            time.sleep(
                5
            )  #wait some seconds so shit can get written to history properly
            history = self.server.history()
            found = False
            hq = [
                hs for hs in history
                if hs['NZBID'] == nzbid and 'SUCCESS' in hs['Status']
            ]
            if len(hq) > 0:
                logger.fdebug(
                    'found matching completed item in history. Job has a status of %s'
                    % hq[0]['Status'])
                if hq[0]['DownloadedSizeMB'] == hq[0]['FileSizeMB']:
                    logger.fdebug('%s has final file size of %sMB' %
                                  (hq[0]['Name'], hq[0]['DownloadedSizeMB']))
                    if os.path.isdir(hq[0]['DestDir']):
                        logger.fdebug('location found @ %s' % hq[0]['DestDir'])
                        return {
                            'status': True,
                            'name': re.sub('.nzb', '',
                                           hq[0]['NZBName']).strip(),
                            'location': hq[0]['DestDir'],
                            'failed': False
                        }

                    else:
                        logger.warn(
                            'No file found where it should be @ %s - is there another script that moves things after completion?'
                            % hq[0]['DestDir'])
                        return {'status': False}
            else:
                logger.warn('Could not find completed item in history')
                return {'status': False}
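
The processor polls nzbget until the item leaves the active queue, which as written can loop forever. A sketch of the same polling with an explicit timeout added (the timeout and names are my additions, not mylar's):

import time

def wait_until_dequeued(list_queue, nzbid, poll=10, timeout=600):
    """Poll list_queue() until nzbid is gone; give up after timeout seconds."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if not any(item['NZBID'] == nzbid for item in list_queue()):
            return True
        time.sleep(poll)
    return False
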
Example #56
0
def DiscoveryMain(Framework):

    OSHVResult = ObjectStateHolderVector()
    ms_domain_name = Framework.getDestinationAttribute('ms_domain_name')
    if not ms_domain_name:
        ms_domain_name = 'NULL'

    try:
        netUtil = MsNetworkUtil()
        hostsOutput = netUtil.doNetServerEnum('NULL', SV_TYPE_SERVER,
                                              ms_domain_name)
        if hostsOutput != None:
            discoverUnknownIPs = 1
            try:
                strDiscoverUnknownIPs = Framework.getParameter(
                    'discoverUnknownIPs')
                discoverUnknownIPs = Boolean.parseBoolean(
                    strDiscoverUnknownIPs)
            except:
                pass

            oshMsDomain = ObjectStateHolder('msdomain')
            oshMsDomain.setStringAttribute('data_name', ms_domain_name)
            alreadyDiscoveredIps = HashMap()
            for hostInfo in hostsOutput:
                hostType = Long(hostInfo[1]).longValue()
                hostName = (str(hostInfo[0])).lower()
                try:
                    ip = InetAddress.getByName(hostInfo[0]).getHostAddress()
                    if netutils.isLocalIp(ip):
                        continue
                    cachedHostName = alreadyDiscoveredIps.get(ip)
                    if cachedHostName != None:
                        logger.debug(
                            'IP ', ip,
                            ' already reported for host ' + cachedHostName,
                            ' current host ', hostName, ' - skipping')
                        continue
                    else:
                        logger.debug('Discovered IP ' + ip + ' for host ' +
                                     hostName)
                        alreadyDiscoveredIps.put(ip, hostName)
                    ipDomain = DomainScopeManager.getDomainByIp(ip)
                    if not discoverUnknownIPs and ipDomain == 'unknown':
                        logger.debug(
                            'ip: ' + ip +
                            ' is out of probe range and will be excluded')
                        continue
                    if SV_TYPE_CLUSTER_NT & hostType:
                        logger.debug(
                            'Not reporting the entry %s because it is a Cluster'
                            % hostName)
                        continue
                    hostOsType = 'nt'
                    if SV_TYPE_SERVER_UNIX & hostType:
                        hostOsType = 'unix'
                    oshHost = modeling.createHostOSH(ip, hostOsType)
                    oshHost.setStringAttribute("host_hostname", hostName)
                    OSHVResult.add(oshHost)

                    link = modeling.createLinkOSH('member', oshMsDomain,
                                                  oshHost)
                    OSHVResult.add(link)
                    ipOSH = modeling.createIpOSH(ip)
                    OSHVResult.add(ipOSH)
                    contained = modeling.createLinkOSH('contained', oshHost,
                                                       ipOSH)
                    OSHVResult.add(contained)
                except:
                    errorMsg = str(sys.exc_info()[1]).strip()
                    logger.warn('Failed to resolve host ', hostInfo[0], ' : ',
                                errorMsg)
        else:
            message = 'Failed to discover hosts on MS Domain'
            logger.warn(message)
            logger.reportWarning(message)
    except:
        errorMsg = str(sys.exc_info()[1]).strip()
        logger.errorException('Failed to discovery MS Domains')
        errorMessage = errormessages.makeErrorMessage(
            "msdomain", errorMsg,
            errormessages.ERROR_FAILED_DISCOVERING_MSDOMAIN_HOSTS)
        errobj = errorobject.createError(
            errorcodes.FAILED_DISCOVERIING_MSDOMAIN_HOST,
            ["msdomain", errorMsg], errorMessage)
        logger.reportErrorObject(errobj)
    return OSHVResult
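
The alreadyDiscoveredIps map above reports each IP only once, keeping the first hostname that claimed it. The same first-wins dedup as a standalone sketch:

def first_hostname_per_ip(pairs):
    """Keep the first hostname seen for each IP; later duplicates are skipped."""
    seen = {}
    for hostname, ip in pairs:
        seen.setdefault(ip, hostname)
    return seen

print(first_hostname_per_ip([('web1', '10.0.0.5'), ('web2', '10.0.0.5')]))
# {'10.0.0.5': 'web1'}
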
Example #57
0
def parseTransportAdaptersForNode(node, scconfOutput):
    adaptersList = []
    matcher = re.search(
        r"\(%s\) Node transport adapters:([^\n]+)" % re.escape(node.name),
        scconfOutput)
    if matcher:
        adaptersListStr = matcher.group(1) and matcher.group(1).strip()
        if adaptersListStr:
            adaptersList = re.split(r"\s+", adaptersListStr)

    adaptersList = [adapter for adapter in adaptersList if adapter]

    if adaptersList:
        for adapterName in adaptersList:

            adapterStatus = None
            tuple = (re.escape(node.name), re.escape(adapterName))
            matcher = re.search(r"\(%s:%s\) Adapter enabled:([^\n]+)" % tuple,
                                scconfOutput)
            if matcher:
                adapterStatus = matcher.group(1) and matcher.group(1).strip()

            if adapterStatus != 'yes':
                logger.debug("Skipping disabled transport adapter '%s'" %
                             adapterName)
                continue

            logger.debug("Found transport adapter '%s'" % adapterName)
            transportAdapter = TransportAdapter(adapterName)

            properties = {}
            results = re.findall(
                r"\(%s:%s\) Adapter property:([^\n]+)" % tuple, scconfOutput)
            if results:
                for row in results:
                    elements = re.split(r"=", row, 1)
                    if len(elements) == 2:
                        propertyName = elements[0] and elements[0].strip()
                        propertyValue = elements[1] and elements[1].strip()
                        if propertyName and propertyValue:
                            properties[propertyName] = propertyValue

            ip = properties.get('ip_address')
            if ip and netutils.isValidIp(ip):
                transportAdapter.ip = ip
                logger.debug("Adapter's private IP is '%s'" % ip)
            else:
                logger.warn(
                    "Could not find private IP for transport adapter '%s' on node '%s'"
                    % (adapterName, node.name))

            netmask = properties.get('netmask')
            if netmask:
                try:
                    transportAdapter.netmask = netutils.parseNetMask(netmask)
                except:
                    logger.warn("Failed parsing netmask: %s" % netmask)

            node.transportAdaptersByName[adapterName] = transportAdapter
    else:
        logger.warn("No transport adapters found for node '%s'" % node.name)
Example #58
0
def run():
    from websocket import create_connection

    if plexpy.CONFIG.PMS_SSL and plexpy.CONFIG.PMS_URL[:5] == 'https':
        uri = plexpy.CONFIG.PMS_URL.replace(
            'https://', 'wss://') + '/:/websockets/notifications'
        secure = 'secure '
    else:
        uri = 'ws://%s:%s/:/websockets/notifications' % (
            plexpy.CONFIG.PMS_IP, plexpy.CONFIG.PMS_PORT)
        secure = ''

    # Set authentication token (if one is available)
    if plexpy.CONFIG.PMS_TOKEN:
        header = ["X-Plex-Token: %s" % plexpy.CONFIG.PMS_TOKEN]
    else:
        header = []

    global ws_shutdown
    ws_shutdown = False
    reconnects = 0

    # Try to open the websocket connection
    while not plexpy.WS_CONNECTED and reconnects < plexpy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS:
        if reconnects == 0:
            logger.info(u"Tautulli WebSocket :: Opening %swebsocket." % secure)

        reconnects += 1

        # Sleep between connection attempts
        if reconnects > 1:
            time.sleep(plexpy.CONFIG.WEBSOCKET_CONNECTION_TIMEOUT)

        logger.info(u"Tautulli WebSocket :: Connection attempt %s." %
                    str(reconnects))

        try:
            plexpy.WEBSOCKET = create_connection(uri, header=header)
            logger.info(u"Tautulli WebSocket :: Ready")
            plexpy.WS_CONNECTED = True
        except (websocket.WebSocketException, IOError, Exception) as e:
            logger.error("Tautulli WebSocket :: %s." % e)

    if plexpy.WS_CONNECTED:
        on_connect()

    while plexpy.WS_CONNECTED:
        try:
            process(*receive(plexpy.WEBSOCKET))

            # successfully received data, reset reconnects counter
            reconnects = 0

        except websocket.WebSocketConnectionClosedException:
            if ws_shutdown:
                break

            if reconnects == 0:
                logger.warn(u"Tautulli WebSocket :: Connection has closed.")

            if not plexpy.CONFIG.PMS_IS_CLOUD and reconnects < plexpy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS:
                reconnects += 1

                # Sleep between connection attempts
                if reconnects > 1:
                    time.sleep(plexpy.CONFIG.WEBSOCKET_CONNECTION_TIMEOUT)

                logger.warn(u"Tautulli WebSocket :: Reconnection attempt %s." %
                            str(reconnects))

                try:
                    plexpy.WEBSOCKET = create_connection(uri, header=header)
                    logger.info(u"Tautulli WebSocket :: Ready")
                    plexpy.WS_CONNECTED = True
                except (websocket.WebSocketException, IOError, Exception) as e:
                    logger.error("Tautulli WebSocket :: %s." % e)

            else:
                close()
                break

        except (websocket.WebSocketException, Exception) as e:
            if ws_shutdown:
                break

            logger.error("Tautulli WebSocket :: %s." % e)
            close()
            break

    if not plexpy.WS_CONNECTED and not ws_shutdown:
        on_disconnect()

    logger.debug(u"Tautulli WebSocket :: Leaving thread.")
Example #59
0
    def render(self):
        logger.warn('Render not defined for %s' % self)
Example #60
0
    def discover(self, include_cloud=True, all_servers=False):
        """ Query plex for all servers online. Returns the ones you own in a selectize format """

        # Try to discover localhost server
        local_machine_identifier = None
        request_handler = http_handler.HTTPHandler(urls='http://127.0.0.1:32400', timeout=1,
                                                   ssl_verify=False, silent=True)
        request = request_handler.make_request(uri='/identity', request_type='GET', output_format='xml')
        if request:
            xml_head = request.getElementsByTagName('MediaContainer')[0]
            local_machine_identifier = xml_head.getAttribute('machineIdentifier')

        local_server = {'httpsRequired': '0',
                        'clientIdentifier': local_machine_identifier,
                        'label': 'Local',
                        'ip': '127.0.0.1',
                        'port': '32400',
                        'uri': 'http://127.0.0.1:32400',
                        'local': '1',
                        'value': '127.0.0.1:32400',
                        'is_cloud': False
                        }

        servers = self.get_plextv_resources(include_https=True, output_format='xml')
        clean_servers = []

        try:
            xml_head = servers.getElementsByTagName('MediaContainer')
        except Exception as e:
            logger.warn("Tautulli PlexTV :: Failed to get servers from plex: %s." % e)
            return []

        for a in xml_head:
            if a.getAttribute('size'):
                if a.getAttribute('size') == '0':
                    return []

            if a.getElementsByTagName('Device'):
                devices = a.getElementsByTagName('Device')

                for d in devices:
                    if helpers.get_xml_attr(d, 'presence') == '1' and \
                            helpers.get_xml_attr(d, 'owned') == '1' and \
                            helpers.get_xml_attr(d, 'provides') == 'server':

                        is_cloud = (helpers.get_xml_attr(d, 'platform').lower() == 'cloud')
                        if not include_cloud and is_cloud:
                            continue

                        connections = d.getElementsByTagName('Connection')

                        for c in connections:
                            if not all_servers:
                                # If this is a remote server don't show any local IPs.
                                if helpers.get_xml_attr(d, 'publicAddressMatches') == '0' and \
                                        helpers.get_xml_attr(c, 'local') == '1':
                                    continue

                                # If this is a local server don't show any remote IPs.
                                if helpers.get_xml_attr(d, 'publicAddressMatches') == '1' and \
                                        helpers.get_xml_attr(c, 'local') == '0':
                                    continue

                            if helpers.get_xml_attr(d, 'clientIdentifier') == local_machine_identifier:
                                local_server['httpsRequired'] = helpers.get_xml_attr(d, 'httpsRequired')
                                local_server['label'] = helpers.get_xml_attr(d, 'name')
                                clean_servers.append(local_server)
                                local_machine_identifier = None

                            server = {'httpsRequired': '1' if is_cloud else helpers.get_xml_attr(d, 'httpsRequired'),
                                      'clientIdentifier': helpers.get_xml_attr(d, 'clientIdentifier'),
                                      'label': helpers.get_xml_attr(d, 'name'),
                                      'ip': helpers.get_xml_attr(c, 'address'),
                                      'port': helpers.get_xml_attr(c, 'port'),
                                      'uri': helpers.get_xml_attr(c, 'uri'),
                                      'local': helpers.get_xml_attr(c, 'local'),
                                      'value': helpers.get_xml_attr(c, 'address') + ':' + helpers.get_xml_attr(c, 'port'),
                                      'is_cloud': is_cloud
                                      }
                            clean_servers.append(server)

            if local_machine_identifier:
                clean_servers.append(local_server)

        clean_servers.sort(key=lambda s: (s['label'], -int(s['local']), s['ip']))

        return clean_servers
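
The final sort key is worth unpacking: it orders by label ascending, puts local servers ('local' == '1') ahead of remote ones within a label via the negated int, then falls back to IP. A tiny demonstration with fabricated entries:

servers = [
    {'label': 'Den', 'local': '0', 'ip': '10.0.0.3'},
    {'label': 'Den', 'local': '1', 'ip': '10.0.0.2'},
    {'label': 'Attic', 'local': '0', 'ip': '10.0.0.9'},
]
# Same ordering as above: label A-Z, local before remote, then by IP.
servers.sort(key=lambda s: (s['label'], -int(s['local']), s['ip']))
print([(s['label'], s['local']) for s in servers])
# [('Attic', '0'), ('Den', '1'), ('Den', '0')]
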