Exemple #1
0
def _execute( cmd, callback = None, user_data = None ):
    logger.debug("Call command \"%s\"" %cmd, traceDepth = 1)
    ret_val = 0

    if callback is None:
        ret_val = os.system( cmd )
    else:
        pipe = os.popen( cmd, 'r' )

        while True:
            line = temp_failure_retry( pipe.readline )
            if not line:
                break
            callback( line.strip(), user_data )

        ret_val = pipe.close()
        if ret_val is None:
            ret_val = 0

    if ret_val != 0:
        logger.warning("Command \"%s\" returns %s"
                       %(cmd, ret_val),
                       traceDepth = 1)
    else:
        logger.debug("Command \"%s...\" returns %s"
                     %(cmd[:min(16, len(cmd))], ret_val),
                     traceDepth = 1)

    return ret_val
Exemple #2
0
    def addCisWithDependency(self, osh):
        if self.__vector.contains(osh) or osh not in self.needRelationshipCIs:  # already in or doesn't need dependency
            return True
        deps = self.needRelationshipCIs[osh].split(',')
        allFound = True
        for dep in deps:
            relationship, targetCIType = dep.split(':')
            singleFound = False
            if osh in self.linksMap:
                links = self.linksMap[osh]
                logger.debug("Get link of target ci", osh)

                for link in links:
                    if link.getObjectClass() == relationship:
                        end1 = link.getAttributeValue('link_end1')
                        end2 = link.getAttributeValue('link_end2')
                        that = end1 if end2 == osh else end2
                        if that.getObjectClass() == targetCIType:
                            singleFound = self.addAllDependencies(that)
                            if singleFound:
                                break
            if not singleFound:
                allFound = False
                break
        if allFound:
            self.__vector.add(osh)
        else:
            for link in self.linksMap[osh]:
                self.__vector.remove(link)
        return allFound
Exemple #3
0
def create_https_certificates(ssl_cert, ssl_key):
    """
    Create a self-signed HTTPS certificate and store in it in
    'ssl_cert' and 'ssl_key'. Method assumes pyOpenSSL is installed.

    This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).
    """
    from OpenSSL import crypto
    from certgen import createKeyPair, createSelfSignedCertificate, TYPE_RSA

    serial = int(time.time())
    domains = ['DNS:' + d.strip() for d in plexpy.CONFIG.HTTPS_DOMAIN.split(',') if d]
    ips = ['IP:' + d.strip() for d in plexpy.CONFIG.HTTPS_IP.split(',') if d]
    altNames = ','.join(domains + ips)

    # Create the self-signed PlexPy certificate
    logger.debug(u"Generating self-signed SSL certificate.")
    pkey = createKeyPair(TYPE_RSA, 2048)
    cert = createSelfSignedCertificate(("PlexPy", pkey), serial, (0, 60 * 60 * 24 * 365 * 10), altNames) # ten years

    # Save the key and certificate to disk
    try:
        with open(ssl_cert, "w") as fp:
            fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
        with open(ssl_key, "w") as fp:
            fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
    except IOError as e:
        logger.error("Error creating SSL key and certificate: %s", e)
        return False

    return True
Exemple #4
0
def inhibitSuspend( app_id = sys.argv[0],
                    toplevel_xid = None,
                    reason = 'take snapshot',
                    flags = INHIBIT_SUSPENDING | INHIBIT_IDLE):
    """
    Prevent machine to go to suspend or hibernate.
    Returns the inhibit cookie which is used to end the inhibitor.
    """
    if not app_id:
        app_id = 'backintime'
    if not toplevel_xid:
        toplevel_xid = 0

    for dbus_props in INHIBIT_DBUS:
        try:
            #connect directly to the socket instead of dbus.SessionBus because
            #the dbus.SessionBus was initiated before we loaded the environ
            #variables and might not work
            if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
                bus = dbus.bus.BusConnection(os.environ['DBUS_SESSION_BUS_ADDRESS'])
            else:
                bus = dbus.SessionBus()
            interface = bus.get_object(dbus_props['service'], dbus_props['objectPath'])
            proxy = interface.get_dbus_method(dbus_props['methodSet'], dbus_props['interface'])
            cookie = proxy(*[ (app_id, dbus.UInt32(toplevel_xid), reason, dbus.UInt32(flags))[i] for i in dbus_props['arguments'] ])
            logger.info('Inhibit Suspend started. Reason: %s' % reason)
            return (cookie, bus, dbus_props)
        except dbus.exceptions.DBusException:
            pass
    if isRoot():
        logger.debug("Inhibit Suspend failed because BIT was started as root.")
        return
    logger.warning('Inhibit Suspend failed.')
Exemple #5
0
def cache_image(url, image=None):
    """
    Saves an image to the cache directory.
    If no image is provided, tries to return the image from the cache directory.
    """
    # Create image directory if it doesn't exist
    imgdir = os.path.join(plexpy.CONFIG.CACHE_DIR, 'images/')
    if not os.path.exists(imgdir):
        logger.debug(u"PlexPy Helpers :: Creating image cache directory at %s" % imgdir)
        os.makedirs(imgdir)

    # Create a hash of the url to use as the filename
    imghash = hashlib.md5(url).hexdigest()
    imagefile = os.path.join(imgdir, imghash)

    # If an image is provided, save it to the cache directory
    if image:
        try:
            with open(imagefile, 'wb') as cache_file:
                cache_file.write(image)
        except IOError as e:
            logger.error(u"PlexPy Helpers :: Failed to cache image %s: %s" % (imagefile, e))

    # Try to return the image from the cache directory
    if os.path.isfile(imagefile):
        imagetype = 'image/' + imghdr.what(os.path.abspath(imagefile))
    else:
        imagefile = None
        imagetype = 'image/jpeg'

    return imagefile, imagetype
 def discoverPlans(self,oshv,sqlServerId,dbs):
     logger.debug("going to get jobs and plans")
     if self.discoveryOptions and self.discoveryOptions.discoverSqlJob:
         jobById=self.getSqlJobs(oshv, sqlServerId)
     else:
         jobById=HashMap()
     rs = self.connection.getTable(self.plansQuery)
     plans = HashMap()
     while(rs.next()):
         name = rs.getString('plan_name')
         id = rs.getString('plan_id')
         osh = ObjectStateHolder('sqlservermaintenanceplan')
         osh.setAttribute(Queries.DATA_NAME,name)
         osh.setAttribute('planId',id)
         osh.setContainer(sqlServerId)
         oshv.add(osh)
         if self.discoveryOptions and self.discoveryOptions.discoverDbUser:
             owner = rs.getString('owner')
             # Some plans may not have an owner so we need to check
             if owner:
                 user = ObjectStateHolder('dbuser')
                 user.setAttribute(Queries.DATA_NAME,owner)
                 user.setContainer(sqlServerId)
                 oshv.add(user)
                 oshv.add(modeling.createLinkOSH('owner',user,osh))
         plans.put(name,osh)
     rs.close()
     logger.debug("got plans: ", plans.keySet().toString())
     self.discoverPlanJobs(oshv,sqlServerId,plans,jobById)
     self.discoverPlanDbs(oshv,plans,dbs)
Exemple #7
0
 def collectData(self,hostId,sqlServerId, discoverConfigs = 1):
     self.connection.open()
     oshv = ObjectStateHolderVector()
     try:
         oshv.add(self.getServerProperties(sqlServerId,hostId))
         dbMap = self.getDatabases(sqlServerId)
         #get the databases
         oshv.addAll(self.sqlDataBaseProps.getDatabases(dbMap,hostId,discoverConfigs))
         oshv.addAll(self.sqlDataBaseProps.getStoredProcedures(dbMap))
         #get the server configuration:
         logger.debug('discovering configs')
         try:
             oshv.add(self.sqlServerConfig.getServerConfiguration(sqlServerId))
             oshv.add(self.sqlServerConfig.getServerStartup(sqlServerId))
             self.sqlServerConfig.discoverPlans(oshv,sqlServerId,dbMap)
         except:
             logger.debugException(hostId.toString())
         if self.discoveryOptions and self.discoveryOptions.discoverDbUser:
             users = self.getDbUsers(sqlServerId)
             Util.addFromMap(users,oshv)
         else:
             users = LinkedHashMap()
         oshv.addAll(self.getProcesses(hostId,sqlServerId,dbMap,users))
         oshv.addAll(self.clusterConfiguration.collectData(sqlServerId))
         #db configuration:
         oshv.addAll(self.getDbConf(dbMap,hostId,users))
         logger.debug("sql db result for hostid:"+hostId.toString())
     except:
         logger.debugException(hostId.toString())
     self.connection.close()
     return oshv
Exemple #8
0
def connectToRemoteNode(Framework):
    if AgentUtils.isMigrateNeeded(Framework):
        #setting connected client identifier
        #using host name since uduid is stored in agent options and on old and new ddmi agent their location is different
        logger.debug('Connected using uda.')
        client = Framework.getConnectedClient()
        sysInfo = client.getSysInfo()
        hostName = sysInfo.getProperty('computerName')
        Framework.setProperty(InventoryUtils.UD_HOSTNAME, hostName)
        AgentUtils.setUdAgentProtocolForMigration(Framework, client.getCredentialId())
        logger.debug('Migrate is going to be performed')
        if client.hasShell():
            logger.debug('The connected Agent already supports shell, assume it is a non-native agent.')
            reason = 'The connected Agent already supports shell,it may be a non-native agent.'
            Framework.setProperty(InventoryUtils.generateSkipStep('Install Non-Native UD Agent'), reason)
            #Framework.setProperty(InventoryUtils.generateSkipStep('Check Non-Native Agent Installed'), reason)

            platform = Framework.getProperty(InventoryUtils.STATE_PROPERTY_PLATFORM)
            if platform == 'windows':
                # In windows, it is native already if it has shell.
                logger.debug('This is windows, it must be native agent.')
                Framework.setProperty(AgentUtils.DOWNLOAD_MIGRATE_LOG_FILE, '')
                reason = 'Native installation is used for Windows platform.'
                Framework.setProperty(InventoryUtils.generateSkipStep('Init Update from Non-Native to Native'), reason)
                Framework.setProperty(InventoryUtils.generateSkipStep('Install Native UD Agent'), reason)
        else:
            logger.debug('The connected client does NOT support the shell capability. This is DDMi agent!')


        Framework.setStepExecutionStatus(WorkflowStepStatus.SUCCESS)
Exemple #9
0
    def _mount(self):
        """
        mount the service
        """
        sshfs = [self.mountproc] + self.ssh_options
        if not self.cipher == 'default':
            sshfs.extend(['-o', 'Ciphers=%s' % self.cipher])
        sshfs.extend(['-o', 'idmap=user',
                      '-o', 'cache_dir_timeout=2'])

        # use read only mount if requested
        if self.read_only:
            sshfs.extend(['-o', 'ro'])

        sshfs.extend([self.user_host_path, self.mountpoint])
        #bugfix: sshfs doesn't mount if locale in LC_ALL is not available on remote host
        #LANG or other envirnoment variable are no problem.
        env = os.environ.copy()
        if 'LC_ALL' in list(env.keys()):
            env['LC_ALL'] = 'C'
        logger.debug('Call mount command: %s'
                     %' '.join(sshfs),
                     self)
        try:
            subprocess.check_call(sshfs, env = env)
        except subprocess.CalledProcessError:
            raise MountException( _('Can\'t mount %s') % ' '.join(sshfs))
    def on_stop(self, force_stop=False):
        if self.is_valid_session():
            logger.debug(u"PlexPy ActivityHandler :: Session %s has stopped." % str(self.get_session_key()))

            # Set the session last_paused timestamp
            ap = activity_processor.ActivityProcessor()
            ap.set_session_last_paused(session_key=self.get_session_key(), timestamp=None)

            # Update the session state and viewOffset
            # Set force_stop to true to disable the state set
            if not force_stop:
                ap.set_session_state(session_key=self.get_session_key(),
                                     state=self.timeline['state'],
                                     view_offset=self.timeline['viewOffset'],
                                     stopped=int(time.time()))

            # Retrieve the session data from our temp table
            db_session = ap.get_session_by_key(session_key=self.get_session_key())

            # Check if any notification agents have notifications enabled
            if any(d['on_stop'] for d in notifiers.available_notification_agents()):
                # Fire off notifications
                threading.Thread(target=notification_handler.notify,
                                 kwargs=dict(stream_data=db_session, notify_action='stop')).start()

            # Write it to the history table
            monitor_proc = activity_processor.ActivityProcessor()
            monitor_proc.write_session_history(session=db_session)

            # Remove the session from our temp session table
            logger.debug(u"PlexPy ActivityHandler :: Removing session %s from session queue" % str(self.get_session_key()))
            ap.delete_session(session_key=self.get_session_key())
Exemple #11
0
def StepMain(Framework):
    # if we have shell credentials and we are able to connect with them then connect otherwise we should connect with agent

    ip = Framework.getDestinationAttribute('ip_address')
    domain = Framework.getDestinationAttribute('ip_domain')
    codepage = Framework.getCodePage()

    allShellProtocols = []
    allShellCredentials = []
    allShellIps = []
    allShellCodePages = []

    protocols = netutils.getAvailableProtocols(Framework, ClientsConsts.DDM_AGENT_PROTOCOL_NAME, ip, domain)

    for protocol in protocols:

        allShellProtocols.append(ClientsConsts.DDM_AGENT_PROTOCOL_NAME)
        allShellCredentials.append(protocol)
        allShellIps.append(ip)
        allShellCodePages.append(codepage)

    logger.debug('Will going to attempt to connect in this order: ', allShellCredentials)
    Framework.setProperty(InventoryUtils.STATE_PROPERTY_CONNECTION_PROTOCOLS, allShellProtocols)
    Framework.setProperty(InventoryUtils.STATE_PROPERTY_CONNECTION_CREDENIALS, allShellCredentials)
    Framework.setProperty(InventoryUtils.STATE_PROPERTY_CONNECTION_IPS, allShellIps)
    Framework.setProperty(InventoryUtils.STATE_PROPERTY_CONNECTION_CODEPAGES, allShellCodePages)

    InventoryUtils.executeStep(Framework, connectToRemoteNode, InventoryUtils.STEP_REQUIRES_CONNECTION, InventoryUtils.STEP_DOESNOT_REQUIRES_LOCK)
Exemple #12
0
def accel_calibrate_reference():
    """run accelcal on reference board"""
    logger.info("STARTING REFERENCE ACCEL CALIBRATION")

    conn = connection.Connection(ref_only=True)

    logger.info("Turning safety off")
    rotate.set_rotation(conn, "level", wait=False)
    util.safety_off(conn.refmav)

    conn.ref.send("accelcal\n")
    for rotation in ["level", "left", "right", "up", "down", "back"]:
        try:
            conn.ref.expect("Place vehicle")
            conn.ref.expect("and press any key")
        except Exception as ex:
            util.failure("Failed to get place vehicle message for %s" % rotation)
        logger.debug("Rotating %s" % rotation)
        attitude = rotate.set_rotation(conn, rotation, wait=False)
        time.sleep(13)
        conn.ref.send("\n")
    i = conn.ref.expect(["Calibration successful", "Calibration FAILED"])
    if i != 0:
        util.failure("Accel calibration failed at %s" % time.ctime())
    logger.info("Calibration successful")
    rotate.set_rotation(conn, "level", wait=False)
    util.param_set(conn.ref, "AHRS_TRIM_X", 0)
    util.param_set(conn.ref, "AHRS_TRIM_Y", 0)
    util.discard_messages(conn.refmav)
    util.wait_heartbeat(conn.refmav)
    def on_created(self):
        if self.is_item():
            logger.debug(u"PlexPy TimelineHandler :: Library item %s has been added to Plex." % str(self.get_rating_key()))

            # Fire off notifications
            threading.Thread(target=notification_handler.notify_timeline,
                             kwargs=dict(timeline_data=self.get_metadata(), notify_action='created')).start()
def getMAC(shell, int_name):
    cmdResult = None
    rawCmdResult = None
    mac = None
    entstat = None
    try:
        entstat_command = concatenate('entstat ', int_name)

        logger.debug(concatenate(' Executing command: ', entstat_command))
        entstat = shell.execCmd(entstat_command)

        if entstat != None:
            m = re.search('Device Type: (.+)', entstat)
            description = None
            if(m):
                description = m.group(1).strip()
            m = re.search('Hardware Address: ([0-9a-f:]{17})', entstat)
            rawMac = None
            if(m):
                rawMac = m.group(1)
                mac = netutils.parseMac(rawMac)
    except:
        msg = " Failed getting MAC address for interface '%s'" % int_name
        errobj = errorobject.createError(errorcodes.FAILED_GETTING_INFORMATION, None, msg)
        logger.reportWarningObject(errobj)
        logger.debug(msg)
        return None

    return mac
def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    logger.info('Starting HACMP Applications')
    hostIP = Framework.getDestinationAttribute('ip_address')
    logger.debug ('Host IP: ',hostIP)
    cluster =  Framework.getDestinationAttribute('cluster')
    hostOS = Framework.getDestinationAttribute('host_os')
    hostOS = hostOS or 'NA'
    protocolName = Framework.getDestinationAttribute('Protocol')
    hostId = Framework.getDestinationAttribute('hostId')
    ##  Get Parameter Section
    cldisp_command = Framework.getParameter('cldisp_command') or 'cldisp'
    cllsif_command = Framework.getParameter('cllsif_command') or 'cllsif'

    try:
        client = Framework.createClient()
        shell = ShellUtils(client)
        #   If we get  good client connection , run the client commands to get the Application information for the cluster
        HostOSH = modeling.createOshByCmdbIdString('host', hostId)
        ClusterOSH = getclusterOSH(cluster)
        appDictionary = getapplicationInfo(shell,  cldisp_command,  Framework)
        resourceDictionary = getresourceinfo(shell, cllsif_command)
        OSHVResult.addAll(createserviceapplicationOSH (shell, appDictionary, resourceDictionary, HostOSH, ClusterOSH,   Framework))
        client.close()
    except JavaException, ex:
        strException = ex.getMessage()
        logger.debugException('')
        errormessages.resolveAndReport(strException, protocolName, Framework)
Exemple #16
0
def parse_oam_policy(policy_content):
    """
    Parse oam policy.xml to get oam policies
    :param policy_content:
    :return: list of oam policy
    :rtype: Policy
    """
    logger.debug('parse oam policy.xml')
    oam_policies = []
    root = _buildDocumentForXpath(policy_content, 0)
    xpath = _getXpath()
    policies = xpath.evaluate('//AuthenticationPolicy | //AuthorizationPolicy', root, XPathConstants.NODESET)
    for i in range(0, policies.getLength()):
        policy = policies.item(i)
        policy_type = policy.getNodeName()
        policy_name = xpath.evaluate('name', policy, XPathConstants.STRING)
        applicationDomainName = xpath.evaluate('applicationDomainName', policy, XPathConstants.STRING)
        successRedirectURL = xpath.evaluate('successRedirectURL', policy, XPathConstants.STRING)
        failureRedirectURL = xpath.evaluate('failureRedirectURL', policy, XPathConstants.STRING)
        oam_policy = Policy(
            policy_type, policy_name, applicationDomainName, successRedirectURL, failureRedirectURL, [])
        oam_policies.append(oam_policy)
        resources = xpath.evaluate('Resources/Resource', policy, XPathConstants.NODESET)
        for j in range(0, resources.getLength()):
            resource = resources.item(j)
            resource_name = xpath.evaluate('name', resource, XPathConstants.STRING)
            protectionLevel = xpath.evaluate('protectionLevel', resource, XPathConstants.STRING)
            resourceURL = xpath.evaluate('resourceURL', resource, XPathConstants.STRING)
            oam_resource = Resource(resource_name, protectionLevel, resourceURL)
            oam_policy.add_resource(oam_resource)
        logger.debug('find policy: %s' % oam_policy)
    return oam_policies
Exemple #17
0
def DiscoveryMain(Framework):
    connectionDataManager = None
    try:
        logger.debug('Replicating topology from HP OneView')

        connectionDataManager = FrameworkBasedConnectionDataManager(Framework)
        if not connectionDataManager.validate():
            return
        mappingFileFolder = os.path.join(CollectorsParameters.BASE_PROBE_MGR_DIR,
                                         CollectorsParameters.getDiscoveryConfigFolder(), HP_ONE_VIEW_CONFIG_FOLDER)
        mappingFileManager = OneviewMappingFileManager(mappingFileFolder)

        mappingFile = getMappingFileFromFramework(Framework)
        if mappingFile:
            return replicateTopologyUsingMappingFile(os.path.join(mappingFileFolder, mappingFile),
                                                     connectionDataManager, mappingFileManager)
        else:
            Framework.reportError('No mapping file found.')
            logger.errorException("No mapping file found.")
    except:
        Framework.reportError('Failed to pull data from OneView.')
        logger.errorException('Failed to pull data from OneView.')
    finally:
        if connectionDataManager:
            connectionDataManager.closeClient()
Exemple #18
0
def clear_history_tables():
    logger.debug(u"PlexPy Database :: Deleting all session_history records... No turning back now bub.")
    monitor_db = MonitorDatabase()
    monitor_db.action('DELETE FROM session_history')
    monitor_db.action('DELETE FROM session_history_media_info')
    monitor_db.action('DELETE FROM session_history_metadata')
    monitor_db.action('VACUUM')
Exemple #19
0
    def start(self):
        from scheduler import Tree

        loog = self.addLog("stdio")
        self.pending = 0
        properties = self.build.getProperties()
        self.rendered_tree = tree = properties.render(self.treename)
        l10nbuilds = properties.render(self.l10nbuilds)
        cp = ConfigParser()
        cp.read(l10nbuilds)
        repo = cp.get(tree, "repo")
        branch = cp.get(tree, "mozilla")
        path = cp.get(tree, "l10n.ini")
        l10nbranch = cp.get(tree, "l10n")
        locales = cp.get(tree, "locales")
        if locales == "all":
            alllocales = "yes"
        else:
            alllocales = "no"
            properties.update({"locales": filter(None, locales.split())}, "Build")
        self.tree = Tree(self.rendered_tree, repo, branch, l10nbranch, path)
        loog.addStdout("Loading l10n.inis for %s\n" % self.rendered_tree)
        logger.debug(
            "scheduler.l10n.tree", "Loading l10n.inis for %s, alllocales: %s" % (self.rendered_tree, alllocales)
        )
        self.loadIni(repo, branch, path, alllocales)
Exemple #20
0
def osh_createDb2Tablespace(db2SubsystemOsh, tb):
    str_name = 'name'
    if UCMDB_VERSION < 9:
        str_name = 'data_name'

    if isNotNull(tb) and isNotNull(tb[0]):
        tbOsh = ObjectStateHolder('mainframe_db2_tablespace')
        tbOsh.setAttribute(str_name, tb[0])
        tbOsh.setAttribute('dbtablespace_status', tb[1])
        tbOsh.setAttribute('type', tb[2])
        tbOsh.setAttribute('encoding_scheme', tb[3])
        tbOsh.setAttribute('dbtablespace_initialextent', tb[4])
        if isNotNull(tb[5]) and isnumeric(tb[5]):
            tbOsh.setIntegerAttribute('max_dataset_size', int(tb[5]))
        if isNotNull(tb[6]) and isnumeric(tb[6]):
            tbOsh.setIntegerAttribute('number_tables', int(tb[6]))
        if isNotNull(tb[7]) and isnumeric(tb[7]):
            tbOsh.setIntegerAttribute('number_partitions', int(tb[7]))
        try:
            if len(tb[8]) > 19:
                tb[8] = tb[8][0:18]
                created = modeling.getDateFromString(tb[8], 'yyyy-MM-dd-kk.mm.ss', None)
                tbOsh.setDateAttribute('create_date', created)
        except:
            logger.debug("Ignoring create_date. Unable to parse date string")
        tbOsh.setContainer(db2SubsystemOsh)
        return tbOsh
    return None
Exemple #21
0
    def __init__(self, filename=":memory:", flags="r", mode=None, tablename="shelf"):
        # XXX add flag/mode handling
        #   c -- create if it doesn't exist
        #   n -- new empty
        #   w -- open existing
        #   r -- readonly
        logger.debug("Opening %s with flags %s" % (filename, flags))

        self.conn = ClosedConnection()
        dropIfExists = False
        readOnly = False
        if flags is not None:
            if "n" in flags:
                dropIfExists = True
            if "r" in flags:
                readOnly = True

        if readOnly:
            if not os.path.isfile(filename):
                raise NoSuchFileError("%s doesn't exist" % (filename,))

        self.tablename = tablename
        self.filename = filename
        self.conn = sqlite3.connect(filename)
        self.conn.text_factory = str
        if not readOnly:
            if dropIfExists:
                self.conn.execute("drop table if exists %s" % (self.tablename,))
            MAKE_SHELF = "CREATE TABLE IF NOT EXISTS %s (key TEXT PRIMARY KEY, value TEXT NOT NULL)" % self.tablename
            self.conn.execute(MAKE_SHELF)
        self.conn.commit()
Exemple #22
0
 def create_necessary_paths(filename):
     try:
         (path,name) = os.path.split(filename)
         logger.debug("Creating dir: "+path)
         os.makedirs( path)
     except:
         pass
Exemple #23
0
 def envisalink_proxy(self, eventType, type, parameters, *args):
     try:
         res = yield self._connection.write(parameters)
         logger.debug('PROXY > '+parameters.strip())
     except StreamClosedError:
         #we don't need to handle this, the callback has been set for closed connections.
         pass
Exemple #24
0
 def queryRegistry(self, client, regKey, valueName):
     if not (client and regKey and valueName):
         logger.warn('registry query is incomplete')
         return
     logger.debug('RegistryBasedPlugin.queryRegistry')
     ntcmdErrStr = 'Remote command returned 1(0x1)'
     queryStr = ' query "%s" /v "%s"' % (regKey, valueName)
     system32Link = client.createSystem32Link() or ''
     buffer = client.execCmd(system32Link + "reg.exe" + queryStr)
     if client.getLastCmdReturnCode() != 0 or buffer.find(ntcmdErrStr) != -1:
         localFile = CollectorsParameters.BASE_PROBE_MGR_DIR + CollectorsParameters.getDiscoveryResourceFolder() + CollectorsParameters.FILE_SEPARATOR + 'reg_mam.exe'
         remoteFile = client.copyFileIfNeeded(localFile)
         if not remoteFile:
             logger.warn('Failed copying reg_mam.exe to the destination')
             return 
         buffer = client.execCmd(remoteFile + queryStr)
         if not buffer or client.getLastCmdReturnCode() != 0:
             logger.warn("Failed getting registry info.")
             return
     match = re.search(r'%s\s+%s\s+\w+\s+(.*)' % (regKey.replace('\\', '\\\\'), valueName), buffer, re.I)
     client.removeSystem32Link()
     if match:
         val = match.group(1)
         return val.strip()
     logger.warn('Cannot parse registry key')
def getProps(Framework):
    platform = Framework.getProperty(InventoryUtils.STATE_PROPERTY_PLATFORM)
    architecture = Framework.getProperty(InventoryUtils.STATE_PROPERTY_ARCHITECTURE)
    logger.debug('Platform: [', platform, '], architecture [', architecture, ']')

    # We don't care for previous datadir and tempdir on windows
    if str(platform) == "windows":
        Framework.setStepExecutionStatus(WorkflowStepStatus.SUCCESS)
    else:
        try:
            client = Framework.getConnectedClient()
            clientOptions = client.getOptionsMap()
            envVariables = client.getEnvironmentVariables()

            dataFolder = clientOptions.get(AgentUtils.DATA_DIR_OPTION)
            tempFolder = envVariables.get(AgentUtils.TEMP_DIR_OPTION)

            Framework.setProperty(AgentUtils.DATA_DIR_OPTION, dataFolder)
            Framework.setProperty(AgentUtils.TEMP_DIR_OPTION, tempFolder)
            logger.debug('Datadir option received from DDMI is [', dataFolder,
                '] and the tempdir is [', tempFolder, ']')

            Framework.setStepExecutionStatus(WorkflowStepStatus.SUCCESS)
        except:
            Framework.setStepExecutionStatus(WorkflowStepStatus.FAILURE)
Exemple #26
0
  def Display(self, node, _detached):
    if not node or node != self.lastNode:
      self.storeLastItem()
      if not self.prepare(node):
        return
      node=self.lastNode
      self.control.AddColumn(xlt("Name"), 10)
      self.control.AddColumn(xlt("Type"), 5)
      self.control.AddColumn(xlt("Values"), 40)
      self.control.AddColumn(xlt("TTL"), 5)
      self.RestoreListcols()

      for other in sorted(node.others.keys()):
        rdss=node.others[other]
        for rds in rdss.values():
          icon=node.GetImageId('other')
          dnstype=rdatatype.to_text(rds.rdtype)
          name=other
          for rd in rds:
            values=[]
            for slot in rd.__slots__:
              value=eval("rd.%s" % slot)
              if isinstance(value, list):
                if len(value) > 1:
                  logger.debug("Value list dimensions > 1: %s", str(value))
                value=" ".join(value)
                
              values.append("%s=%s" % (slot, value))
            self.control.AppendItem(icon, [name, dnstype, ", ".join(values), floatToTime(rds.ttl, -1)])
            icon=0
            name=""
            dnstype=""
      self.restoreLastItem()
def discoverDomainAdministrativeIps(allservers, dnsResolver):
    r''' Discover administrative IP address in two ways
    # 1) find server with admin-role and get server's IP or hostname (role contains port)
    # 2) try to find the same information between managed-servers

    @types: list[jee.Server], jee.DnsResolver -> list[str]'''
    domainIpAddresses = []
    isAdminServer = lambda server: server.hasRole(jee.AdminServerRole)
    isManagedServer = lambda server: server.hasRole(weblogic.ManagedServerRole)
    logger.debug( 'all servers', allservers )
    adminServers = filter(isAdminServer, allservers)
    logger.debug( 'admin servers', adminServers  )
    if adminServers:
        adminServerAddresses = (adminServers[0].hostname, adminServers[0].ip.value())
        domainIpAddresses = getIpsResolveIfNeeded(adminServerAddresses, dnsResolver)
        if not adminServers[0].ip.value():
            # in case there is only one resolved ip address we can set it in server
            # otherwise problem to chose such
            adminServers[0].ip.set(domainIpAddresses[0])
    # case when failed to get admin-server IPs or admin-servers are not found
    # 2)
    if not domainIpAddresses:
        # lets gather all IP addresses declared in managed servers
        adminServerAddresses = {}
        for server in filter(isManagedServer, allservers):
            managedServerRole = server.getRole(weblogic.ManagedServerRole)
            adminServerAddresses[managedServerRole.endpoint.getAddress()] = 1
        # find IPs among addresses
        domainIpAddresses = getIpsResolveIfNeeded(adminServerAddresses.keys(), dnsResolver)
    return domainIpAddresses
Exemple #28
0
    def do_connect(self, reconnect = False):
        # Create the socket and connect to the server
        if reconnect == True:
            logger.warning('Connection failed, retrying in '+str(self._retrydelay)+ ' seconds')
            yield gen.sleep(self._retrydelay)

        while self._connection == None:
            logger.debug('Connecting to {}:{}'.format(config.ENVISALINKHOST, config.ENVISALINKPORT))
            try:
                self._connection = yield self.tcpclient.connect(config.ENVISALINKHOST, config.ENVISALINKPORT)
                self._connection.set_close_callback(self.handle_close)
            except StreamClosedError:
                #failed to connect, but got no connection object so we will loop here
                logger.warning('Connection failed, retrying in '+str(self._retrydelay)+ ' seconds')
                yield gen.sleep(self._retrydelay)
                continue

            try:
                line = yield self._connection.read_until(self._terminator)
            except StreamClosedError:
                #in this state, since the connection object isnt none, its going to throw the callback for handle_close so we just bomb out.
                #and let handle_close deal with this
                return

            logger.debug("Connected to %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
            self.handle_line(line)
Exemple #29
0
def init(mode = ""):
    logger.debug("Initializing...")
    app = main.application()
    registry.setValue("application", app)
    
    error = None
    try:
        isfunc = eval("type(app." + mode + "Action)")
    except:
        isfunc = ""
        pass
    
    if (mode != "" and str(isfunc) == "<type 'instancemethod'>"):
        registry.setValue("mode", mode)
        error = eval("app." + mode + "Action()")
    else:
        registry.setValue("mode", "main")
        error = app.mainAction()
    
    if (error is not None):
        logger.logException(error)
    
    # Destroy window event
    try:
        eval("app.destroy()")
    except:
        pass
    
    return False
Exemple #30
0
 def _makedirs(self, directories, basedir):
     logger.debug("basedir: "+basedir)
     for dir in directories:
         curdir = os.path.join(basedir, dir)
         if not os.path.exists(curdir):
             logger.debug("Creating dir: "+curdir)
             os.mkdir(curdir)
Exemple #31
0
    def process(self):
        if self.is_valid_session():
            ap = activity_processor.ActivityProcessor()
            db_session = ap.get_session_by_key(
                session_key=self.get_session_key())

            this_state = self.timeline['state']
            this_key = str(self.timeline['ratingKey'])

            # If we already have this session in the temp table, check for state changes
            if db_session:
                # Re-schedule the callback to reset the 5 minutes timer
                schedule_callback('session_key-{}'.format(
                    self.get_session_key()),
                                  func=force_stop_stream,
                                  args=[self.get_session_key()],
                                  minutes=5)

                last_state = db_session['state']
                last_key = str(db_session['rating_key'])

                # Make sure the same item is being played
                if this_key == last_key:
                    # Update the session state and viewOffset
                    if this_state == 'playing':
                        # Update the session in our temp session table
                        # if the last set temporary stopped time exceeds 15 seconds
                        if int(time.time()) - db_session['stopped'] > 60:
                            self.update_db_session()

                    # Start our state checks
                    if this_state != last_state:
                        if this_state == 'paused':
                            self.on_pause()
                        elif last_state == 'paused' and this_state == 'playing':
                            self.on_resume()
                        elif this_state == 'stopped':
                            self.on_stop()

                    elif this_state == 'buffering':
                        self.on_buffer()

                    elif this_state == 'paused':
                        # Update the session last_paused timestamp
                        self.on_pause(still_paused=True)

                # If a client doesn't register stop events (I'm looking at you PHT!) check if the ratingKey has changed
                else:
                    # Manually stop and start
                    # Set force_stop so that we don't overwrite our last viewOffset
                    self.on_stop(force_stop=True)
                    self.on_start()

                # Monitor if the stream has reached the watch percentage for notifications
                # The only purpose of this is for notifications
                if this_state != 'buffering':
                    progress_percent = helpers.get_percent(
                        self.timeline['viewOffset'], db_session['duration'])
                    watched_percent = {
                        'movie': plexpy.CONFIG.MOVIE_WATCHED_PERCENT,
                        'episode': plexpy.CONFIG.TV_WATCHED_PERCENT,
                        'track': plexpy.CONFIG.MUSIC_WATCHED_PERCENT,
                        'clip': plexpy.CONFIG.TV_WATCHED_PERCENT
                    }

                    if progress_percent >= watched_percent.get(
                            db_session['media_type'], 101):
                        watched_notifiers = notification_handler.get_notify_state_enabled(
                            session=db_session,
                            notify_action='on_watched',
                            notified=False)

                        if watched_notifiers:
                            logger.debug(
                                u"Tautulli ActivityHandler :: Session %s watched."
                                % str(self.get_session_key()))

                        for d in watched_notifiers:
                            plexpy.NOTIFY_QUEUE.put({
                                'stream_data':
                                db_session.copy(),
                                'notifier_id':
                                d['notifier_id'],
                                'notify_action':
                                'on_watched'
                            })

            else:
                # We don't have this session in our table yet, start a new one.
                if this_state != 'buffering':
                    self.on_start()

                    # Schedule a callback to force stop a stale stream 5 minutes later
                    schedule_callback('session_key-{}'.format(
                        self.get_session_key()),
                                      func=force_stop_stream,
                                      args=[self.get_session_key()],
                                      minutes=5)
Exemple #32
0
def weakly_supervision_train():
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.load_state_dict(
        torch.load("../model/mask_rcnn_5_2_002.pth"))  #加载已训练的模型
    model.to(device)
    model.open_weakly_supervision_train()  #打开弱监督训练方式

    for name, params in model.named_parameters():
        if 'mask' not in name:  #冻结mask分支以外的参数
            params.requires_grad = False

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    num_epochs = 5
    batch_size = 2

    loss_sum = 0

    data = SeashipDataset("../../SeaShips", None)

    with open("./train_data", "r") as f:
        lines = f.readlines()
        train_list = [int(line) for line in lines]

    print(len(train_list))

    for epoch in range(num_epochs):
        for idx in range(0, len(train_list), batch_size):
            try:
                imgs = []
                targets = []
                for i in range(idx, idx + batch_size):
                    img, target = data.getitem2(train_list[i] - 1, epoch)
                    imgs.append(F.to_tensor(img).to(device))
                    target = {k: v.to(device) for k, v in target.items()}
                    targets.append(target)

                original_image_sizes = [img.shape[-2:] for img in imgs]
                loss_dict, result = model.forward(imgs, targets)
                losses = sum(loss for loss in loss_dict.values())
                loss_sum += losses

                #print(result)

                optimizer.zero_grad()
                losses.backward()
                optimizer.step()

                for j, res in enumerate(result):
                    scores = res['scores'].cpu().detach().numpy()
                    masks = res["masks"].cpu()
                    #print(masks[0].shape)
                    index = np.where(scores > 0.9)  #只要9分以上的
                    masks = masks[index]
                    masks = torch.where(masks > 0.5, torch.full_like(masks, 1),
                                        torch.full_like(masks, 0))
                    m = torch.zeros(original_image_sizes[0])
                    for mask in masks:
                        m += mask[0]
                    m = torch.where(m > 0.5, torch.full_like(m, 1),
                                    torch.full_like(m, 0))
                    img_mask = TensorToPIL(m)
                    data.updatemask(idx + j, img_mask, epoch)
            except:
                logger.error(str(traceback.format_exc()))

            if idx % 10 == 0:
                #print("[%d]rpn_loss: %f" %(idx, loss_sum))
                logger.debug("[%d]total_loss: %f" % (idx, loss_sum))
                loss_sum = 0

    torch.save(model.state_dict(), "./mask_rcnn_weakly_5_2_002.pth")
Exemple #33
0
def train_mask():
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.to(device)
    model.train()

    data = SeashipDataset("../../SeaShips", None)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.002,
                                momentum=0.9,
                                weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)
    num_epochs = 5
    batch_size = 2

    loss_sum = 0
    loss_classifier = 0
    loss_box_reg = 0
    loss_mask = 0

    with open("./train_data", "r") as f:
        lines = f.readlines()
        train_list = [int(line) for line in lines]

    print(len(train_list))

    for epoch in range(num_epochs):
        lr_scheduler = None
        if epoch == 0:
            warmup_factor = 1. / 1000
            warmup_iters = min(1000, 5000 - 1)
            lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters,
                                                     warmup_factor)

        for idx in range(0, len(train_list), batch_size):
            try:
                imgs = []
                targets = []
                for i in range(idx, idx + batch_size):
                    img, target = data.__getitem__(train_list[i] - 1)
                    imgs.append(F.to_tensor(img).to(device))
                    target = {k: v.to(device) for k, v in target.items()}
                    targets.append(target)

                loss_dict = model.forward(imgs, targets)
                losses = sum(loss for loss in loss_dict.values())
                loss_sum += losses

                #loss_classifier += loss_dict['loss_classifier'].values()
                #loss_box_reg += loss_dict['loss_box_reg'].values()
                #loss_mask += loss_dict['loss_mask'].values()

                optimizer.zero_grad()
                losses.backward()
                optimizer.step()

                if lr_scheduler is not None:
                    lr_scheduler.step()
            except:
                logger.error(str(traceback.format_exc()))

            if idx % 12 == 0:
                logger.debug("[%d]total_loss: %f" % (idx, loss_sum))
                #logger.debug("[%d]loss: %f loss_classifier: %f loss_box_reg: %f loss_mask: %f" %(idx, loss_sum, loss_classifier, loss_box_reg, loss_mask))
                loss_sum = 0
                #loss_classifier = 0
                #loss_box_reg = 0
                #loss_mask = 0

    torch.save(model.state_dict(), "../model/mask_rcnn_5_2_002.pth")
    logger.debug("train successfully!")\
Exemple #34
0
                          '100000')
        client = Framework.createClient(props)
        if client is None:
            raise Exception, 'Failed to create NTCMD client'
    except Exception, ex:
        strException = ex.getMessage()
        errormessages.resolveAndReport(strException, protocolName, Framework)
    except:
        strException = logger.prepareJythonStackTrace('')
        errormessages.resolveAndReport(strException, protocolName, Framework)
    else:
        try:
            clientShUtils = shellutils.ShellUtils(client)
            OSHVResult.addAll(
                NTCMD_HR_REG_Software_Lib.doSoftware(clientShUtils, hostOSH))
        except Exception, ex:
            strException = ex.getMessage()
            errormessages.resolveAndReport(strException, protocolName,
                                           Framework)
        except:
            strException = logger.prepareJythonStackTrace('')
            errormessages.resolveAndReport(strException, protocolName,
                                           Framework)

    try:
        clientShUtils and clientShUtils.clientClose()
    except:
        logger.debugException('')
        logger.debug('Failed disconnecting from shell agent')
    return OSHVResult
Exemple #35
0
def parseTNSNames(tns_buffer, db_domain, shell=None):
    tns_entries = []
    tns_buffer = tns_buffer.upper()
    tns_buffer = stripCommentLines(tns_buffer, '#')
    logger.debug('tns_buffer')
    logger.debug(tns_buffer)
    tns_entries_str = findTNSEntriesStr(tns_buffer)
    if tns_entries_str == []:
        return []  # error, no entries

    startPattern = Pattern('([\w\d.]*)\s*=\s*\(DESCRIPTION')
    for tns_entry_str in tns_entries_str:
        host_names = getTNSAttributeList(tns_entry_str, ['HOST'])
        for host_name in host_names:
            tns_entry = []
            logger.debug('tns_entry_str', tns_entry_str)
            match = startPattern.matcher(tns_entry_str)
            if match.find() == 1:
                tns_name = string.strip(match.group(1))
                logger.debug('tns_name', tns_name)
                tns_entry += [tns_name]
            logger.debug('host_name', host_name)
            tns_entry += [host_name]
            port = getTNSAttribute(tns_entry_str, ['PORT'])
            logger.debug('port', port)
            tns_entry += [port]
            sid = getTNSAttribute(tns_entry_str, ['SID'])
            if sid == '':
                sid = getTNSAttribute(tns_entry_str,
                                      ['SERVICE_NAME', 'service_name'])
                if sid == '':
                    sid = getTNSAttribute(tns_entry_str, ['GLOBAL_DBNAME'])
            tns_name = stripDomain(tns_name, db_domain)
            sid = stripDomain(sid, db_domain)
            logger.debug('sid', sid)
            tns_entry += [sid]
            tns_entry += [tns_name]
            host_ip = ''
            if shell:
                try:
                    resolver = netutils.DNSResolver(shell)
                    ips = resolver.resolveIpByNsLookup(host_name)
                    host_ip = ips and ips[
                        0] or resolver.resolveHostIpByHostsFile(host_name)
                except:
                    logger.warn('Failed to resolve host ip throught nslookup')
                    host_ip = host_name
            else:
                host_ip = netutils.getHostAddress(host_name, host_name)
            tns_entry += [host_ip]
            tns_entries += [tns_entry]
            logger.debug(tns_entry)

    return tns_entries
Exemple #36
0
#!/usr/bin/python

import logger
import logging
import requests

logger.debug('123')
logger.error('Failed...')

logger.cprint(logger.LIGHT_CYAN, 'cprint!')

logging.debug('debug 123')
#logging.info('info 123')
#logging.warning('warning 123')

import urllib2

url = 'http://10.88.15.168:9200/File/FileDownloadService.aspx?locationID=7&fileID=d08ede40-4fe5-4ebf-b548-b3225ea815bf'
file_name = urllib2.unquote(url).decode('utf8').split('/')[-1]
print(file_name)

r = requests.get(url, stream=True)
#print r.headers['Location']
print r.headers["Content-Length"]
Exemple #37
0
    def process(self):
        if self.is_item():
            global RECENTLY_ADDED_QUEUE

            rating_key = self.get_rating_key()

            media_types = {
                1: 'movie',
                2: 'show',
                3: 'season',
                4: 'episode',
                8: 'artist',
                9: 'album',
                10: 'track'
            }

            identifier = self.timeline.get('identifier')
            state_type = self.timeline.get('state')
            media_type = media_types.get(self.timeline.get('type'))
            section_id = self.timeline.get('sectionID', 0)
            title = self.timeline.get('title', 'Unknown')
            metadata_state = self.timeline.get('metadataState')
            media_state = self.timeline.get('mediaState')
            queue_size = self.timeline.get('queueSize')

            # Return if it is not a library event (i.e. DVR EPG event)
            if identifier != 'com.plexapp.plugins.library':
                return

            # Add a new media item to the recently added queue
            if media_type and section_id > 0 and \
                ((state_type == 0 and metadata_state == 'created')):  # or \
                #(plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_UPGRADE and state_type in (1, 5) and \
                #media_state == 'analyzing' and queue_size is None)):

                if media_type in ('episode', 'track'):
                    metadata = self.get_metadata()
                    if metadata:
                        grandparent_rating_key = int(
                            metadata['grandparent_rating_key'])
                        parent_rating_key = int(metadata['parent_rating_key'])

                        grandparent_set = RECENTLY_ADDED_QUEUE.get(
                            grandparent_rating_key, set())
                        grandparent_set.add(parent_rating_key)
                        RECENTLY_ADDED_QUEUE[
                            grandparent_rating_key] = grandparent_set

                        parent_set = RECENTLY_ADDED_QUEUE.get(
                            parent_rating_key, set())
                        parent_set.add(rating_key)
                        RECENTLY_ADDED_QUEUE[parent_rating_key] = parent_set

                        RECENTLY_ADDED_QUEUE[rating_key] = set(
                            [grandparent_rating_key])

                        logger.debug(
                            u"Tautulli TimelineHandler :: Library item '%s' (%s, grandparent %s) added to recently added queue."
                            % (title, str(rating_key),
                               str(grandparent_rating_key)))

                        # Schedule a callback to clear the recently added queue
                        schedule_callback(
                            'rating_key-{}'.format(grandparent_rating_key),
                            func=clear_recently_added_queue,
                            args=[grandparent_rating_key],
                            seconds=plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY)

                elif media_type in ('season', 'album'):
                    metadata = self.get_metadata()
                    if metadata:
                        parent_rating_key = int(metadata['parent_rating_key'])

                        parent_set = RECENTLY_ADDED_QUEUE.get(
                            parent_rating_key, set())
                        parent_set.add(rating_key)
                        RECENTLY_ADDED_QUEUE[parent_rating_key] = parent_set

                        logger.debug(
                            u"Tautulli TimelineHandler :: Library item '%s' (%s , parent %s) added to recently added queue."
                            % (title, str(rating_key), str(parent_rating_key)))

                        # Schedule a callback to clear the recently added queue
                        schedule_callback(
                            'rating_key-{}'.format(parent_rating_key),
                            func=clear_recently_added_queue,
                            args=[parent_rating_key],
                            seconds=plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY)

                else:
                    queue_set = RECENTLY_ADDED_QUEUE.get(rating_key, set())
                    RECENTLY_ADDED_QUEUE[rating_key] = queue_set

                    logger.debug(
                        u"Tautulli TimelineHandler :: Library item '%s' (%s) added to recently added queue."
                        % (title, str(rating_key)))

                    # Schedule a callback to clear the recently added queue
                    schedule_callback(
                        'rating_key-{}'.format(rating_key),
                        func=clear_recently_added_queue,
                        args=[rating_key],
                        seconds=plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY)

            # A movie, show, or artist is done processing
            elif media_type in ('movie', 'show', 'artist') and section_id > 0 and \
                state_type == 5 and metadata_state is None and queue_size is None and \
                rating_key in RECENTLY_ADDED_QUEUE:

                logger.debug(
                    u"Tautulli TimelineHandler :: Library item '%s' (%s) done processing metadata."
                    % (title, str(rating_key)))

            # An item was deleted, make sure it is removed from the queue
            elif state_type == 9 and metadata_state == 'deleted':
                if rating_key in RECENTLY_ADDED_QUEUE and not RECENTLY_ADDED_QUEUE[
                        rating_key]:
                    logger.debug(
                        u"Tautulli TimelineHandler :: Library item %s removed from recently added queue."
                        % str(rating_key))
                    del_keys(rating_key)

                    # Remove the callback if the item is removed
                    schedule_callback('rating_key-{}'.format(rating_key),
                                      remove_job=True)
Exemple #38
0
 def __readPluginDescriptors(self):
     allDescriptors = PluginsPackageConfigFile.getAllPluginDescriptors()
     logger.debug("PluginEngine found %d plugins" % len(allDescriptors))
     return allDescriptors
Exemple #39
0
def DiscoveryMain(Framework):
    resultVector = ObjectStateHolderVector()

    ipAddress = Framework.getDestinationAttribute(
        DestinationProperty.IP_ADDRESS)
    if not ipAddress:
        msg = errormessages.makeErrorMessage(na.Protocol.DISPLAY,
                                             message="Invalid IP address")
        errorObject = errorobject.createError(
            errorcodes.INTERNAL_ERROR_WITH_PROTOCOL_DETAILS,
            [na.Protocol.DISPLAY, msg], msg)
        logger.reportErrorObject(errorObject)
        return resultVector

    credentialsId = Framework.getParameter(DestinationProperty.CREDENTIALS_ID)
    if not credentialsId:
        msg = errormessages.makeErrorMessage(
            na.Protocol.DISPLAY, pattern=errormessages.ERROR_NO_CREDENTIALS)
        errorObject = errorobject.createError(
            errorcodes.NO_CREDENTIALS_FOR_TRIGGERED_IP, [na.Protocol.DISPLAY],
            msg)
        logger.reportErrorObject(errorObject)
        return resultVector

    queryTopologyPerDevice = _getBooleanParameter(
        JobParameter.QUERY_TOPOLOGY_PER_DEVICE, Framework, False)
    discovererClass = na_discover.SingleRequestsNaDiscoverer
    if queryTopologyPerDevice:
        discovererClass = na_discover.NaDiscovererPerDevice

    reportDeviceConfigs = _getBooleanParameter(
        JobParameter.REPORT_DEVICE_CONFIGS, Framework, False)
    logger.debug('reportDeviceConfigs: ', reportDeviceConfigs)

    reportDeviceModules = _getBooleanParameter(
        JobParameter.REPORT_DEVICE_MODULES, Framework, False)
    logger.debug('reportDeviceModules:', reportDeviceModules)

    client = None
    try:
        try:

            client = na_discover.createJavaClient(Framework, ipAddress,
                                                  credentialsId)

            logger.debug("Topology is discovered by '%s'" %
                         discovererClass.__name__)

            discoverer = discovererClass(client, ipAddress, Framework)
            discoverer.setDevicePageSize(500)
            discoverer.setReportDeviceConfigs(reportDeviceConfigs)
            discoverer.setReportDeviceModules(reportDeviceModules)

            discoverResult = discoverer.discover()
            if discoverResult:
                devicesById, connectivitiesByDeviceId = discoverResult

                reporter = na.NaReporter(Framework)
                reporter.setBulkThreshold(10000)
                reporter.setReportDeviceConfigs(reportDeviceConfigs)
                reporter.setReportDeviceModules(reportDeviceModules)

                reporter.report(devicesById, connectivitiesByDeviceId)

        finally:
            client and client.close()

    except MissingSdkJarException, ex:
        msg = errormessages.makeErrorMessage(
            na.Protocol.DISPLAY,
            message="Not all jar dependencies are found in class path")
        errorObject = errorobject.createError(errorcodes.MISSING_JARS_ERROR,
                                              [na.Protocol.DISPLAY, msg], msg)
        logger.reportErrorObject(errorObject)
Exemple #40
0
    def discover(self,
                 domainName,
                 traHome,
                 tmpDir,
                 cleanTemp=1,
                 discoverJmsTopology=Boolean.TRUE):
        r'@types: str, str, str, netutils.BaseDnsResolver, bool -> '
        ts = Date().getTime()
        ucmdbTmpDir = "/forUcmdb-%s" % ts
        applications = []

        # first check if tmpDir exists
        fs = self.getFileSystem()
        if fs.exists(tmpDir):
            logger.debug(
                '[' + __name__ +
                ':BusinessWorksApplicationDiscoverer.discover] %s directory exists'
                % tmpDir)
            tmpDir = fs.getFile(tmpDir)
            logger.debug(
                '[' + __name__ +
                ':BusinessWorksApplicationDiscoverer.discover] Permissions - %s, Path = %s'
                % (tmpDir.permissions(), tmpDir.path))
            # TODO check for permissions?
        else:
            logger.errorException(
                '[' + __name__ +
                ':BusinessWorksApplicationDiscoverer.discover] %s directory does not exist'
                % tmpDir)
            # TODO: raise exception or log
            return None

        # get the tra version
        appManageFileDir = "%sbin/" % traHome
        if fs.exists(appManageFileDir):
            appManageFile = fs.getFile("%sAppManage" % appManageFileDir)
            logger.debug(
                '[' + __name__ +
                ':BusinessWorksApplicationDiscoverer.discover] Found AppManage file path - %s'
                % appManageFile.path)

            # run the AppManage command for given domainName
            tempDomainDir = "%s%s/%s" % (tmpDir.path, ucmdbTmpDir, domainName)
            shell = self.getShell()
            shell.execCmd("mkdir -p %s" % tempDomainDir)
            if shell.getLastCmdReturnCode() == 0:
                logger.debug(
                    '[' + __name__ +
                    ':BusinessWorksApplicationDiscoverer.discover] Successfully created temp UCMDB directory: %s'
                    % tempDomainDir)
                shell.execCmd("cd %s" % appManageFileDir)
                if shell.getLastCmdReturnCode() == 0:
                    buffer = shell.execCmd("pwd")
                    logger.debug(
                        '[' + __name__ +
                        ':BusinessWorksApplicationDiscoverer.discover] Changed working directory to = %s'
                        % buffer)

                    if self.__executeAppManage(tempDomainDir, domainName):
                        # let's get the applications now
                        appMap = self.__getAppConfigsMap(shell, tempDomainDir)
                        for (appName, xmlFile) in appMap.items():
                            #logger.debug("%s --> %s" % (fileName, xmlFile))
                            xmlFilePath = "%s/%s" % (tempDomainDir, xmlFile)
                            if fs.exists(xmlFilePath):

                                folder = self.getPathUtils().dirName(
                                    appName) or ""
                                name = self.getPathUtils().baseName(
                                    appName) or ""

                                if name:
                                    application = tibco.Application(
                                        name, folder)
                                    applications.append(application)
                                    xmlFile = fs.getFile(
                                        xmlFilePath, [FileAttrs.CONTENT])
                                    if shell.getLastCmdReturnCode() == 0:
                                        jmsDiscoverer = JmsDiscoverer()
                                        try:
                                            document = XmlParser().getDocument(
                                                xmlFile.content,
                                                namespaceAware=0)
                                        except JException, je:
                                            logger.warnException(
                                                "Failed to parse XML document. %s"
                                                % str(je))
                                        else:
                                            if discoverJmsTopology == Boolean.TRUE:
                                                tibco.each(
                                                    application.addJmsServer,
                                                    jmsDiscoverer.
                                                    discoverJmsServer(
                                                        document))
                                                tibco.each(
                                                    application.addJmsQueue,
                                                    jmsDiscoverer.
                                                    discoverJmsQueues(
                                                        document))
                                                tibco.each(
                                                    application.addJmsTopic,
                                                    jmsDiscoverer.
                                                    discoverJmsTopics(
                                                        document))
                                            tibco.each(
                                                application.addAdapter,
                                                self.__adapterDiscoverer.
                                                discover(document, name))
                                    else:
                                        logger.error(
                                            'Failed to get content of %s' %
                                            xmlFilePath)
                        # remove temporary directory
                        logger.debug("Clean temp folder")
                        if cleanTemp:
                            try:
                                # fs.removeFiles(self.__createdFiles)
                                self.__removeTempUcmdbDirectory(
                                    "%s%s" % (tmpDir.path, ucmdbTmpDir))
                            except Exception, ex:
                                logger.warnException(str(ex))
                                logger.reportWarning(
                                    "Unable to delete temporary folder")
Exemple #41
0
    def extractReplacingMainFolder(self, file, dir, replacement):
        logger.debug("file=%s" % file)
        logger.debug("dir=%s" % dir)

        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)

        zf = zipfile.ZipFile(file)
        self._createstructure(file, dir, replacement)
        num_files = len(zf.namelist())

        for name in zf.namelist():
            logger.debug("name=%s" % name)
            if not name.endswith('/'):
                logger.debug("continue with file: " + name)
                try:
                    (path, filename) = os.path.split(os.path.join(dir, name))
                    logger.debug("path=%s" % path)
                    logger.debug("name=%s" % name)
                    if replacement != '':
                        logger.debug("replace original folder: " + path)
                        path = path.replace(name[:name.find("/")], replacement)
                        logger.debug("replaced [new] folder: " + path)
                    os.makedirs(path)
                except:
                    pass
                newName = ''
                if replacement != '':
                    logger.debug("replace original name: " + name)
                    newName = name.replace(name[:name.find("/")], replacement)
                    logger.debug("replaced [new] name: " + name)
                outfilename = os.path.join(dir, newName)
                logger.debug("outfilename=%s" % outfilename)
                try:
                    outfile = open(outfilename, 'wb')
                    outfile.write(zf.read(name))
                except:
                    logger.error("Something happened in file: " + outfilename)
Exemple #42
0
 def __logDecision(self, descriptorId, decision):
     params = (descriptorId, self, decision)
     logger.debug(" '%s': %r -> %s" % params)
Exemple #43
0
def gyro_integrate(conn):
    '''test gyros by integrating while rotating to the given rotations'''
    conn.ref.send('set streamrate -1\n')
    conn.test.send('set streamrate -1\n')
    util.param_set(conn.ref, 'SR0_RAW_SENS', 20)
    util.param_set(conn.test, 'SR0_RAW_SENS', 20)

    logger.info("Starting gyro integration")
    wait_quiescent(conn.refmav)
    conn.discard_messages()
    if ETE == 0:
        util.set_servo(conn.refmav, YAW_CHANNEL, ROTATIONS['level'].chan1+200)
        util.set_servo(conn.refmav, PITCH_CHANNEL, ROTATIONS['level'].chan2+200)
    if ETE == 1:
        ete = PixETE()
        ete.position(45, 180)
        time.sleep(1)
        ete.rollspeed(4000)
        ete.position(45, 0)
    logger.info("Starting gyro motion")    
    start_time = time.time()
    ref_tstart = None
    test_tstart = [None]*3
    ref_sum = Vector3()
    test_sum = [Vector3(), Vector3(), Vector3()]
    msgs = { 'RAW_IMU' : 0, 'SCALED_IMU2' : 1, 'SCALED_IMU3' : 2 }
    while time.time() < start_time+20:
        imu = conn.refmav.recv_match(type='RAW_IMU', blocking=False)
        if imu is not None:
            #gyro = util.gyro_vector(imu)
            gyro = Vector3(degrees(imu.xgyro*-0.001), degrees(imu.ygyro*0.001), degrees(imu.zgyro*-0.001))  #phil change.... when running this check from back to top, I found that the reference IMU X and Z were reversed.... this hack makes this pass, but I suspect something else is wrong, this should be reverted when that is found.
            tnow = imu.time_usec*1.0e-6
            if ref_tstart is not None:
                deltat = tnow - ref_tstart
                ref_sum += gyro * deltat
            ref_tstart = tnow
            if time.time() - start_time > 2 and gyro.length() < GYRO_TOLERANCE:
                break
        imu = conn.testmav.recv_match(type=msgs.keys(), blocking=False)
        if imu is not None:
            idx = msgs[imu.get_type()]
            gyro = util.gyro_vector(imu)
            if imu.get_type().startswith("SCALED_IMU"):
                tnow = imu.time_boot_ms*1.0e-3
            else:
                tnow = imu.time_usec*1.0e-6
            if test_tstart[idx] is not None:
                deltat = tnow - test_tstart[idx]
                test_sum[idx] += gyro * deltat
            test_tstart[idx] = tnow
    logger.debug("Gyro ref  sums: %s" % ref_sum)
    logger.debug("Gyro test sum1: %s" % test_sum[0])
    logger.debug("Gyro test sum2: %s" % test_sum[1])
    logger.debug("Gyro test sum3: %s" % test_sum[2])
    ete.yawspeed(5000)
    ete.rollspeed(5000)
    ete.position(0, 0)
    wait_quiescent(conn.refmav)
    for idx in range(3):
        err = test_sum[idx] - ref_sum
        if abs(err.x) > GYRO_SUM_TOLERANCE:
            util.failure("X gyro %u error: %.1f" % (idx, err.x))
        if abs(err.y) > GYRO_SUM_TOLERANCE:
            util.failure("Y gyro %u error: %.1f" % (idx, err.y))
        if abs(err.z) > GYRO_SUM_TOLERANCE:
            util.failure("Z gyro %u error: %.1f" % (idx, err.z))
    logger.debug("Gyro test finished")
Exemple #44
0
    def extract(self, file, dir):
        logger.debug("file=%s" % file)
        logger.debug("dir=%s" % dir)

        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)

        zf = zipfile.ZipFile(file)
        self._createstructure(file, dir)
        num_files = len(zf.namelist())

        for name in zf.namelist():
            logger.debug("name=%s" % name)
            if not name.endswith('/'):
                logger.debug("continue with file: " + name)
                try:
                    (path, filename) = os.path.split(os.path.join(dir, name))
                    logger.debug("path=%s" % path)
                    logger.debug("name=%s" % name)
                    os.makedirs(path)
                except:
                    pass
                outfilename = os.path.join(dir, name)
                logger.debug("outfilename=%s" % outfilename)
                try:
                    outfile = open(outfilename, 'wb')
                    outfile.write(zf.read(name))
                except:
                    logger.error("Something happened in file: " + name)
Exemple #45
0
def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modoCache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()):
    logger.info("[scrapertools.py] cachePage url="+url)
    modoCache = config.get_setting("cache.mode")

    '''
    if config.get_platform()=="plex":
        from PMS import HTTP
        try:
            logger.info("url="+url)
            data = HTTP.Request(url)
            logger.info("descargada")
        except:
            data = ""
            logger.error("Error descargando "+url)
            import sys
            for line in sys.exc_info():
                logger.error( "%s" % line )
        
        return data
    '''
    # CACHE_NUNCA: Siempre va a la URL a descargar
    # obligatorio para peticiones POST
    if modoCache == CACHE_NUNCA or post is not None:
        logger.info("[scrapertools.py] MODO_CACHE=2 (no cachear)")
        
        try:
            data = downloadpage(url,post,headers, timeout=timeout)
        except:
            data=""
    
    # CACHE_SIEMPRE: Siempre descarga de cache, sin comprobar fechas, excepto cuando no está
    elif modoCache == CACHE_SIEMPRE:
        logger.info("[scrapertools.py] MODO_CACHE=1 (cachear todo)")
        
        # Obtiene los handlers del fichero en la cache
        cachedFile, newFile = getCacheFileNames(url)
    
        # Si no hay ninguno, descarga
        if cachedFile == "":
            logger.debug("[scrapertools.py] No está en cache")
    
            # Lo descarga
            data = downloadpage(url,post,headers)
    
            # Lo graba en cache
            outfile = open(newFile,"w")
            outfile.write(data)
            outfile.flush()
            outfile.close()
            logger.info("[scrapertools.py] Grabado a " + newFile)
        else:
            logger.info("[scrapertools.py] Leyendo de cache " + cachedFile)
            infile = open( cachedFile )
            data = infile.read()
            infile.close()
    
    # CACHE_ACTIVA: Descarga de la cache si no ha cambiado
    else:
        logger.info("[scrapertools.py] MODO_CACHE=0 (automática)")
        
        # Datos descargados
        data = ""
        
        # Obtiene los handlers del fichero en la cache
        cachedFile, newFile = getCacheFileNames(url)
    
            # Lo descarga
        data = downloadpage(url,post,headers)
            
            # Lo graba en cache
        outfile = open(newFile,"w")
        outfile.write(data)
        outfile.flush()
        outfile.close()
        logger.info("[scrapertools.py] Grabado a " + newFile)
    
      
    return data
Exemple #46
0
    if predict_rating > actual_rating:
        result_sim.loc[common_index] -= learning_rate * user_ratings
    else:
        result_sim.loc[common_index] += learning_rate * user_ratings


################################################################################
# end of functions
################################################################################

logger.info('Start program: ' + __file__)
logger.info('Number of data sets: %d' % number_of_data_sets)
logger.info('Data sets dir: ' + data_sets_dir)
logger.info('Number of nearest neighbors: %d' % k)

logger.debug('======================================')

# loop throught data sets
for i in range(number_of_data_sets):
    test_file = data_sets_dir + str(i) + '/test.csv'
    train_file = data_sets_dir + str(i) + '/train.csv'
    # load test and train data
    logger.info('Test data file: ' + test_file)
    logger.info('Train data file: ' + train_file)

    logger.info('Start loading data...')
    test_df = pd.read_csv(test_file)
    train_df = pd.read_csv(train_file)
    logger.info('Done loading')

    test_productid_array = test_df.product_productid.unique()
Exemple #47
0
def ApiCall(url, method, data, head):
    requests.packages.urllib3.disable_warnings()
    if url is None or not url.startswith("http"):
        logger.error("invalid URL")
        yn = logger.input("Change URL? [y/N] > ")
        if yn == 'y' or yn == "Y":
            url = logger.input("Enter endpoint URL > ")
            updateProfileAttr("endpoint", url)
            with open(LOCAL_FOLDER + ".endpoint", "w+") as f:
                f.write(url)
        else:
            return

    res = None
    if head is not None:
        if "content-type" in head or "Content-Type" in head:
            headers = head
        else:
            head = json.dumps(head)
            headers = '{"content-type": "application/json", ' + head[1:]
            headers = json.loads(headers)
    else:
        headers = {'content-type': 'application/json'}

    logger.debug("Sending requests to: " + url)
    if method == "POST":
        data = data
        logger.debug("Request Payload: " + str(data))
        try:
            if getProfile()["proxy"] is not None:
                proxies = { 'http': "http://" + getProfile()["proxy"], 'https': "http://" + getProfile()["proxy"] }
                res = requests.post(url, data=data, headers=headers, proxies=proxies, verify=False)
            else:
                res = requests.post(url, data=data, headers=headers)

        except urllib2.HTTPError as e:
            logger.debug("RESPONSE: " + str(e))
            return '{"status": "err"}'

        except ValueError as e:
            if str(e).startswith("Invalid header value"):
                logger.error("Invalid session. Please, re-authenticate")
                updateProfileAttr("isAuth", False)
                updateProfileAttr("session", None)
                logger.pressAnyKey()
                return '{"status": "err"}'

    else:
        proxies = None
        if getProfile()["proxy"] is not None:
            # req.add_header('Host', '127.0.0.1')
            proxies = {'http': "http://" + getProfile()["proxy"], 'https': "http://" + getProfile()["proxy"]}
        try:
            res = requests.get(url, headers=headers, proxies=proxies, verify=False)

        except urllib2.HTTPError as e:
            logger.debug ("RESPONSE: " + str(e))
            return '{"status": "err"}'

        except ValueError as e:
            if str(e).startswith("Invalid header value"):
                logger.error("Invalid session. Please, re-authenticate")
                updateProfileAttr("isAuth", False)
                updateProfileAttr("session", None)
                logger.pressAnyKey()
                return '{"status": "err"}'
    if len(res.content) > 64000:
        logger.debug("Response is too big")
    else:
        logger.debug("RESPONSE: " + res.content.encode('utf-8').strip())

    return res.content
Exemple #48
0
def optimise_attitude(conn, rotation, tolerance, timeout=25):
    '''optimise attitude using servo changes'''
    expected_roll = ROTATIONS[rotation].roll
    expected_pitch = ROTATIONS[rotation].pitch
    if ETE == 0:
        chan1 = ROTATIONS[rotation].chan1
        chan2 = ROTATIONS[rotation].chan2
    elif ETE == 1:
        chan1 = ROTATIONS_ETE[rotation].chan1
        chan2 = ROTATIONS_ETE[rotation].chan2

    attitude = wait_quiescent(conn.refmav)

    if ETE == 1:
        return True

    time_start = time.time()
    # we always do at least 2 tries. This means the attitude accuracy
    # will tend to improve over time, while not adding excessive time
    # per board
    tries = 0
    
    while time.time() < time_start+timeout:
        #logger.info("============================= BEGIN ROTATIONS  try=%s =================" % (tries))
        dcm_estimated = Matrix3()
        dcm_estimated.from_euler(attitude.roll, attitude.pitch, attitude.yaw)
    
        droll = expected_roll
        if droll is None:
            droll = attitude.roll
        else:
            droll = radians(droll)
        dpitch = radians(expected_pitch)

        dcm_demanded = Matrix3()
        dcm_demanded.from_euler(droll, dpitch, attitude.yaw)

        (chan1_change, chan2_change) = gimbal_controller(dcm_estimated,
                                                         dcm_demanded, chan1)
        (err_roll, err_pitch) = attitude_error(attitude, expected_roll, expected_pitch)
        logger.info("optimise_attitude: %s err_roll=%.2f   err_pitch=%.2f   chan1=%u chan2=%u" % (rotation, err_roll, err_pitch, chan1, chan2))
        if (tries > 0 and (abs(err_roll)+abs(err_pitch) < tolerance or
                           (abs(chan1_change)<1 and abs(chan2_change)<1))):
            logger.debug("%s converged %.2f %.2f tolerance %.1f" % (rotation, err_roll, err_pitch, tolerance))

            # update optimised rotations to save on convergence time for the next board
            ROTATIONS[rotation].chan1 = chan1
            ROTATIONS[rotation].chan2 = chan2
            logger.debug("optimise_attitude: ROTATIONS[%s]  chan1:%s   chan2:%s" % (rotation, ROTATIONS[rotation].chan1, ROTATIONS[rotation].chan2) )
            
            return True
        
        chan1 += chan1_change
        chan2 += chan2_change

        if chan1 < 700 or chan1 > 2300 or chan2 < 700 or chan2 > 2300:
            logger.debug("servos out of range - failed")
            return False

        util.set_servo(conn.refmav, YAW_CHANNEL, chan1)
        util.set_servo(conn.refmav, PITCH_CHANNEL, chan2)
      
        attitude = wait_quiescent(conn.refmav)
        tries += 1
        
    logger.error("timed out rotating to %s" % rotation)
    return False
Exemple #49
0
class state():
    logger.debug('State Module Loaded')

    @staticmethod
    def init():
        state.state = {}
        events.register('alarm', state.update)

    @staticmethod
    def getDict():
        return state.state

    @staticmethod
    def setVersion(version):
        state.state['version'] = version

    @staticmethod
    def update(eventType, type, parameters, code, event, message,
               defaultStatus):
        if not type in state.state: state.state[type] = {'lastevents': []}

        #keep the last state
        try:
            prev_status = state.state[type][parameters]['status']
        except (IndexError, KeyError):
            #if we are here, we've never seen this event type, parameter combination before
            prev_status = None

        # if this event has never generated 'state' before, populate the defaults
        if not parameters in state.state[type]:
            state.state[type][parameters] = {
                'name':
                config.ZONENAMES[parameters]
                if type == 'zone' else config.PARTITIONNAMES[parameters],
                'lastevents': [],
                'status':
                defaultStatus
            }

        #update the state
        state.state[type][parameters]['status'] = dict(
            state.state[type][parameters]['status'], **event['status'])

        #if this is the first event in this zone/partition we've seen, don't do anything here.
        if prev_status != None:
            #if we've seen this before, check if it's changed
            if prev_status == state.state[type][parameters]['status']:
                logger.debug(
                    'Discarded event. State not changed. ({} {})'.format(
                        event['type'], parameters))
            else:
                events.put('statechange', type, parameters, code, event,
                           message, defaultStatus)
        else:
            events.put('stateinit', type, parameters, code, event, message,
                       defaultStatus)

        #write event
        state.state[type][parameters]['lastevents'].append({
            'datetime':
            str(datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")),
            'code':
            code,
            'message':
            message
        })

        #write to all events
        state.state[type]['lastevents'].append({
            'datetime':
            str(datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")),
            'code':
            code,
            'message':
            message
        })
Exemple #50
0
def verify_directories_created():
    import logger
    import os
    logger.info("streamondemand.core.config.verify_directories_created")

    # Force download path if empty
    download_path = get_setting("downloadpath")
    if download_path=="":
        download_path = os.path.join( get_data_path() , "downloads")
        set_setting("downloadpath" , download_path)

    # Force download list path if empty
    download_list_path = get_setting("downloadlistpath")
    if download_list_path=="":
        download_list_path = os.path.join( get_data_path() , "downloads" , "list")
        set_setting("downloadlistpath" , download_list_path)

    # Force bookmark path if empty
    bookmark_path = get_setting("bookmarkpath")
    if bookmark_path=="":
        bookmark_path = os.path.join( get_data_path() , "bookmarks")
        set_setting("bookmarkpath" , bookmark_path)

    # Create data_path if not exists
    if not os.path.exists(get_data_path()):
        logger.debug("Creating data_path "+get_data_path())
        try:
            os.mkdir(get_data_path())
        except:
            pass

    # Create download_path if not exists
    if not download_path.lower().startswith("smb") and not os.path.exists(download_path):
        logger.debug("Creating download_path "+download_path)
        try:
            os.mkdir(download_path)
        except:
            pass

    # Create download_list_path if not exists
    if not download_list_path.lower().startswith("smb") and not os.path.exists(download_list_path):
        logger.debug("Creating download_list_path "+download_list_path)
        try:
            os.mkdir(download_list_path)
        except:
            pass

    # Create bookmark_path if not exists
    if not bookmark_path.lower().startswith("smb") and not os.path.exists(bookmark_path):
        logger.debug("Creating bookmark_path "+bookmark_path)
        try:
            os.mkdir(bookmark_path)
        except:
            pass

    # Create library_path if not exists
    if not get_library_path().lower().startswith("smb") and not os.path.exists(get_library_path()):
        logger.debug("Creating library_path "+get_library_path())
        try:
            os.mkdir(get_library_path())
        except:
            pass

    # Checks that a directory "xbmc" is not present on platformcode
    old_xbmc_directory = os.path.join( get_runtime_path() , "platformcode" , "xbmc" )
    if os.path.exists( old_xbmc_directory ):
        logger.debug("Removing old platformcode.xbmc directory")
        try:
            import shutil
            shutil.rmtree(old_xbmc_directory)
        except:
            pass
Exemple #51
0
def check_github(scheduler=False, notify=False, use_cache=False):
    plexpy.COMMITS_BEHIND = 0

    if plexpy.CONFIG.GIT_TOKEN:
        headers = {'Authorization': 'token {}'.format(plexpy.CONFIG.GIT_TOKEN)}
    else:
        headers = {}

    version = github_cache('version', use_cache=use_cache)
    if not version:
        # Get the latest version available from github
        logger.info('Retrieving latest version information from GitHub')
        url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.CONFIG.GIT_BRANCH)
        version = request.request_json(url,
                                       headers=headers,
                                       timeout=20,
                                       validator=lambda x: type(x) == dict)
        github_cache('version', github_data=version)

    if version is None:
        logger.warn(
            'Could not get the latest version from GitHub. Are you running a local development version?'
        )
        return plexpy.CURRENT_VERSION

    plexpy.LATEST_VERSION = version['sha']
    logger.debug("Latest version is %s", plexpy.LATEST_VERSION)

    # See how many commits behind we are
    if not plexpy.CURRENT_VERSION:
        logger.info(
            'You are running an unknown version of Tautulli. Run the updater to identify your version'
        )
        return plexpy.LATEST_VERSION

    if plexpy.LATEST_VERSION == plexpy.CURRENT_VERSION:
        logger.info('Tautulli is up to date')
        return plexpy.LATEST_VERSION

    commits = github_cache('commits', use_cache=use_cache)
    if not commits:
        logger.info(
            'Comparing currently installed version with latest GitHub version')
        url = 'https://api.github.com/repos/%s/%s/compare/%s...%s' % (
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.LATEST_VERSION, plexpy.CURRENT_VERSION)
        commits = request.request_json(url,
                                       headers=headers,
                                       timeout=20,
                                       whitelist_status_code=404,
                                       validator=lambda x: type(x) == dict)
        github_cache('commits', github_data=commits)

    if commits is None:
        logger.warn('Could not get commits behind from GitHub.')
        return plexpy.LATEST_VERSION

    try:
        plexpy.COMMITS_BEHIND = int(commits['behind_by'])
        logger.debug("In total, %d commits behind", plexpy.COMMITS_BEHIND)
    except KeyError:
        logger.info(
            'Cannot compare versions. Are you running a local development version?'
        )
        plexpy.COMMITS_BEHIND = 0

    if plexpy.COMMITS_BEHIND > 0:
        logger.info('New version is available. You are %s commits behind' %
                    plexpy.COMMITS_BEHIND)

        releases = github_cache('releases', use_cache=use_cache)
        if not releases:
            url = 'https://api.github.com/repos/%s/%s/releases' % (
                plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO)
            releases = request.request_json(
                url,
                timeout=20,
                whitelist_status_code=404,
                validator=lambda x: type(x) == list)
            github_cache('releases', github_data=releases)

        if releases is None:
            logger.warn('Could not get releases from GitHub.')
            return plexpy.LATEST_VERSION

        if plexpy.CONFIG.GIT_BRANCH == 'master':
            release = next((r for r in releases if not r['prerelease']),
                           releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'beta':
            release = next(
                (r
                 for r in releases if not r['tag_name'].endswith('-nightly')),
                releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'nightly':
            release = next((r for r in releases), releases[0])
        else:
            release = releases[0]

        plexpy.LATEST_RELEASE = release['tag_name']

        if notify:
            plexpy.NOTIFY_QUEUE.put({
                'notify_action':
                'on_plexpyupdate',
                'plexpy_download_info':
                release,
                'plexpy_update_commit':
                plexpy.LATEST_VERSION,
                'plexpy_update_behind':
                plexpy.COMMITS_BEHIND
            })

        if scheduler and plexpy.CONFIG.PLEXPY_AUTO_UPDATE and not plexpy.DOCKER and not plexpy.FROZEN:
            logger.info('Running automatic update.')
            plexpy.shutdown(restart=True, update=True)

    elif plexpy.COMMITS_BEHIND == 0:
        logger.info('Tautulli is up to date')

    return plexpy.LATEST_VERSION
Exemple #52
0
    def dump_input(self):

        if self.verbose < logger.INFO:
            return self

        logger.info(self, '')
        logger.info(self, '******** %s flags ********', self.__class__)
        logger.info(self, '* General Info')
        logger.info(self, 'Date %s' % time.ctime())
        logger.info(self, 'Python %s' % sys.version)
        logger.info(self, 'Numpy %s' % numpy.__version__)
        logger.info(self, 'Number of threads %d' % self.nthreads)
        logger.info(self, 'Verbose level %d' % self.verbose)
        logger.info(self, 'Scratch dir %s' % self.scratch)
        logger.info(self, 'Correlated calculation %s' % self.corr)
        logger.info(self, 'Input wfn data file %s' % self.wfnfile)
        logger.info(self, 'Output h5 data file %s' % self.chkfile)
        if (self.corr):
            logger.info(self, '1-RDM data file %s' % self.rdmfile)
        logger.info(self, 'Max_memory %d MB' % self.max_memory)

        logger.info(self, '* Molecular Info')
        logger.info(self, 'Num atoms %d' % self.natm)
        logger.info(self, 'Num electrons %d' % self.nelectrons)
        #logger.info(self,'Total charge %d' % self.charge)
        #logger.info(self,'Spin %d ' % self.spin)
        logger.info(self, 'Atom Coordinates (Bohr)')
        for i in range(self.natm):
            logger.info(
                self,
                'Nuclei %d %s with charge %d position : %.6f  %.6f  %.6f', i,
                self.symbols[i], self.charges[i], *self.coords[i])

        logger.info(self, '* Basis Info')
        logger.info(self, 'Cuttoff for primitives %g' % self.cuttz)
        logger.info(self, 'Number of Orbitals %d' % self.nmo)
        logger.info(self, 'Old Number of primitives %d' % self.oldnprims)
        logger.info(self, 'Number of primitives %d' % self.nprims)
        logger.info(self, 'Maximum l in the basis %d' % self.lmax)
        logger.debug(self, 'Number of primitives per center %s' % self.npc)
        logger.info(self, 'Total number of shells %d' % self.nshells)
        logger.debug(self, 'Number of shells per center %s' % self.ngroup)
        for ic in range(self.natm):
            logger.debug(self, 'Basis for center %d' % ic)
            ntfu = numpy.zeros(6, dtype=numpy.int32)
            for m in range(self.ngroup[ic]):
                zz = self.oexp[self.nuexp[m, 0, ic] - 1]
                ii = self.nuexp[m, 0, ic]
                itip = self.ityp[ii - 1] - 1
                it1 = data.nlm[itip, 0]
                it2 = data.nlm[itip, 1]
                it3 = data.nlm[itip, 2]
                isu = it1 + it2 + it3
                ntfu[isu] += 1
                nzicm = self.nzexp[m, ic]
                x1 = self.rcutte[m, ic]
                logger.debug(self, 'Shell Type %s exp %f zero at %f idx %s' % \
                (param.LTYPE[isu],zz,numpy.sqrt(x1),self.nuexp[m, :nzicm, ic]))
        logger.debug(self, 'Number of shells of each type %s' % ntfu)
        logger.debug(self, 'Ocupation of molecular orbitals %s' % self.mo_occ)
        logger.info(self, '')

        return self
Exemple #53
0
def makeReport(i__UCMDBService, i__dir, i__FileName, queryTQL):
    logger.info('************* START makeReport *************')
    l__QueryService = i__UCMDBService.getTopologyQueryService()

    #The unique name of the query node of type "node"
    #l__NodeName    = "Node"
    #Creating a query node from type "host_node" and asking for all returned nodes the display_label

    #Execute the unsaved query
    ##    l__Topology            = l__QueryService.executeQuery(l__QueryDefinition)
    l__Topology = l__QueryService.executeNamedQuery(queryTQL)
    #Get the node results
    l__NodeCollection = l__Topology.getCIsByName("Node")
    logger.debug(queryTQL + ' : ' + str(l__NodeCollection))
    #Go over the nodes and print its related IPs
    l__NodeQty = 0
    #l__new = sorted(l__NodeCollection,key=attrgetter('label'))
    #l__new = sorted(l__NodeCollection,key=lambda l__NodeCollection:l__NodeCollection[2])

    #sorted(l__NodeCollection, key=itemgetter(2))
    #logger.debug("\\\\\\\\\\\\\\\\"+l__NodeCollection.toString())

    if l__NodeCollection:
        newCI = ITAMline()
        toprow = newCI.exportHeader("UDALL")
        logger.debug("toprow : " + toprow)
        output = ''

        filename = newCI.exportfileName("UDALL")
        file = openWrite(i__dir, filename)

        if toprow != '':
            output = toprow + '\r\n'
            writeLine(file, output)

    chunks = {}
    chunkcount = 0
    chunksize = 100
    #citlist = ["host_node","unix","nt"]
    for l__CITypeNode in l__NodeCollection:
        if l__CITypeNode:
            logger.debug(l__CITypeNode)
            servername = str(l__CITypeNode.getPropertyValue("name"))
            serverid = str(l__CITypeNode.getPropertyValue("global_id"))
            serverdomain = str(l__CITypeNode.getPropertyValue("domain_name"))
            #serverscannedtime = str(l__CITypeNode.getPropertyValue("root_lastaccesstime"))
            servermodel = str(l__CITypeNode.getPropertyValue("node_model"))
            servervendor = str(l__CITypeNode.getPropertyValue("vendor"))
            serverserialnumber = str(
                l__CITypeNode.getPropertyValue("serial_number"))
            serverOS = str(
                l__CITypeNode.getPropertyValue("discovered_os_name"))
            logger.debug(servername)
            intnum = 999

            if ((chunkcount % chunksize) == 0):
                chunks[(int(chunkcount / chunksize))] = []

            chunks[(int(chunkcount / chunksize))].append(serverid)
            chunkcount = chunkcount + 1

    if (chunkcount > 0):
        for id in chunks:
            # retry the query up to 100 times
            idlist = chunks[id]
            logger.debug('ITAM %s : %s' % (id, idlist))
            max_retry = 100
            while (max_retry > 0):
                try:
                    # Base query to get software for a node which will be dynamically run each time
                    logger.debug("execyqyery")
                    l__QueryExec = l__QueryService.createExecutableQuery(
                        "UD Node Software")
                    l__QueryExec.nodeRestrictions(
                        "Node").restrictToIdsFromStrings(idlist)

                    l__Topologysw = l__QueryService.executeQuery(l__QueryExec)
                    l__NodeCollection2 = l__Topologysw.getCIsByName("Node")
                    logger.debug(l__NodeCollection2)
                    max_retry = 0
                    for swnode in l__NodeCollection2:
                        logger.debug(swnode)
                        logger.debug("getall nnode related cis")
                        servername = removeExtra(
                            swnode.getPropertyValue("name"))
                        serverid = removeExtra(
                            swnode.getPropertyValue("global_id"))
                        serverdomain = removeExtra(
                            swnode.getPropertyValue("domain_name"))
                        serverscannedtime = getUDAScantime(swnode)
                        servermodel = removeExtra(
                            swnode.getPropertyValue("node_model"))
                        servervendor = removeExtra(
                            swnode.getPropertyValue("vendor"))
                        serverserialnumber = removeExtra(
                            swnode.getPropertyValue("serial_number"))
                        serverOS = removeExtra(
                            swnode.getPropertyValue("discovered_os_name"))
                        logger.debug(servername)
                        cpudata = getCPUsummary(swnode)

                        for l__TopologyRelation in swnode.getOutgoingRelations(
                        ):

                            relcitype = removeExtra(
                                str(l__TopologyRelation.getEnd2CI().
                                    getPropertyValue("root_class")))

                            if relcitype == 'installed_software':
                                logger.debug("in mainloop")
                                newCI = ITAMline()
                                newCI.devicelabel = servername
                                newCI.deviceid = serverid
                                newCI.softwarevendor = removeExtra(
                                    l__TopologyRelation.getEnd2CI(
                                    ).getPropertyValue("discovered_vendor"))
                                newCI.softwarename = removeExtra(
                                    l__TopologyRelation.getEnd2CI(
                                    ).getPropertyValue("name"))
                                newCI.releasename = removeExtra(
                                    l__TopologyRelation.getEnd2CI(
                                    ).getPropertyValue("release"))
                                newCI.productversion = removeExtra(
                                    l__TopologyRelation.getEnd2CI(
                                    ).getPropertyValue("version"))
                                newCI.domain = serverdomain
                                newCI.lastscannedtime = serverscannedtime
                                newCI.model = servermodel
                                newCI.servercompany = servervendor
                                newCI.serialnumber = serverserialnumber
                                newCI.installedpath = removeExtra(
                                    l__TopologyRelation.getEnd2CI(
                                    ).getPropertyValue("file_system_path"))
                                newCI.OS = serverOS
                                newCI.physicalcpus = cpudata.physicalcpus
                                newCI.socketcount = cpudata.sockets
                                newCI.corecount = cpudata.corecount
                                newCI.cputype = cpudata.cputype
                                newCI.cpuvendor = cpudata.cpuvendor
                                newCI.logicalcpus = cpudata.logicalcpucount
                                newCI.cpuspeed = cpudata.cpuspeed

                                outstr = newCI.exportData("UDALL")
                                if outstr != '':
                                    logger.debug("datathere")
                                    writeLine(file, outstr + '\r\n')
                except Exception as e:
                    logger.debug(
                        'UCMDB query for %s failed with %s, pausing for 5 seconds and retrying (count = %d)'
                        % (servername, e, max_retry))
                    time.sleep(5)
                    max_retry = max_retry - 1
                    if (max_retry == 0):
                        logger.debug('Abandoning query attempts')

                        #outputlist.append(newCI)
                        #logger.debug(servername + ' : ' + newCI.applicationname)

        filename = filename[:-3] + 'success'
        fileok = openWrite(i__dir, filename)
        writeLine(fileok, 'Report generated successfully')

    logger.info('************* END makeReport *************')
def readPropertiesFile(fname):
    """
    Uses ConfigReader to read all sections in to a dictionary. Each sections options will
    be kept as nested dictionary under each section key.
    e.g.:
    {
        'WHITELIST': {
            '<j2eetype-1>': {
                '<value-1>',
                '<value-2>',
                :
                '<value-n>'
                }
            '<j2eetype-2>': {
                '<value-1>',
                '<value-2>',
                :
                '<value-n>'
                }
    }
    """
    l.debug("looking for file: '%s'", fname)
    if not os.path.isfile(fname):
        l.debug("file not found: '%s'", fname)
        return {}

    # discover comments per regex
    re_comment = re.compile('^\s*#', re.IGNORECASE)
    # discover enumeration on first level: eg "DATASOURCE3"
    re_enumeration = re.compile('^([A-z]+)(\d+)\.', re.IGNORECASE)

    reader = ConfigParser.ConfigParser()
    l.debug("reading file: '%s'", fname)
    reader.read(fname)
    # read all sections and items therein
    allSectionsMap = {}
    sectionNames = reader.sections()
    sectionNames.sort()
    for sectionName in sectionNames:
        if l.isDebugEnabled():
            l.logLF()
            l.debug("found section === %s === ", sectionName)
        sectionMap = {}
        allSectionsMap[sectionName.upper()] = sectionMap

        lastBlockNumber = -1
        expectedBlockNumber = 0
        configBlockNumber = 0
        # read all option lines from current section into sectionMap dictionary
        # eg: "datasource1.connectionpool.reaptime = 7"
        sectionOptions = reader.options(sectionName)
        # in Python 2.1 (WAS8.x) the option() lines of a configParser section may be unsorted!
        # but I cant use default sort, because Datasource1, datasource10 will be sorted before Datasource2
        if l.isHellLevelEnabled():
            for optionKey in sectionOptions:
                l.debug666("read original-ordered section/line: %-20s :  %s",
                           sectionName, optionKey)

        sortedSectionOptions = sorted(sectionOptions,
                                      key=configReaderOptionSort)
        if l.isHellLevelEnabled():
            for optionKey in sortedSectionOptions:
                l.debug666("read num.sorted section/line: %s :\t%s",
                           sectionName, optionKey)

        for optionKey in sortedSectionOptions:
            if (re_comment.search(optionKey)) is not None:
                l.debug666("skipping comment: %s", optionKey)
                continue
            optionValue = reader.get(sectionName, optionKey)

            # now fix the *1st* level of enumeration, if it is not done sequentially.
            # eg. when somebody declares 20 datasources, but deletes the 3rd.
            # This way, one does not need to re-enumerate all the
            # other datasource entries in the config file.
            match = re_enumeration.search(optionKey)
            if (match) is not None:
                configBlockNumber = int(match.group(2))
                if configBlockNumber > lastBlockNumber:
                    # new block found -> increase block counter
                    if l.isDebugEnabled():
                        l.logLF()
                        l.debug("=== read new config block # %d ===",
                                configBlockNumber)
                    expectedBlockNumber = expectedBlockNumber + 1
                    lastBlockNumber = configBlockNumber

                l.debug2("config #:%2d   expected #:%2d", configBlockNumber,
                         expectedBlockNumber)
                # check for non-sequential block numbering!
                if configBlockNumber != expectedBlockNumber:
                    l.debug("FIX block numbering: %d -> %d in option: '%s'",
                            configBlockNumber, expectedBlockNumber, optionKey)
                    optionKey = re_enumeration.sub(
                        lambda m, num=expectedBlockNumber: "%s%s." %
                        (m.group(1), num),
                        optionKey)
                    # and store the original block number in special hash key:
                    originalNumberKey = "%s%d.%s" % (match.group(1),
                                                     expectedBlockNumber,
                                                     "__ORIGINAL_NUMBER")
                    sectionMap[originalNumberKey.upper()] = configBlockNumber

            l.debug("data: %s = %s", optionKey, optionValue)
            # finally, add key to sectionMap hash:
            sectionMap[optionKey.upper()] = optionValue

    return allSectionsMap
def import_from_plexwatch(database=None,
                          table_name=None,
                          import_ignore_interval=0):

    try:
        connection = sqlite3.connect(database, timeout=20)
        connection.row_factory = sqlite3.Row
    except sqlite3.OperationalError:
        logger.error(u"Tautulli Importer :: Invalid filename.")
        return None
    except ValueError:
        logger.error(u"Tautulli Importer :: Invalid filename.")
        return None

    try:
        connection.execute('SELECT ratingKey from %s' % table_name)
    except sqlite3.OperationalError:
        logger.error(
            u"Tautulli Importer :: Database specified does not contain the required fields."
        )
        return None

    logger.debug(u"Tautulli Importer :: PlexWatch data import in progress...")

    ap = activity_processor.ActivityProcessor()
    user_data = users.Users()

    # Get the latest friends list so we can pull user id's
    try:
        users.refresh_users()
    except:
        logger.debug(
            u"Tautulli Importer :: Unable to refresh the users list. Aborting import."
        )
        return None

    query = 'SELECT time AS started, ' \
            'stopped, ' \
            'cast(ratingKey as text) AS rating_key, ' \
            'null AS user_id, ' \
            'user, ' \
            'ip_address, ' \
            'paused_counter, ' \
            'platform AS player, ' \
            'null AS platform, ' \
            'null as machine_id, ' \
            'parentRatingKey as parent_rating_key, ' \
            'grandparentRatingKey as grandparent_rating_key, ' \
            'null AS media_type, ' \
            'null AS view_offset, ' \
            'xml, ' \
            'rating as content_rating,' \
            'summary,' \
            'title AS full_title,' \
            '(case when orig_title_ep = "" then orig_title else ' \
            'orig_title_ep end) as title,' \
            '(case when orig_title_ep != "" then orig_title else ' \
            'null end) as grandparent_title ' \
            'FROM ' + table_name + ' ORDER BY id'

    result = connection.execute(query)

    for row in result:
        # Extract the xml from the Plexwatch db xml field.
        extracted_xml = extract_plexwatch_xml(row['xml'])

        # If we get back None from our xml extractor skip over the record and log error.
        if not extracted_xml:
            logger.error(
                u"Tautulli Importer :: Skipping record with ratingKey %s due to malformed xml."
                % str(row['rating_key']))
            continue

        # Skip line if we don't have a ratingKey to work with
        if not row['rating_key']:
            logger.error(
                u"Tautulli Importer :: Skipping record due to null ratingKey.")
            continue

        # If the user_id no longer exists in the friends list, pull it from the xml.
        if user_data.get_user_id(user=row['user']):
            user_id = user_data.get_user_id(user=row['user'])
        else:
            user_id = extracted_xml['user_id']

        session_history = {
            'started':
            row['started'],
            'stopped':
            row['stopped'],
            'rating_key':
            row['rating_key'],
            'title':
            row['title'],
            'parent_title':
            extracted_xml['parent_title'],
            'grandparent_title':
            row['grandparent_title'],
            'original_title':
            extracted_xml['original_title'],
            'full_title':
            row['full_title'],
            'user_id':
            user_id,
            'user':
            row['user'],
            'ip_address':
            row['ip_address']
            if row['ip_address'] else extracted_xml['ip_address'],
            'paused_counter':
            row['paused_counter'],
            'player':
            row['player'],
            'platform':
            extracted_xml['platform'],
            'machine_id':
            extracted_xml['machine_id'],
            'parent_rating_key':
            row['parent_rating_key'],
            'grandparent_rating_key':
            row['grandparent_rating_key'],
            'media_type':
            extracted_xml['media_type'],
            'view_offset':
            extracted_xml['view_offset'],
            'video_decision':
            extracted_xml['video_decision'],
            'audio_decision':
            extracted_xml['audio_decision'],
            'transcode_decision':
            extracted_xml['transcode_decision'],
            'duration':
            extracted_xml['duration'],
            'width':
            extracted_xml['width'],
            'height':
            extracted_xml['height'],
            'container':
            extracted_xml['container'],
            'video_codec':
            extracted_xml['video_codec'],
            'audio_codec':
            extracted_xml['audio_codec'],
            'bitrate':
            extracted_xml['bitrate'],
            'video_resolution':
            extracted_xml['video_resolution'],
            'video_framerate':
            extracted_xml['video_framerate'],
            'aspect_ratio':
            extracted_xml['aspect_ratio'],
            'audio_channels':
            extracted_xml['audio_channels'],
            'transcode_protocol':
            extracted_xml['transcode_protocol'],
            'transcode_container':
            extracted_xml['transcode_container'],
            'transcode_video_codec':
            extracted_xml['transcode_video_codec'],
            'transcode_audio_codec':
            extracted_xml['transcode_audio_codec'],
            'transcode_audio_channels':
            extracted_xml['transcode_audio_channels'],
            'transcode_width':
            extracted_xml['transcode_width'],
            'transcode_height':
            extracted_xml['transcode_height']
        }

        session_history_metadata = {
            'rating_key': helpers.latinToAscii(row['rating_key']),
            'parent_rating_key': row['parent_rating_key'],
            'grandparent_rating_key': row['grandparent_rating_key'],
            'title': row['title'],
            'parent_title': extracted_xml['parent_title'],
            'grandparent_title': row['grandparent_title'],
            'original_title': extracted_xml['original_title'],
            'media_index': extracted_xml['media_index'],
            'parent_media_index': extracted_xml['parent_media_index'],
            'thumb': extracted_xml['thumb'],
            'parent_thumb': extracted_xml['parent_thumb'],
            'grandparent_thumb': extracted_xml['grandparent_thumb'],
            'art': extracted_xml['art'],
            'media_type': extracted_xml['media_type'],
            'year': extracted_xml['year'],
            'originally_available_at':
            extracted_xml['originally_available_at'],
            'added_at': extracted_xml['added_at'],
            'updated_at': extracted_xml['updated_at'],
            'last_viewed_at': extracted_xml['last_viewed_at'],
            'content_rating': row['content_rating'],
            'summary': row['summary'],
            'tagline': extracted_xml['tagline'],
            'rating': extracted_xml['rating'],
            'duration': extracted_xml['duration'],
            'guid': extracted_xml['guid'],
            'section_id': extracted_xml['section_id'],
            'directors': extracted_xml['directors'],
            'writers': extracted_xml['writers'],
            'actors': extracted_xml['actors'],
            'genres': extracted_xml['genres'],
            'studio': extracted_xml['studio'],
            'labels': extracted_xml['labels'],
            'full_title': row['full_title'],
            'width': extracted_xml['width'],
            'height': extracted_xml['height'],
            'container': extracted_xml['container'],
            'video_codec': extracted_xml['video_codec'],
            'audio_codec': extracted_xml['audio_codec'],
            'bitrate': extracted_xml['bitrate'],
            'video_resolution': extracted_xml['video_resolution'],
            'video_framerate': extracted_xml['video_framerate'],
            'aspect_ratio': extracted_xml['aspect_ratio'],
            'audio_channels': extracted_xml['audio_channels']
        }

        # On older versions of PMS, "clip" items were still classified as "movie" and had bad ratingKey values
        # Just make sure that the ratingKey is indeed an integer
        if session_history_metadata['rating_key'].isdigit():
            ap.write_session_history(
                session=session_history,
                import_metadata=session_history_metadata,
                is_import=True,
                import_ignore_interval=import_ignore_interval)
        else:
            logger.debug(u"Tautulli Importer :: Item has bad rating_key: %s" %
                         session_history_metadata['rating_key'])

    logger.debug(u"Tautulli Importer :: PlexWatch data import complete.")
    import_users()
Exemple #56
0
def DiscoveryMain(Framework):

    errMsg = "Export Directory is not valid. Ensure export directory exists on the probe system."
    testConnection = Framework.getTriggerCIData("testConnection")
    if testConnection == 'true':
        # check if valid export directory exists
        isValid = validateDirectory(Framework)
        if not isValid:
            raise Exception, errMsg
            return
        else:
            logger.debug("Test connection was successful")
            return

    i__Framework = Framework
    logger.info('************* START MAIN *************')
    l__LocalShell = shellutils.ShellUtils(
        i__Framework.createClient(ClientsConsts.LOCAL_SHELL_PROTOCOL_NAME))
    l__OSHVResult = ObjectStateHolderVector()
    l__UCMDBService = createCMDBConnection(l__LocalShell)

    logger.debug(str(l__UCMDBService))

    if not validateDirectory(Framework):
        logger.error(errMsg)
        raise Exception, errMsg
        return

    expDirPath = Framework.getTriggerCIData("Export Directory")
    queryTQL = Framework.getTriggerCIData("TQL Input Name")
    isLastChunk = Framework.getTriggerCIData("isLastChunk")

    logger.debug("Generating ITAM report output")
    # clean up XML
    empty = 0  ##isEmpty(addResult, "addResult")
    if not empty:
        ##addResult = replace(addResult, iplookup)
        if l__UCMDBService:
            logger.debug("Calling makereport")
            #addResult = makeReport(l__UCMDBService, expDirPath, 'testout.txt', queryTQL)
            makeReport(l__UCMDBService, expDirPath, 'testout.txt', queryTQL)

        logger.debug("Back from makereport")
        reports = []  #'ITAMALL']
        addResult = 0
        if addResult:

            for rep in reports:
                toprow = addResult[0].exportHeader(rep)
                logger.debug("report : " + rep)
                logger.debug("toprow : " + toprow)
                output = ''

                try:
                    filename = addResult[0].exportfileName(rep)
                    file = openWrite(expDirPath, filename)

                    if toprow != '':
                        output = toprow + '\r\n'
                        writeLine(file, output)
                    for ddmi in addResult:
                        outstr = ddmi.exportData(rep)
                        #logger.debug('out: ' + ddmi.exportData(rep))
                        if outstr != '':
                            #output = output + outstr + '\r\n'
                            writeLine(file, outstr + '\r\n')

                except:
                    logger.debug("Error writing to file for report " + rep +
                                 " filename - " + expDirPath + "/" +
                                 addResult[0].exportfileName(rep))
def verify_directories_created():
    import logger
    import os
    logger.info("verify_directories_created")

    # Force download path if empty
    download_path = get_setting("downloadpath")
    if download_path == "":
        download_path = os.path.join(get_data_path(), "downloads")
        set_setting("downloadpath", download_path)

    # Force download list path if empty
    download_list_path = get_setting("downloadlistpath")
    if download_list_path == "":
        download_list_path = os.path.join(get_data_path(), "downloads", "list")
        set_setting("downloadlistpath", download_list_path)

    # Force bookmark path if empty
    bookmark_path = get_setting("bookmarkpath")
    if bookmark_path == "":
        bookmark_path = os.path.join(get_data_path(), "bookmarks")
        set_setting("bookmarkpath", bookmark_path)

    # Create data_path if not exists
    if not os.path.exists(get_data_path()):
        logger.debug("Creating data_path " + get_data_path())
        try:
            os.mkdir(get_data_path())
        except:
            pass

    # Create download_path if not exists
    if not download_path.lower().startswith("smb") and not os.path.exists(
            download_path):
        logger.debug("Creating download_path " + download_path)
        try:
            os.mkdir(download_path)
        except:
            pass

    # Create download_list_path if not exists
    if not download_list_path.lower().startswith("smb") and not os.path.exists(
            download_list_path):
        logger.debug("Creating download_list_path " + download_list_path)
        try:
            os.mkdir(download_list_path)
        except:
            pass

    # Create bookmark_path if not exists
    if not bookmark_path.lower().startswith("smb") and not os.path.exists(
            bookmark_path):
        logger.debug("Creating bookmark_path " + bookmark_path)
        try:
            os.mkdir(bookmark_path)
        except:
            pass

    # Create library_path if not exists
    if not get_library_path().lower().startswith("smb") and not os.path.exists(
            get_library_path()):
        logger.debug("Creating library_path " + get_library_path())
        try:
            os.mkdir(get_library_path())
        except:
            pass
Exemple #58
0
    def get_plex_downloads(self):
        logger.debug(u"Tautulli PlexTV :: Retrieving current server version.")

        pms_connect = pmsconnect.PmsConnect()
        pms_connect.set_server_version()

        update_channel = pms_connect.get_server_update_channel()

        logger.debug(u"Tautulli PlexTV :: Plex update channel is %s." %
                     update_channel)
        plex_downloads = self.get_plextv_downloads(
            plexpass=(update_channel == 'beta'))

        try:
            available_downloads = json.loads(plex_downloads)
        except Exception as e:
            logger.warn(
                u"Tautulli PlexTV :: Unable to load JSON for get_plex_updates."
            )
            return {}

        # Get the updates for the platform
        pms_platform = common.PMS_PLATFORM_NAME_OVERRIDES.get(
            plexpy.CONFIG.PMS_PLATFORM, plexpy.CONFIG.PMS_PLATFORM)
        platform_downloads = available_downloads.get('computer').get(pms_platform) or \
            available_downloads.get('nas').get(pms_platform)

        if not platform_downloads:
            logger.error(
                u"Tautulli PlexTV :: Unable to retrieve Plex updates: Could not match server platform: %s."
                % pms_platform)
            return {}

        v_old = helpers.cast_to_int("".join(
            v.zfill(4)
            for v in plexpy.CONFIG.PMS_VERSION.split('-')[0].split('.')[:4]))
        v_new = helpers.cast_to_int("".join(
            v.zfill(4) for v in platform_downloads.get('version', '').split(
                '-')[0].split('.')[:4]))

        if not v_old:
            logger.error(
                u"Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid current server version: %s."
                % plexpy.CONFIG.PMS_VERSION)
            return {}
        if not v_new:
            logger.error(
                u"Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid new server version: %s."
                % platform_downloads.get('version'))
            return {}

        # Get proper download
        releases = platform_downloads.get('releases', [{}])
        release = next(
            (r for r in releases
             if r['distro'] == plexpy.CONFIG.PMS_UPDATE_DISTRO
             and r['build'] == plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD),
            releases[0])

        download_info = {
            'update_available': v_new > v_old,
            'platform': platform_downloads.get('name'),
            'release_date': platform_downloads.get('release_date'),
            'version': platform_downloads.get('version'),
            'requirements': platform_downloads.get('requirements'),
            'extra_info': platform_downloads.get('extra_info'),
            'changelog_added': platform_downloads.get('items_added'),
            'changelog_fixed': platform_downloads.get('items_fixed'),
            'label': release.get('label'),
            'distro': release.get('distro'),
            'distro_build': release.get('build'),
            'download_url': release.get('url'),
        }

        return download_info
Exemple #59
0
def adjust_ahrs_trim(conn, level_attitude):
    '''
    force the AHRS trim to zero, which removes the effect of the jig not being quite
    level. This is only incorrect if the accels are not aligned on the board, which
    we check in check_accel_cal later

    As a side effect this function set AHRS_ORIENTATION for the test board to 12
    '''

    # start with board right way up
    rotate.set_rotation(conn, 'level')

    # we need to work out what the error in attitude of the 3 IMUs on the test jig is
    # to do that we start with it level, and measure the roll/pitch as compared to the reference
    conn.discard_messages()
    ref_imu = conn.refmav.recv_match(type='RAW_IMU', blocking=True, timeout=3)
    test_imu1 = conn.testmav.recv_match(type='RAW_IMU',
                                        blocking=True,
                                        timeout=3)
    test_imu2 = conn.testmav.recv_match(type='SCALED_IMU2',
                                        blocking=True,
                                        timeout=3)
    test_imu3 = conn.testmav.recv_match(type='SCALED_IMU3',
                                        blocking=True,
                                        timeout=3)
    if ref_imu is None:
        util.failure("Lost comms to reference board in ahrs trim")
    if test_imu1 is None or test_imu2 is None or test_imu3 is None:
        util.failure("Lost comms to test board in ahrs trim")

    (ref_roll, ref_pitch) = util.attitude_estimate(ref_imu)
    (test_roll1, test_pitch1) = util.attitude_estimate(test_imu1)
    (test_roll2, test_pitch2) = util.attitude_estimate(test_imu2)
    (test_roll3, test_pitch3) = util.attitude_estimate(test_imu3)

    # get the roll and pitch errors
    roll_error1 = (test_roll1 - ref_roll)
    roll_error2 = (test_roll2 - ref_roll)
    roll_error3 = (test_roll3 - ref_roll)
    pitch_error1 = (test_pitch1 - ref_pitch)
    pitch_error2 = (test_pitch2 - ref_pitch)
    pitch_error3 = (test_pitch3 - ref_pitch)

    conn.discard_messages()
    ref_imu = conn.refmav.recv_match(type='RAW_IMU', blocking=True, timeout=3)
    test_imu1 = conn.testmav.recv_match(type='RAW_IMU',
                                        blocking=True,
                                        timeout=3)
    test_imu2 = conn.testmav.recv_match(type='SCALED_IMU2',
                                        blocking=True,
                                        timeout=3)
    test_imu3 = conn.testmav.recv_match(type='SCALED_IMU3',
                                        blocking=True,
                                        timeout=3)
    if ref_imu is None:
        util.failure("Lost comms to reference board in ahrs trim")
    if test_imu1 is None or test_imu2 is None or test_imu3 is None:
        util.failure("Lost comms to test board in ahrs trim")

    logger.debug(
        "Tilt Ref=(%.1f %.1f) Test1=(%.1f %.1f) Test2=(%.1f %.1f) Test3=(%.1f %.1f)"
        % (ref_roll, ref_pitch, test_roll1, test_pitch1, test_roll2,
           test_pitch2, test_roll3, test_pitch3))

    if (abs(ref_roll) > ROTATION_TOLERANCE
            or abs(ref_pitch) > ROTATION_TOLERANCE):
        util.failure("Reference board rotation error")

    logger.debug("Tilt offsets: Roll(%.1f %.1f %.1f) Pitch(%.1f %.1f %.1f) " %
                 (roll_error1, roll_error2, roll_error3, pitch_error1,
                  pitch_error2, pitch_error3))

    if (abs(roll_error1) > TILT_TOLERANCE1
            or abs(roll_error2) > TILT_TOLERANCE1
            or abs(roll_error3) > TILT_TOLERANCE3):
        util.failure("Test board roll error")

    if (abs(pitch_error1) > TILT_TOLERANCE1
            or abs(pitch_error2) > TILT_TOLERANCE1
            or abs(pitch_error3) > TILT_TOLERANCE3):
        util.failure("Test board pitch error")

    # flip upside down for the trim calculation
    rotate.set_rotation(conn, 'back')

    # set orientation upside down for trim measurement
    util.param_set(conn.test, 'AHRS_ORIENTATION', 12)

    # sleep an extra four seconds - we need to be very sure the board is still for trim
    time.sleep(4)
    conn.discard_messages()

    # average over 30 samples for trim
    num_samples = 30
    test_roll = [0] * 3
    test_pitch = [0] * 3
    msgs = ['RAW_IMU', 'SCALED_IMU2', 'SCALED_IMU3']

    for i in range(num_samples):
        for j in range(3):
            test_imu = conn.testmav.recv_match(type=msgs[j],
                                               blocking=True,
                                               timeout=3)
            if test_imu is None:
                util.failure("Lost comms to test board in ahrs trim")
            (roll, pitch) = util.attitude_estimate(test_imu)
            test_roll[j] += roll
            test_pitch[j] += pitch

    for j in range(3):
        test_roll[j] /= num_samples
        test_pitch[j] /= num_samples

    logger.debug(
        "Average Trim tilt Test1=(%.1f %.1f) Test2=(%.1f %.1f) Test3=(%.1f %.1f)"
        % (test_roll[0], test_pitch[0], test_roll[1], test_pitch[1],
           test_roll[2], test_pitch[2]))

    # setting a positive trim value reduces the attitude that is
    # read. So setting a trim of 0.1 when level results in a attitude
    # reading of -5.8 degrees

    # this approach assumes the mpu6000 on the FMU (IMU3) is level
    # with respect to the board, and that any attitude error is due to
    # the isolation board mount. We use the average of the error from
    # IMU1 and IMU2
    trim_x = radians((test_roll[0] + test_roll[1]) * 0.5 - test_roll[2])
    trim_y = radians((test_pitch[0] + test_pitch[1]) * 0.5 - test_pitch[2])

    util.param_set(conn.test, 'AHRS_TRIM_X', trim_x)
    time.sleep(0.2)
    util.param_set(conn.test, 'AHRS_TRIM_Y', trim_y)
    time.sleep(0.2)
    logger.debug("Set trims AHRS_TRIM_X=%.4f AHRS_TRIM_Y=%.4f" %
                 (trim_x, trim_y))
Exemple #60
0
def processManuallyEnrichedScanFile(Framework):
    files = File(
        CollectorsParameters.PROBE_MGR_INVENTORY_XMLENRICHER_FILES_FOLDER +
        XmlEnricherConstants.SENDING_FOLDER_NAME).listFiles(XsfFilter())

    if not files or not len(files):
        logger.debug('no manually file found!')
        Framework.setStepExecutionStatus(WorkflowStepStatus.SUCCESS)
        return

    filesNum = len(files)
    count = 0

    noFileProcessedSuccess = filesNum

    while count < filesNum:
        file = files[count]
        count += 1
        path = file.getAbsolutePath()
        try:
            logger.debug(file.getAbsolutePath())
            ParseEnrichedScanFile.parseFile(Framework,
                                            path,
                                            isManual=1,
                                            reportWarning=1)
            if Framework.getStepExecutionStatus(
            ) == WorkflowStepStatus.SUCCESS:
                noFileProcessedSuccess = 0
        except:
            errorMessage = str(sys.exc_info()[1])
            logger.debug('Failed parsing file: ' + path)
            logger.debugException(errorMessage)
            Framework.reportWarning(
                inventoryerrorcodes.INVENTORY_DISCOVERY_FAILED_PARSING, [path])

    logger.debug(noFileProcessedSuccess)
    if noFileProcessedSuccess:
        logger.debug('All Scan file(s) processed failed')
        Framework.setStepExecutionStatus(WorkflowStepStatus.FAILURE)
        Framework.reportError(
            inventoryerrorcodes.INVENTORY_DISCOVERY_FAILED_PARSING,
            ['All Scan file(s) processed failed'])
    else:
        logger.debug(
            'OK, due to some files successed process, set the status as SUCCESS'
        )
        Framework.setStepExecutionStatus(WorkflowStepStatus.SUCCESS)