Example #1
 def __init__(self):
     super().__init__()
     try:
         self.currentInstance = os.listdir(config.MC_DIR + "/instances")[0]
     except:
         self.currentInstance = ""
     self.loggedIn = False  # Tells the launcher if the user is logged in.
     utils.areYouThere(config.MC_DIR + "/instances")
     screen_resolution = app.desktop().screenGeometry()  # Gets primary monitor resolution.
     self.title = config.NAME + " " + config.VER
     config.ICON = utils.loadImage("favicon.ico", self.currentInstance)
     self.setWindowIcon(QIcon(config.ICON))
     config.LOGO = utils.loadImage("logo.png", self.currentInstance)
     config.BACKGROUND = utils.loadImage("background.png", self.currentInstance)
     config.BLOG_BACKGROUND = utils.getFile("blogbackground.png", self.currentInstance)
     config.BLOG = utils.getFile("blog.html", self.currentInstance)
     self.left = (screen_resolution.width() / 2) - 427
     self.top = (screen_resolution.height() / 2) - 240
     self.launcherConfig = utils.loadSettings(self)
     self.instanceConfig = utils.loadInstanceSettings(self, self.currentInstance)
     try:
         self.pres = pypresence.Presence("548208354196062228")  # Go ahead and use this if you want, provided you are modifying the launcher. Not that I can really stop you.
         self.pres.connect()
         self.pres.update(details="In launcher", large_image="pymcllogo512", state="Selected modpack: " + self.currentInstance)
     except:
         self.pres = None
     self.checkAlive(threadingEvent)
     self.update = utils.checkOnline()
     self.initUI()
Example #2
def main():
    try:
        globalSettings = utils.loadSettings(
            os.path.join(env.configPath, "globalSettings.json"))
        if len(sys.argv) < 2:
            printUsage()
        directive = sys.argv[1].lower()
        if directive == 'producer':
            content = {
                'objects': [{
                    'class_name': 'URL',
                    'identifier': 1,
                    'data': {
                        'name': 'https://cmsconstruct.com'
                    }
                }],
                'source':
                'apiKafkaTest',
                'links': []
            }
            producerWrapper(globalSettings, content)
        elif directive == 'consumer':
            consumerWrapper(globalSettings)
        else:
            printUsage()

    except:
        stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                sys.exc_info()[1],
                                                sys.exc_info()[2])
        logger.error('Error in apiKafkaTest utility: {}'.format(
            str(stacktrace).strip()))
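producerWrapper and consumerWrapper are defined elsewhere in that utility. As a rough illustration only, a producer-side wrapper along these lines could publish the content dict built above, assuming the kafka-python client and hypothetical setting keys 'kafkaEndpoint' and 'kafkaTopic' in globalSettings (the real utility may wire this differently):

import json
from kafka import KafkaProducer  # kafka-python package

def producerWrapperSketch(globalSettings, content):
    """Illustrative sketch: publish one JSON payload to a Kafka topic."""
    ## 'kafkaEndpoint' and 'kafkaTopic' are assumed setting names, not
    ## necessarily the keys this project actually uses
    producer = KafkaProducer(
        bootstrap_servers=globalSettings['kafkaEndpoint'],
        value_serializer=lambda value: json.dumps(value).encode('utf-8'))
    producer.send(globalSettings['kafkaTopic'], content)
    producer.flush()
    producer.close()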
Example #3
def psLocalTest():
	try:
		thisPath = os.path.dirname(os.path.abspath(__file__))
		basePath = os.path.abspath(os.path.join(thisPath, '..'))
		if basePath not in sys.path:
			sys.path.append(basePath)
		import env
		env.addLibPath()
		import utils
		import twisted.logger

		## Setup requested log handlers
		globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
		logFiles = utils.setupLogFile('JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'], directoryName='client')
		logObserver  = utils.setupObservers(logFiles, 'JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'])
		logger = twisted.logger.Logger(observer=logObserver, namespace='JobDetail')

		client = PwshLocal(logger)
		version = client.open()
		logger.debug('version: {version!r}', version=version)

		logger.debug('sleep should timeout and reinitialize shell...')
		results = client.run('sleep 5', timeout=2)
		logger.debug('sleep output: {results!r}', results=results)

		osAttrDict = {}
		queryOperatingSystem(client, logger, osAttrDict)
		logger.debug('osAttrDict: {osAttrDict!r}', osAttrDict=osAttrDict)
		client.close()

	except:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		print('Main Exception: {}'.format(stacktrace))
		client.close()
Example #4
def baselinePackagesInDatabase():
	"""Called by the installer to load all content on server into the DB."""
	dbClient = None
	try:
		globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
		logger = utils.setupLogger('Packages', env, globalSettings['fileContainingCoreLogSettings'])
		dbClient = getDbConnection(logger)

		## Work through each package system type containing packages to load
		for system,context in validPackageSystems.items():
			systemName = context.get('name')
			systemPath = context.get('path')
			logger.info('Working on {} packages: {}'.format(systemName, systemPath))
			if os.path.isdir(systemPath):
				packageList = os.listdir(systemPath)
				for packageName in packageList:
					thisPath = os.path.join(systemPath, packageName)
					if not os.path.isdir(thisPath):
						continue
					loadPackageIntoDB(logger, packageName, systemName, ['content', systemName], dbClient, thisPath)

	except:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		logger.error('Exception in baselinePackagesInDatabase:  {}'.format(stacktrace))

	with suppress(Exception):
		dbClient.session.close()
		dbClient.close()

	## end baselinePackagesInDatabase
	return
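Nearly all of these examples call utils.loadSettings with a path to a JSON file such as globalSettings.json and treat the result as a dict of configuration values. A minimal sketch of such a helper, assuming plain JSON with no comment stripping or validation (the projects' real implementations may do more):

import json

def loadSettingsSketch(settingsFile):
    """Illustrative sketch: read a JSON settings file and return it as a dict."""
    with open(settingsFile) as settingsHandle:
        return json.load(settingsHandle)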
Example #5
    def setupLogging(self, logPath, fileContainingLogSettings):
        """Setup requested log handlers.

		Arguments:
		  logPath (str)                   : string containing path to the log directory
		  fileContainingLogSettings (str) : config file path for log settings
		"""
        ## Open defined configurations
        logSettings = utils.loadSettings(fileContainingLogSettings)
        ## Create if they don't exist
        thisPath = os.path.join(logPath, 'client')
        if not os.path.exists(thisPath):
            os.makedirs(thisPath)
        for entityName in ['ClientStartup', 'ClientStatus']:
            ## Set each log handler as defined; no reason to wrap with exception
            ## handling yet because we haven't established the logs to report it
            logEntry = logSettings.get(entityName)
            logFile = os.path.join(logPath, logEntry.get('fileName'))
            logger = logging.getLogger(entityName)
            logger.setLevel(logEntry.get('logLevel'))
            thisHandler = logging.handlers.RotatingFileHandler(
                logFile,
                maxBytes=int(logEntry.get('maxSizeInBytes')),
                backupCount=int(logEntry.get('maxRollovers')))
            fmt = logging.Formatter(logEntry.get('lineFormat'),
                                    datefmt=logEntry.get('dateFormat'))
            thisHandler.setFormatter(fmt)
            logger.addHandler(thisHandler)

        ## end setupLogging
        return
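The per-handler settings read above (fileName, logLevel, maxSizeInBytes, maxRollovers, lineFormat, dateFormat) come from the JSON file passed in as fileContainingLogSettings. Shown as an equivalent Python dict, one plausible entry could look like the following; the values are illustrative, not the project's actual defaults:

sampleLogSettings = {
    'ClientStartup': {
        'fileName': 'client/ClientStartup.log',
        'logLevel': 'DEBUG',
        'maxSizeInBytes': 10485760,
        'maxRollovers': 5,
        'lineFormat': '%(asctime)s  %(levelname)-7s %(message)s',
        'dateFormat': '%Y-%m-%d %H:%M:%S'
    }
    ## a matching 'ClientStatus' entry would sit alongside this one
}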
Example #6
    def getSharedLogger(self):
        """Create an asynchronous shared logger to be used by the WSGI threads."""
        ## Open defined configurations
        logSettingsFileName = self.globalSettings[
            'fileContainingServiceLogSettings']
        ## Create requested shared log handler for the threads
        logSettings = loadSettings(
            os.path.join(env.configPath, logSettingsFileName))
        logSettings = logSettings.get(self.multiProcessingLogContext)
        logFile = os.path.join(env.logPath, 'service',
                               logSettings.get('fileName'))
        sharedLogger = logging.getLogger(self.multiProcessingLogContext)
        sharedLogger.setLevel(logSettings.get('logLevel'))
        mainHandler = RFHandler(
            logFile,
            maxBytes=int(logSettings.get('maxSizeInBytes')),
            backupCount=int(logSettings.get('maxRollovers')))
        fmt = logging.Formatter(logSettings.get('lineFormat'),
                                datefmt=logSettings.get('dateFormat'))
        mainHandler.setFormatter(fmt)
        sharedLogger.addHandler(mainHandler)

        ## Setup a queue for all the threads/processes to send messages through,
        ## so we are only writing to the log from the main thread
        multiprocessing_logging.install_mp_handler()

        ## Initialize the log
        sharedLogger.info('Initializing log from apiService')
        self.logger.info('Initialized shared log')

        ## end getSharedLogger
        return
Example #7
    def serviceClient(self):
        """Main entry point for the service platform.

		This function loads global settings, sets up logging, calls the service
		flows, waits for completion, and then cleans up.
		"""
        try:
            print(
                'Starting Open Content Platform.\nTo stop, you can use the Services console or invoke the command directly: python {} stop'
                .format(__file__))
            if not os.path.exists(env.logPath):
                os.makedirs(env.logPath)
            utils.masterLog(env, 'openContentClientMasterLog')
            ## Parse global settings
            settings = utils.loadSettings(
                os.path.join(env.configPath, 'globalSettings.json'))
            ## Setup requested log handlers
            startTime = time.time()
            self.setupLogging(
                env.logPath,
                os.path.join(env.configPath,
                             settings['fileContainingClientLogSettings']))
            logger = logging.getLogger('ClientStartup')
            logger.info('Starting {}'.format(clientLabel))
            logger.info(' Main-process identifier (PID): {}'.format(
                os.getpid()))
            logger.info(
                ' Started from a service; use the Services console to stop, or invoke the command directly:  python {} stop'
                .format(__file__))
            ## Create and monitor the service processes
            self.clientLoop(settings)
            ## Finish up
            logger = logging.getLogger('ClientStartup')
            endTime = time.time()
            runTime = utils.prettyRunTime(startTime, endTime)
            logger.warning('Stopped {}. Total runtime was {}'.format(
                clientLabel, runTime))
            print('Stopped {}. Total runtime was {}'.format(
                clientLabel, runTime))

        except:
            stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                    sys.exc_info()[1],
                                                    sys.exc_info()[2])
            ## The basic print is here for a console message in case we weren't able
            ## to connect the logging mechanism before encountering a failure.
            print('Exception in serviceClient: {}'.format(stacktrace))
            with suppress(Exception):
                logger.debug(stacktrace)

        ## end serviceClient
        return
Example #8
 def __init__(self, parent=None):
     super().__init__(parent)
     self.setWindowModality(Qt.ApplicationModal)
     self.progressWin = installWindow(self)
     self.getModpackRepo.result.connect(self.updateRepo)
     optionWindow.launcherConfig = utils.loadSettings(self)
     screen_resolution = app.desktop().screenGeometry()
     self.title = config.NAME + " " + config.VER + " Instance Manager"
     self.setWindowIcon(QIcon(config.ICON))
     self.left = screen_resolution.width() / 2 - 290
     self.top = screen_resolution.height() / 2 - 170
     self.initUI()
     self.getModpackRepo.start()
Example #9
    def servicePlatform(self):
        """Main entry point for the service platform.

		This function loads global settings, sets up logging, calls the service
		flows, waits for completion, and then cleans up.
		"""
        try:
            print(
                'Starting Open Content Platform.\nTo stop, you can use the Services console or invoke the command directly: python {}\windowsPlatformService.py stop'
                .format(path))
            if not os.path.exists(env.logPath):
                os.makedirs(env.logPath)
            startTime = time.time()

            ## Parse global settings
            globalSettings = utils.loadSettings(
                os.path.join(env.configPath, 'globalSettings.json'))

            ## Setup logging
            logger = self.setupLogging(globalSettings)
            logger.info('Starting Open Content Platform.')
            logger.info(' Main-process identifier (PID): {}'.format(
                os.getpid()))
            logger.info(
                ' Started from a service; use the Services console to stop, or invoke the command directly:  python {}\windowsPlatformService.py stop'
                .format(path))

            ## Create and monitor the service processes
            self.serviceLoop(globalSettings, logger)

            ## Finish up
            endTime = time.time()
            runTime = utils.prettyRunTime(startTime, endTime)
            logger.warning(
                'Open Content Platform stopped. Total runtime was {}'.format(
                    runTime))
            print('Open Content Platform stopped. Total runtime was {}'.format(
                runTime))

        except:
            stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                    sys.exc_info()[1],
                                                    sys.exc_info()[2])
            ## The basic print is here for a console message in case we weren't able
            ## to connect the logging mechanism before encountering a failure.
            print('Exception in servicePlatform: {}'.format(stacktrace))
            with suppress(Exception):
                logger.debug(stacktrace)

        ## end servicePlatform
        return
Example #10
def main():
    parser = argparse.ArgumentParser(description='Reasonable link helper')
    parser.add_argument('recipe',
                        metavar='recipe',
                        type=str,
                        nargs=1,
                        help='Recipe generated by CMaker')
    parser.add_argument('--clean-linking',
                        action="store_true",
                        help='clean old linking targets before linking')
    parser.add_argument('--clean',
                        '-c',
                        action="store_true",
                        help='clean old results before starting')

    args = parser.parse_args()

    if len(sys.argv) >= 2 and os.access(sys.argv[1], os.R_OK):
        console.log("Cmaker result found:", sys.argv[1])
    else:
        console.error("Cmaker result not found or unreadable.")
        sys.exit(1)

    try:
        json_data = json.load(open(sys.argv[1]))
        assert json_data["scripts"]
        assert json_data["compile"]
    except:
        console.error("Failed to parse Cmaker file")
        sys.exit(1)
    try:
        utils.loadSettings(open("settings.json"))
        console.success("Settings loaded.")
    except Exception as e:
        console.warn("Settings not loaded, because of:", e)
    console.debug("Settings", utils.settings)
    works.do_process(json_data, args)
Example #11
def updatePackage(pkgName, pkgSystem='contentGathering', pkgPath=None, forceUpdate=True):
	globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
	logger = utils.setupLogger('Packages', env, globalSettings['fileContainingCoreLogSettings'])
	dbClient = getDbConnection(logger)
	try:
		## Initialize directories for this work
		(packageBasePath, newPackagePath, oldPackagePath) = initializePaths(logger, pkgName)

		## Check the target system for this package
		if pkgSystem.lower() not in validPackageSystems:
			raise EnvironmentError('Content management expecting package for a valid system {}, but received unknown type: {}.'.format(validPackageSystems, pkgSystem))
		packageSystemName = validPackageSystems[pkgSystem.lower()]['name']
		packageSystemPath = validPackageSystems[pkgSystem.lower()]['path']
		## Default the new package location to the exploded dir on the server
		newPackagePath = os.path.join(packageSystemPath, pkgName)
		if pkgPath is not None:
			## And reset that location if one was designated in the call
			newPackagePath = pkgPath

		## If package is in the database already, extract into side-by-side path
		pkgExists = getContentPackage(logger, dbClient, oldPackagePath, pkgName, stripString='content,{},{}'.format(packageSystemName, pkgName))
		if pkgExists:
			## Compare the files with filecmp/difflib and present differences...
			changes = []
			comparePackageVersions(logger, pkgName, oldPackagePath, newPackagePath, changes)
			if len(changes) <= 0:
				logger.info('No changes found; package {} remains unchanged.'.format(pkgName))
			else:
				logger.info('Changes found in package {}, with the following files: {}'.format(pkgName, str(changes)))
				if not forceUpdate:
					logger.info('Leaving package unchanged because the forceUpdate flag was not set.')
				else:
					logger.info('Overwriting previous version...')
					loadPackageIntoDB(logger, pkgName, packageSystemName, ['content', packageSystemName], dbClient, newPackagePath)

		else:
			## First time load of the package into the database
			logger.error('Attempting to update a package that did not previously exist in the database. Please compress and then add the package first.')
			print('Attempting to update a package that did not previously exist in the database. Please compress and then add the package first.')

		## Cleanup
		logger.info('Finished content management work on package {}; cleaning up.'.format(pkgName))

	except:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		logger.error('Exception in updatePackage:  {}'.format(str(stacktrace)))

	## end updatePackage
	return
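comparePackageVersions is not shown in these examples; per the inline comment it compares the old and new package directories with filecmp/difflib and fills the changes list. A rough sketch of the directory-level part of that idea, recording file names only and skipping any line-level diff output:

import os
import filecmp

def comparePackageVersionsSketch(logger, pkgName, oldPath, newPath, changes):
    """Illustrative sketch: record files that differ between two package trees."""
    comparison = filecmp.dircmp(oldPath, newPath)
    ## Files present on only one side, or present on both sides but different
    changes.extend(comparison.left_only)
    changes.extend(comparison.right_only)
    changes.extend(comparison.diff_files)
    ## Recurse into shared subdirectories
    for subDir in comparison.common_dirs:
        comparePackageVersionsSketch(logger, pkgName,
                                     os.path.join(oldPath, subDir),
                                     os.path.join(newPath, subDir), changes)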
Example #12
    def __init__(self, serviceName, globalSettings, canceledEvent,
                 shutdownEvent):
        """Constructor for the Query service."""
        self.canceledEvent = canceledEvent
        self.shutdownEvent = shutdownEvent
        self.logFiles = utils.setupLogFile(
            serviceName,
            env,
            globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        self.logObserver = utils.setupObservers(
            self.logFiles, serviceName, env,
            globalSettings['fileContainingServiceLogSettings'])
        self.logger = twisted.logger.Logger(observer=self.logObserver,
                                            namespace=serviceName)
        self.logger.info('Started logger for {serviceName!r}',
                         serviceName=serviceName)

        ## Allow the dbClient to get created in the main thread, to reuse pool
        self.dbClient = None
        super().__init__(serviceName, globalSettings, getDbClient=True)
        self.dbClient.session.close()

        self.localSettings = utils.loadSettings(
            os.path.join(env.configPath,
                         globalSettings['fileContainingQuerySettings']))
        self.logger.info(
            'waitSecondsBetweenCacheCleanupJobs: {secs!r}',
            secs=self.localSettings['waitSecondsBetweenCacheCleanupJobs'])

        ## Twisted import here to avoid issues with epoll on Linux
        from twisted.internet import task, threads

        ## TODO: modify looping calls to use threads.deferToThread(); avoid
        ## time delays/waits from being blocking to the main reactor thread
        self.loopingCleanUpCache = task.LoopingCall(self.cleanUpCache)
        self.loopingCleanUpCache.start(
            self.localSettings['waitSecondsBetweenCacheCleanupJobs'])

        ## Make checking kafka and processing results a looping call, to give a
        ## break to the main reactor thread; otherwise it blocks other looping
        ## calls, like those in coreService for health and environment details:
        self.kafkaConsumer = self.createKafkaConsumer(
            globalSettings['kafkaQueryTopic'])
        self.loopingGetKafkaResults = task.LoopingCall(self.getKafkaResults,
                                                       self.kafkaConsumer)
        ## Give a second break before starting the main LoopingCall
        self.loopingGetKafkaResults.start(1, now=False)
        self.logger.debug('Leaving Query constructor')
Example #13
    def __init__(self, serviceName, globalSettings, canceledEvent,
                 shutdownEvent):
        """Constructor for the ResultProcessingFactory."""
        self.canceledEvent = canceledEvent
        self.shutdownEvent = shutdownEvent
        self.logFiles = utils.setupLogFile(
            serviceName,
            env,
            globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        self.logObserver = utils.setupObservers(
            self.logFiles, serviceName, env,
            globalSettings['fileContainingServiceLogSettings'])
        self.logger = twisted.logger.Logger(observer=self.logObserver,
                                            namespace=serviceName)
        self.logger.info('Started logger for {serviceName!r}',
                         serviceName=serviceName)
        self.localSettings = utils.loadSettings(
            os.path.join(
                env.configPath,
                globalSettings['fileContainingResultProcessingSettings']))
        self.globalSettings = globalSettings
        self.clientEndpointTable = platformSchema.ServiceResultProcessingEndpoint
        self.serviceJobTable = None
        self.serviceHealthTable = platformSchema.ServiceResultProcessingHealth
        self.validActions = [
            'connectionRequest', 'healthResponse', 'cacheResponse',
            'getKafkaPartitionCount', 'kafkaHealth'
        ]
        self.actionMethods = [
            'doConnectionRequest', 'doHealthResponse', 'doCacheResponse',
            'doGetKafkaPartitionCount', 'doKafkaHealth'
        ]
        super().__init__(serviceName, globalSettings)
        if self.canceledEvent.is_set() or self.shutdownEvent.is_set():
            self.logger.error('Cancelling startup of {serviceName!r}',
                              serviceName=serviceName)
            return

        ## Twisted import here to avoid issues with epoll on Linux
        from twisted.internet import task

        self.loopingGetKafkaPartitionCount = task.LoopingCall(
            self.doGetKafkaPartitionCount)
        self.loopingGetKafkaPartitionCount.start(
            self.localSettings['waitSecondsBetweenRequestingKafkaPartitionCount'])
Example #14
def startJob(runtime):
    """Standard job entry point.

	Arguments:
	  runtime (dict)   : object used for providing I/O for jobs and tracking
	                     the job thread through its runtime
	"""
    try:
        ## Setup requested log handlers
        jobRuntimePath = utilities.verifyJobRuntimePath(__file__)
        runtime.logger.report('path jobRuntimePath: {jobRuntimePath!r}',
                              jobRuntimePath=jobRuntimePath)
        logPath = os.path.join(runtime.env.contentGatheringPkgPath,
                               'checkUrlResponse', 'runtime')
        logDefinitions = os.path.join(runtime.env.contentGatheringPkgPath,
                                      'checkUrlResponse', 'conf',
                                      'logSettings.json')
        ## Create log handlers
        logHandlers = {}
        logSettings = utils.loadSettings(logDefinitions)
        configureLogging(logPath, logSettings, logHandlers)

        urlList = []
        ## Get URLs from the API
        getCurrentURLs(runtime, urlList)
        ## Read user-defined job parameters
        waitSecondsBeforeWarn = runtime.parameters.get('waitSecondsBeforeWarn',
                                                       3)
        waitSecondsBeforeError = runtime.parameters.get(
            'waitSecondsBeforeError', 10)
        waitSecondsBeforeTimeOut = runtime.parameters.get(
            'waitSecondsBeforeTimeOut', 62)
        ## Start the work
        getUrlReponse(runtime, urlList, waitSecondsBeforeWarn,
                      waitSecondsBeforeError, waitSecondsBeforeTimeOut)
        ## Destroy log handlers
        destroyLogging(logHandlers)
        ## Update the runtime status to success
        if runtime.getStatus() == 'UNKNOWN':
            runtime.status(1)

    except:
        runtime.setError(__name__)

    ## end startJob
    return
Example #15
def servicePlatform():
    """Entry point for the Open Content Platform.

	This function loads global settings, sets up logging, calls the service
	flows, waits for completion, and then cleans up.

	Usage::
	  $ python openContentPlatform.py

	"""
    try:
        print('Starting Open Content Platform.')
        print(' Main-process identifier (PID): {}.'.format(os.getpid()))
        print(' Press Ctrl+C to exit.\n')
        ## Parse global settings
        globalSettings = loadSettings(
            os.path.join(env.configPath, 'globalSettings.json'))
        startTime = time.time()
        ## Setup logging
        logger = setupLogging(globalSettings)
        ## Register signals
        registerSignals()
        ## Create and monitor the service processes
        serviceLoop(logger, globalSettings)
        ## Finish up
        endTime = time.time()
        runTime = prettyRunTime(startTime, endTime)
        logger.debug(
            'Open Content Platform stopped. Total runtime was {}'.format(
                runTime))
        print('Open Content Platform stopped. Total runtime was {}'.format(
            runTime))

    except:
        stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                sys.exc_info()[1],
                                                sys.exc_info()[2])
        ## The print is here for a console message in case we weren't able
        ## to connect the logging mechanism before encountering a failure.
        print('Exception in servicePlatform: {}'.format(stacktrace))
        with suppress(Exception):
            logger.debug(str(stacktrace))

    ## end servicePlatform
    return
Example #16
    def __init__(self, serviceName, globalSettings, canceledEvent,
                 shutdownEvent):
        """Constructor for the LogCollectionForJobs service."""
        self.canceledEvent = canceledEvent
        self.shutdownEvent = shutdownEvent
        self.logFiles = setupLogFile(
            serviceName,
            env,
            globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        self.logObserver = setupObservers(
            self.logFiles, serviceName, env,
            globalSettings['fileContainingServiceLogSettings'])
        self.logger = twisted.logger.Logger(observer=self.logObserver,
                                            namespace=serviceName)
        self.logger.info('Started logger for {serviceName!r}',
                         serviceName=serviceName)
        super().__init__(serviceName, globalSettings)
        self.localSettings = loadSettings(
            os.path.join(
                env.configPath,
                globalSettings['fileContainingLogCollectionForJobsSettings']))
        self.secondsBetweenLogCleanup = int(
            self.localSettings['waitHoursBetweenLogCleanupChecks']) * 60 * 60
        self.secondsToRetainLogFiles = int(
            self.localSettings['numberOfHoursToRetainLogFiles']) * 60 * 60

        ## Twisted import here to avoid issues with epoll on Linux
        from twisted.internet import task

        ## Make checking kafka and processing results a looping call, to give a
        ## break to the main reactor thread; otherwise it blocks other looping
        ## calls, like those in coreService for health and environment details:
        self.kafkaConsumer = self.createKafkaConsumer(
            globalSettings['kafkaLogForJobsTopic'])
        self.loopingGetKafkaResults = task.LoopingCall(self.getKafkaResults,
                                                       self.kafkaConsumer)
        ## Give a second break before starting the main LoopingCall
        self.loopingGetKafkaResults.start(1, now=False)
        ## Regularly check logs for cleanup; avoid filling disk with old logs
        self.loopingCleanupLogs = task.LoopingCall(self.deferCleanupLogs)
        self.loopingCleanupLogs.start(self.secondsBetweenLogCleanup)
        self.logger.debug('Leaving LogCollectionForJobs constructor')
Example #17
	def __init__(self, serviceName, globalSettings, canceledEvent, shutdownEvent):
		"""Constructor for the ResultProcessingClientFactory.

		Arguments:
		  serviceName (str)     : class name of the client ('ResultProcessingClient')
		  globalSettings (dict) : global globalSettings
		"""
		try:
			self.canceledEvent = canceledEvent
			self.shutdownEvent = shutdownEvent
			self.logFiles = utils.setupLogFile(serviceName, env, globalSettings['fileContainingClientLogSettings'], directoryName='client')
			self.logObserver = utils.setupObservers(self.logFiles, serviceName, env, globalSettings['fileContainingClientLogSettings'])
			self.logger = twisted.logger.Logger(observer=self.logObserver, namespace=serviceName)
			self.globalSettings = globalSettings
			self.localSettings = utils.loadSettings(os.path.join(env.configPath, globalSettings['fileContainingResultProcessingClientSettings']))
			self.dbClient = None
			self.validActions = ['connectionResponse', 'healthRequest', 'tokenExpired', 'unauthorized', 'partitionCountResponse']
			self.actionMethods = ['doConnectionResponse', 'doHealthRequest', 'doTokenExpired', 'doUnauthorized', 'doPartitionCountResponse']
			self.kafkaErrorCount = 0
			self.kafkaErrorLimit = 5
			self.kafkaConsumer = None
			self.partitionCount = 0
			self.connectedToKafkaConsumer = False
			self.resultProcessingUtility = None
			self.maintenanceMode = True
			self.pauseKafkaProcessing = True
			super().__init__(serviceName, globalSettings)
			self.initialize(True)
			## Looping call to build objectCache and start kafka processing
			self.loopingStartProcessing = task.LoopingCall(self.startProcessing)
			self.loopingStartProcessing.start(int(self.localSettings['waitSecondsBetweenRequestingFullSyncCacheUpdates'])).addErrback(self.logger.error)
			## Looping call to delta update (in-place) the objectCache
			self.loopingDeltaSync = task.LoopingCall(self.updateObjectCache)
			self.loopingDeltaSync.start(int(self.localSettings['waitSecondsBetweenRequestingDeltaSyncCacheUpdates'])).addErrback(self.logger.error)

		except:
			exception = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
			print('Exception in ResultProcessingClientFactory constructor: {}'.format(str(exception)))
			with suppress(Exception):
				self.logger.error('Exception in ResultProcessingClientFactory: {exception!r}', exception=exception)
			self.logToKafka(sys.exc_info()[1])
			self.shutdownEvent.set()
			reactor.stop()
Example #18
def Options(screen):
    settings = utils.loadSettings()
    continuer = True
    font = pygame.font.SysFont("Arial", 21)

    drawSettings(screen, settings, font)
    pygame.display.update()

    while continuer:
        for event in pygame.event.get():
            if event.type == QUIT:
                continuer = False
                return ["Quit", "Scene1", 1]
            elif event.type == MOUSEMOTION and event.buttons[0] == 1:
                if event.pos[0] < 570 and event.pos[0] > 14 and event.pos[1] < 201 and event.pos[1] > 179:
                    settings["Volume"] = round(event.pos[0] * 100 / 570, 1)
                    drawSettings(screen, settings, font)
                    pygame.display.update()
            elif event.type == MOUSEBUTTONDOWN:
                if event.pos[0] <= 55 and event.pos[0] >= 5 and event.pos[1] <= 55 and event.pos[1] >= 5:
                    utils.saveSettings(settings)
                    return ["Title", "Scene1", 1]
Example #19
def basicSetup(argv):
	"""Resolve basic dependencies and pre-reqs for client startup."""
	if not os.path.exists(env.logPath):
		os.makedirs(env.logPath)
	## Setup master Twisted catch-all log
	utils.masterLog(env, 'openContentClientMasterLog')
	## Parse global settings
	globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
	## Setup requested log handlers
	setupLogging(env.logPath, os.path.join(env.configPath, globalSettings['fileContainingClientLogSettings']))
	logger = logging.getLogger('ClientStartup')
	## Ensure the command specified the desired client type
	if len(argv) != 2 or argv[1].lower() not in ['contentgathering', 'resultprocessing', 'universaljob']:
		print('\nPlease pass the client type as an input parameter. Usage:\n  $ python {} [contentGathering|universalJob|resultProcessing]\n'.format(__file__))
		logger.error('Please pass the client type as an input parameter.')
		raise EnvironmentError('Please pass the client type as an input parameter.')

	logger.info('Starting client {}'.format(argv[1]))
	logger.info(' Main-process identifier (PID): {}'.format(os.getpid()))
	logger.info(' Started on the command line; press Ctrl+C to exit.')

	## end basicSetup
	return globalSettings
Example #20
def listPackages():
	globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
	logger = utils.setupLogger('Packages', env, globalSettings['fileContainingCoreLogSettings'])
	dbClient = getDbConnection(logger)
	## Cleanup this package result; prep work for comparison operation
	dbClass = platformSchema.ContentPackage
	packages = dbClient.session.query(dbClass).order_by(dbClass.system.desc(),dbClass.name.desc()).all()
	jsonReport = {}
	for package in packages:
		packageSystem = package.system
		packageName = package.name
		packageDate = package.time_created
		if packageSystem not in jsonReport:
			jsonReport[packageSystem] = []
		jsonReport[packageSystem].append({ 'name' : packageName, 'created' : packageDate })
	dbClient.session.commit()
	## And finally report
	strReport = json.dumps(jsonReport, default=utils.customJsonDumpsConverter, indent=4)
	logger.debug('Valid packages: \n{}'.format(str(strReport)))
	print('Valid packages: \n{}'.format(str(strReport)))

	## end listPackages
	return
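utils.customJsonDumpsConverter is referenced but not shown; json.dumps needs it because package.time_created is a datetime, which is not JSON-serializable by default. A minimal sketch of such a converter, assuming datetime values are the only special case it has to handle:

import datetime

def customJsonDumpsConverterSketch(obj):
    """Illustrative sketch: let json.dumps serialize datetime values."""
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError('Object of type {} is not JSON serializable'.format(type(obj)))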
Example #21
def removePackage(packageName):
	globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
	logger = utils.setupLogger('Packages', env, globalSettings['fileContainingCoreLogSettings'])
	dbClient = getDbConnection(logger)
	try:
		logger.info('Searching for package in database: {}'.format(packageName))
		dbClass = platformSchema.ContentPackage
		package = dbClient.session.query(dbClass).filter(dbClass.name == packageName).first()
		if package is None:
			logger.info('Package {} not found in database... nothing to do.'.format(packageName))
		else:
			logger.info('Removing files for package {}...'.format(packageName))
			dbClient.session.commit()
			## Remove all files first
			removeFiles(logger, dbClient, packageName)
			## And now remove the package
			package = dbClient.session.query(dbClass).filter(dbClass.name == packageName).first()
			packageType = package.system
			dbClient.session.delete(package)
			dbClient.session.commit()
			## Remove from filesystem
			packageSystemPath = validPackageSystems[packageType.lower()]['path']
			packagePath = os.path.join(packageSystemPath, packageName)
			logger.info('Removing files from server directory: {}...'.format(packagePath))
			utils.cleanDirectory(logger, packagePath)
			logger.info('Package {} removed.'.format(packageName))

	except:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		logger.error('Exception in removePackage:  {}'.format(stacktrace))

	with suppress(Exception):
		dbClient.session.close()
		dbClient.close()

	## end removePackage
	return
Example #22
def powerShell():
    import sys
    import traceback
    import os
    import re
    try:
        ## Add openContentPlatform directories onto the sys path
        thisPath = os.path.dirname(os.path.abspath(__file__))
        basePath = os.path.abspath(os.path.join(thisPath, '..'))
        if basePath not in sys.path:
            sys.path.append(basePath)
        import env
        env.addLibPath()
        env.addDatabasePath()
        env.addExternalPath()

        ## Setup requested log handlers
        globalSettings = utils.loadSettings(
            os.path.join(env.configPath, 'globalSettings.json'))
        logEntity = 'Protocols'
        logger = utils.setupLogger(logEntity, env, 'logSettingsCore.json')
        logger.info('Starting protocolWrapperPowershell...')

        import twisted.logger
        logFiles = utils.setupLogFile(
            'JobDetail',
            env,
            globalSettings['fileContainingContentGatheringLogSettings'],
            directoryName='client')
        logObserver = utils.setupObservers(
            logFiles, 'JobDetail', env,
            globalSettings['fileContainingContentGatheringLogSettings'])
        logger = twisted.logger.Logger(observer=logObserver,
                                       namespace='JobDetail')

        from remoteRuntime import Runtime
        runtime = Runtime(logger, env, 'TestPkg', 'TestJob', 'endpoint', {},
                          None, {}, None, {}, None)

        ## Manual creation of a protocol via protocolHandler
        externalProtocolHandler = utils.loadExternalLibrary(
            'externalProtocolHandler', env, globalSettings)
        protocolHandler = externalProtocolHandler.ProtocolHandler(
            None, globalSettings, env, logger)
        protocolType = 'ProtocolPowerShell'
        protocolData = {'user': '******', 'password': '******'}
        protocolHandler.createManual(runtime, protocolType, protocolData)
        protocol = externalProtocolHandler.getProtocolObject(runtime, 1)
        print('protocol to use: {}'.format(protocol))
        print('protocols: {}'.format(
            externalProtocolHandler.getProtocolObjects(runtime)))

        endpoint = '192.168.1.100'
        client = PowerShell(runtime, logger, endpoint, 1, protocol)
        client.open()

        osAttrDict = {}
        queryOperatingSystem(client, logger, osAttrDict)
        logger.debug('osAttrDict: {osAttrDict!r}', osAttrDict=osAttrDict)
        client.close()

    except:
        stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                sys.exc_info()[1],
                                                sys.exc_info()[2])
        msg = str(sys.exc_info()[1])
        ## Cleanup message when we know what it is:
        ## "<x_wmi: The RPC server is unavailable.  (-2147023174, 'The RPC server is unavailable. ', (0, None, 'The RPC server is unavailable. ', None, None, -2147023174), None)>"
        if re.search(
                'The client cannot connect to the destination specified in the request',
                msg, re.I):
            ## Remove the rest of the fluff
            msg = 'The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests.'
            logger.debug('Main Exception: {exception!r}', exception=msg)
        else:
            logger.debug('Main Exception: {exception!r}', exception=stacktrace)
Example #23
def getAutoBuildPath():
    settings = utils.loadSettings("LuaCWC")
    auto_build_path = settings.get("auto_build_path", "")
    return auto_build_path
Example #24
def main():
    """Entry point for the database configuration utility.

	Usage::

	  $ python configureDatabase.py

	"""
    try:
        ## Setup requested log handlers
        logEntity = 'Database'
        logger = utils.setupLogger(logEntity, env, 'logSettingsCore.json')
        logger.info('Starting configureDatabase utility.')

        ## Using an externally provided library to create and/or update a local
        ## config file for database parameters; defined in globalSettings and
        ## located in '<install_path>/external/'.
        globalSettings = utils.loadSettings(
            os.path.join(env.configPath, 'globalSettings.json'))
        externalLibrary = utils.loadExternalLibrary('externalDatabaseLibrary',
                                                    env)

        dbClient = None
        while dbClient is None:
            try:
                ## Get settings from user (new or updated) and save to file
                externalLibrary.updateSettingsFile(globalSettings, logger)
                ## Attempt connection with the new or updated settings
                dbClient = DatabaseClient(logger)
                ## In case we fall through the cracks without an exception...
                if dbClient is None:
                    ## The settings didn't work; request a new set
                    print(
                        'Failed to connect to database with provided settings; try again...'
                    )
                    logger.debug(
                        'Failed to connect to database with provided settings; try again...'
                    )

            except exc.OperationalError:
                ## Intentionally catch database connection errors
                print('\nException in configureDatabase: {}'.format(
                    str(sys.exc_info()[1])))
                logger.error('Exception in configureDatabase: {}'.format(
                    str(sys.exc_info()[1])))
                ## The settings didn't work; request a new set
                print(
                    '\nFailed to connect to database with provided settings; try again...'
                )
                logger.debug(
                    'Failed to connect to database with provided settings; try again...'
                )

        print('\nDatabase connection successful\n')
        logger.debug('Database connection successful')

        ## Close the connection
        dbClient.close()
        logger.info('Exiting configureDatabase utility.')

    except:
        stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                sys.exc_info()[1],
                                                sys.exc_info()[2])
        print('Failure in configureDatabase.main: {}'.format(stacktrace))
        with suppress(Exception):
            logger.debug('Failure in configureDatabase: {}'.format(stacktrace))

    ## end main
    return
Example #25
def validatePackage(packageName, compressedFile, pkgSystem='contentGathering', forceUpdate=False):
	"""Entry function.

	Arguments:
	  compressedFile (str) : fully qualified package to be deployed
	  forceUpdate (bool)   : whether to force update if package already exists
	  pkgSystem (str)      : package type (target system/service to deploy into)

	"""
	globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
	logger = utils.setupLogger('Packages', env, globalSettings['fileContainingCoreLogSettings'])
	dbClient = getDbConnection(logger)
	try:
		## Check the extension
		(pkgName, pkgExtension) = compressedFile.split('.')
		if pkgExtension.lower() != 'zip' and pkgExtension.lower() != 'tar':
			raise EnvironmentError('Content management expecting package in either ZIP or TAR format; unable to work with this format: {}.'.format(pkgExtension))

		## Initialize directories for this work
		(packageBasePath, newPackagePath, oldPackagePath) = initializePaths(logger, packageName)

		## Check the target system for this package
		if pkgSystem.lower() not in validPackageSystems:
			raise EnvironmentError('Content management expecting package for a valid system {}, but received unknown type: {}.'.format(validPackageSystems, pkgSystem))
		packageSystemName = validPackageSystems[pkgSystem.lower()]['name']
		packageSystemPath = validPackageSystems[pkgSystem.lower()]['path']

		## Extract contents into a temp runtime directory
		extractContents(logger, compressedFile, packageName, pkgExtension, packageBasePath, newPackagePath)

		## If package is in the database already, extract into side-by-side path
		pkgExists = getContentPackage(logger, dbClient, oldPackagePath, packageName, stripString='content,{},{}'.format(packageSystemName, packageName))
		if pkgExists:
			## Compare the files with filecmp/difflib and present differences...
			changes = []
			comparePackageVersions(logger, packageName, oldPackagePath, newPackagePath, changes)
			if len(changes) <= 0:
				logger.info('No changes found; package {} remains unchanged.'.format(packageName))
			else:
				logger.info('Changes found in package {}, with the following files: {}'.format(packageName, str(changes)))
				if not forceUpdate:
					logger.info('Leaving package unchanged because the forceUpdate flag was not set.')
					print('Leaving package unchanged because the forceUpdate flag was not set.')
				else:
					logger.info('Overwriting previous version...')
					print('Overwriting previous version...')
					loadPackageIntoDB(logger, packageName, packageSystemName, ['content', packageSystemName], dbClient, newPackagePath)

		else:
			## First time load of the package into the database
			logger.info('Attempting to load new package into database...')
			loadPackageIntoDB(logger, packageName, packageSystemName, ['content', packageSystemName], dbClient, newPackagePath)

		## Cleanup
		logger.info('Finished content management work on package {}; cleaning up.'.format(packageName))
		utils.cleanDirectory(logger, packageBasePath)

	except:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		logger.error('Exception in validatePackage:  {}'.format(str(stacktrace)))

	with suppress(Exception):
		dbClient.session.close()
		dbClient.close()
	## end validatePackage
	return
Example #27
### loading global settings
import os
from utils import ln, loadSettings
#print line with '-----------...'
ln()

#### Init settings: use the name passed as the first command-line arg, or one set
#### by another script as a global or local var; otherwise fall back to the defaults from SettingsDefault.py
import sys
if __name__ == "__main__" and len(sys.argv) > 1 and not sys.argv[0].endswith(".log"):
  SettingsImportName = sys.argv[1]
else:
  SettingsImportName = None

settings = loadSettings(SettingsImportName, SettingNames)

settings.TestcasesDir = os.path.abspath(settings.TestcasesDir)

ln()


####################

import re
import glob
from os.path import join
from subprocess import call
from shutil import copy2

import threading