def main():
    """Entry point for this utility.

    Loads settings, wires up logging, connects to the database, shows the
    existing API users, and interactively creates a new one.

    Usage::

      $ python createApiUser.py

    """
    try:
        ## Setup requested log handlers
        globalSettings = utils.loadSettings(os.path.join(env.configPath, "globalSettings.json"))
        logFiles = utils.setupLogFile("ApiApplication", env, globalSettings['fileContainingServiceLogSettings'], directoryName='service')
        logObserver = utils.setupObservers(logFiles, "ApiApplication", env, globalSettings['fileContainingServiceLogSettings'])
        logger = twisted.logger.Logger(observer=logObserver, namespace="ApiApplication")
        logger.info('Starting createApiUser')

        ## Connect to database
        dbClient = DatabaseClient(logger)
        if dbClient is None:
            raise SystemError('Failed to connect to database; unable to initialize tables.')

        ## Get list of valid users (name -> key), used for uniqueness checks
        users = {}
        getUsers(dbClient, logger, users)

        ## Create and insert a new credential
        createUserEntry(dbClient, logger, users)

        ## Cleanup
        dbClient.session.remove()
        dbClient.close()
        ## Bug fix: previous message referenced the wrong utility
        ## ('configureDatabase' was a copy/paste leftover)
        logger.info('Exiting createApiUser utility.')

    except Exception:
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        ## The basic print is here for a console message in case we weren't
        ## able to use the logging mechanism before encountering the failure.
        print('Failure in createApiUser: {}'.format(stacktrace))
        try:
            logger.debug('Failure in createApiUser: {}'.format(stacktrace))
        except Exception:
            ## logger may never have been created; best-effort only
            pass

    ## end main
    return
def createUserEntry(dbClient, logger, users):
    """Create and insert a new entry."""
    print('Generating a new API user...')

    ## Name: must be non-empty and not already registered
    while True:
        name = input(' Enter Name: ')
        if not name:
            print(' ==> Name is required.')
            continue
        if name in users:
            print(' ==> user {} already exists, please choose a unique name.'.format(name))
            continue
        break

    ## Owner: required, re-prompt until provided
    owner = input(' Enter Owner: ')
    while not owner:
        print(' ==> Owner is required.')
        owner = input(' Enter Owner: ')

    ## Key: either empty (auto-generated) or at least 32 characters
    print(' Key must be at least 32 chars; if left empty one will be generated for you.')
    key = input(' Enter Key: ')
    while 0 < len(key) < 32:
        print(' ==> Key must either be empty, or have at least 32 characters...')
        key = input(' Enter Key: ')
    if not key:
        key = uuid.uuid4().hex
    ## Keys are stored at a fixed 32-character width
    key = key[:32]
    print(' Saved User Key: {}'.format(key))

    ## Security access level
    print(' Access criteria (true|yes|1==True); defaults to false if left empty:')
    access_write = utils.valueToBoolean(input(' Write Access ? '))
    access_delete = utils.valueToBoolean(input(' Delete Access ? '))
    access_admin = utils.valueToBoolean(input(' Admin Access ? '))

    ## Create the entry in the DB
    newApiUser = ApiConsumerAccess(name=name, key=key, owner=owner,
                                   access_write=access_write,
                                   access_delete=access_delete,
                                   access_admin=access_admin)
    dbClient.session.add(newApiUser)
    dbClient.session.commit()
    logger.debug('Created new API user: {}'.format(name))

    ## end createUserEntry
    return
def psLocalTest():
    """Manual test harness for the local PowerShell (PwshLocal) client.

    Sets up the platform paths and logging, opens a local shell, exercises a
    timeout/reinitialize cycle, queries OS attributes, and closes the shell.
    """
    ## Bug fix: define up front so the except handler can safely reference it
    ## even when the failure happens before PwshLocal() is constructed
    client = None
    try:
        thisPath = os.path.dirname(os.path.abspath(__file__))
        basePath = os.path.abspath(os.path.join(thisPath, '..'))
        if basePath not in sys.path:
            sys.path.append(basePath)
        import env
        env.addLibPath()
        import utils
        import twisted.logger

        ## Setup requested log handlers
        globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
        logFiles = utils.setupLogFile('JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'], directoryName='client')
        logObserver = utils.setupObservers(logFiles, 'JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'])
        logger = twisted.logger.Logger(observer=logObserver, namespace='JobDetail')

        client = PwshLocal(logger)
        version = client.open()
        logger.debug('version: {version!r}', version=version)

        ## Deliberately exceed the timeout to confirm shell recovery
        logger.debug('sleep should timeout and reinitialize shell...')
        results = client.run('sleep 5', timeout=2)
        logger.debug('sleep output: {results!r}', results=results)

        osAttrDict = {}
        queryOperatingSystem(client, logger, osAttrDict)
        logger.debug('osAttrDict: {osAttrDict!r}', osAttrDict=osAttrDict)
        client.close()

    except Exception:
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        print('Main Exception: {}'.format(stacktrace))
        ## Best-effort cleanup; never let close() mask the original error
        if client is not None:
            try:
                client.close()
            except Exception:
                pass
def servicePlatform():
    """Entry point for the Open Content Platform.

    This function loads global settings, sets up logging, calls the service
    flows, waits for completion, and then cleans up.

    Usage::

      $ python openContentPlatform.py

    """
    try:
        print('Starting Open Content Platform.')
        print(' Main-process identifier (PID): {}.'.format(os.getpid()))
        ## Bug fix: dropped a stray no-op .format(os.getpid()) — the string
        ## has no placeholder, so the argument was silently discarded
        print(' Press Ctrl+C to exit.\n')

        ## Parse global settings
        globalSettings = loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
        startTime = time.time()

        ## Setup logging
        logger = setupLogging(globalSettings)

        ## Register signals
        registerSignals()

        ## Create and monitor the service processes; blocks until shutdown
        serviceLoop(logger, globalSettings)

        ## Finish up
        endTime = time.time()
        runTime = prettyRunTime(startTime, endTime)
        logger.debug('Open Content Platform stopped. Total runtime was {}'.format(runTime))
        print('Open Content Platform stopped. Total runtime was {}'.format(runTime))

    except Exception:
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        ## The print is here for a console message in case we weren't able
        ## to connect the logging mechanism before encountering a failure.
        print('Exception in servicePlatform: {}'.format(stacktrace))
        with suppress(Exception):
            logger.debug(str(stacktrace))

    ## end servicePlatform
    return
def getUsers(dbClient, logger, users):
    """Get all valid API users."""
    try:
        ## Populate the caller's dict with name -> key for every consumer
        allEntries = dbClient.session.query(ApiConsumerAccess).all()
        logger.debug('Current users: ')
        for entry in allEntries:
            users[entry.name] = entry.key
            logger.debug(' {userName!r} : {userKey}', userName=entry.name, userKey=entry.key)
    except:
        excType, excValue, excTrace = sys.exc_info()
        stacktrace = traceback.format_exception(excType, excValue, excTrace)
        print('Failure in getUsers: {}'.format(stacktrace))

    ## end getUsers
    return
def powerShell():
    """Manual test harness for the remote PowerShell protocol wrapper.

    Builds the platform environment, creates a manual protocol entry via the
    external protocol handler, opens a PowerShell session to a test endpoint,
    queries OS attributes, and closes the session.
    """
    import sys
    import traceback
    import os
    import re
    ## Bug fix: define up front so the except handler never hits an unbound
    ## 'logger' when the failure happens before logging is configured
    logger = None
    try:
        ## Add openContentPlatform directories onto the sys path
        thisPath = os.path.dirname(os.path.abspath(__file__))
        basePath = os.path.abspath(os.path.join(thisPath, '..'))
        if basePath not in sys.path:
            sys.path.append(basePath)
        import env
        env.addLibPath()
        env.addDatabasePath()
        env.addExternalPath()

        ## Setup requested log handlers
        globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
        logEntity = 'Protocols'
        logger = utils.setupLogger(logEntity, env, 'logSettingsCore.json')
        logger.info('Starting protocolWrapperPowershell...')

        import twisted.logger
        logFiles = utils.setupLogFile('JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'], directoryName='client')
        logObserver = utils.setupObservers(logFiles, 'JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'])
        logger = twisted.logger.Logger(observer=logObserver, namespace='JobDetail')

        from remoteRuntime import Runtime
        runtime = Runtime(logger, env, 'TestPkg', 'TestJob', 'endpoint', {}, None, {}, None, {}, None)

        ## Manual creation of a protocol via protocolHandler
        externalProtocolHandler = utils.loadExternalLibrary('externalProtocolHandler', env, globalSettings)
        protocolHandler = externalProtocolHandler.ProtocolHandler(None, globalSettings, env, logger)
        protocolType = 'ProtocolPowerShell'
        protocolData = {'user': '******', 'password': '******'}
        protocolHandler.createManual(runtime, protocolType, protocolData)
        protocol = externalProtocolHandler.getProtocolObject(runtime, 1)
        print('protocol to use: {}'.format(protocol))
        print('protocols: {}'.format(externalProtocolHandler.getProtocolObjects(runtime)))

        endpoint = '192.168.1.100'
        client = PowerShell(runtime, logger, endpoint, 1, protocol)
        client.open()
        osAttrDict = {}
        queryOperatingSystem(client, logger, osAttrDict)
        logger.debug('osAttrDict: {osAttrDict!r}', osAttrDict=osAttrDict)
        client.close()

    except:
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        msg = str(sys.exc_info()[1])
        ## Cleanup message when we know what it is:
        ## "<x_wmi: The RPC server is unavailable. (-2147023174, 'The RPC server is unavailable. ', (0, None, 'The RPC server is unavailable. ', None, None, -2147023174), None)>"
        if re.search('The client cannot connect to the destination specified in the request', msg, re.I):
            ## Remove the rest of the fluff
            msg = 'The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests.'
            payload = msg
        else:
            payload = stacktrace
        if logger is not None:
            logger.debug('Main Exception: {exception!r}', exception=payload)
        else:
            ## Logging never came up; fall back to the console
            print('Main Exception: {}'.format(payload))
def serviceLoop(logger, settings):
    """Monitor service managers.

    Starts up the managers and then actively monitor their status.
    Blocks until an interrupt/exit is received or an unexpected error
    occurs, at which point it signals all services to shut down.

    Arguments:
      settings (json) : global settings

    """
    ## Polling/startup/shutdown intervals, all in seconds
    watcherWaitCycle = int(settings['statusReportingInterval'])
    serviceWaitCycle = int(settings['waitSecondsBeforeStartingNextService'])
    exitWaitCycle = int(settings['waitSecondsBeforeExiting'])
    ## Shared event: setting it tells every child process to stop
    shutdownEvent = multiprocessing.Event()

    ## As of Python 3.7, dict remembers insertion order; so, we no longer need a
    ## list or OrderedDict to ensure that transport and api services start first
    ## Each entry: 'class' to instantiate, 'canceledEvent' the child sets to
    ## request a restart (None = service never requests restarts), and the
    ## running process handle in 'instance'
    activeServices = {}
    activeServices['transport'] = {'class': TransportService, 'canceledEvent': None, 'instance': None}
    activeServices['api'] = {'class': ApiService, 'canceledEvent': None, 'instance': None}
    activeServices['contentGathering'] = {'class': ContentGatheringService, 'canceledEvent': multiprocessing.Event(), 'instance': None}
    activeServices['resultProcessing'] = {'class': ResultProcessingService, 'canceledEvent': multiprocessing.Event(), 'instance': None}
    activeServices['queryService'] = {'class': QueryService, 'canceledEvent': multiprocessing.Event(), 'instance': None}
    activeServices['universalJob'] = {'class': UniversalJobService, 'canceledEvent': multiprocessing.Event(), 'instance': None}
    activeServices['logCollection'] = {'class': LogCollectionService, 'canceledEvent': multiprocessing.Event(), 'instance': None}
    activeServices['logCollectionForJobs'] = {'class': LogCollectionForJobsService, 'canceledEvent': multiprocessing.Event(), 'instance': None}
    ## Considering deprecation of server side service:
    ## Conditionally start/add the local service (ServerSideService)
    # if settings['startServerSideService']:
    # 	activeServices['serverSide'] = { 'class': ServerSideService, 'canceledEvent': None, 'instance': None }

    ## Start the services as separate processes
    for alias, content in activeServices.items():
        startService(content, shutdownEvent, settings)
        thisService = content['instance']
        logger.info(' Started {} with PID {}'.format(thisService.name, thisService.pid))
        ## Default 2 second sleep between service starts
        time.sleep(serviceWaitCycle)

    ## Wait loop
    logger.info('Starting main loop - {}'.format(time.strftime('%X %x')))
    logger.info('Interval for checking services: {} seconds'.format(watcherWaitCycle))
    while True:
        try:
            logger.debug('Checking services:')
            ## Evaluate the running services
            for alias, content in activeServices.items():
                thisService = content['instance']
                ## NOTE(review): thisClass is assigned but not used below;
                ## presumably kept for symmetry/debugging
                thisClass = content['class']
                thisEvent = content['canceledEvent']
                if thisService.is_alive() and thisEvent is not None and thisEvent.is_set():
                    ## The service is telling us it needs restarted
                    logger.error(' {}: still alive (PID: {}), but requested a restart'.format(thisService.name, thisService.pid))
                    thisService.terminate()
                    ## Clear the flag before restart so the new instance
                    ## isn't immediately restarted again
                    thisEvent.clear()
                    del thisService
                    startService(content, shutdownEvent, settings)
                    thisService = content['instance']
                    logger.info(' Started {} with PID {}'.format(thisService.name, thisService.pid))
                elif not thisService.is_alive():
                    logger.error(' {}: stopped with exit code [{}]'.format(thisService.name, thisService.exitcode))
                    if thisEvent is not None and thisEvent.is_set():
                        ## The service is telling us it needs restarted
                        logger.info(' Service {} requested a restart'.format(thisService.name))
                        thisEvent.clear()
                        del thisService
                        startService(content, shutdownEvent, settings)
                        thisService = content['instance']
                        logger.info(' Started {} with PID {}'.format(thisService.name, thisService.pid))
                else:
                    logger.debug(' {}: running'.format(thisService.name))
            ## Avoiding join() with the processes (from the multiprocessing
            ## internals), since we're not waiting for them to finish. They
            ## will always be running, so this loop is just for monitoring
            ## and messaging. Any interrupt signals will be sent to the sub-
            ## processes, and intentional shutdown requests are handled here.
            time.sleep(watcherWaitCycle)

        except (KeyboardInterrupt, SystemExit):
            print('Interrupt received; notifying services to stop...')
            logger.debug('Interrupt received; notifying services to stop...')
            shutdownEvent.set()
            ## Wait for threads to finish graceful shutdown
            time.sleep(exitWaitCycle)
            print('Wait cycle complete for threads to finish; commencing cleanup.')
            ## Kill any process that is still running
            for alias, content in activeServices.items():
                try:
                    thisService = content['instance']
                    logger.debug('Evaluating {}'.format(thisService.name))
                    if thisService.is_alive():
                        logger.debug(' process still running; stopping {} with PID {}'.format(thisService.name, thisService.pid))
                        thisService.terminate()
                except:
                    exceptionOnly = traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1])
                    print('Exception in killing process in serviceLoop: {}'.format(str(exceptionOnly)))
                    with suppress(Exception):
                        logger.debug('Exception in killing process in serviceLoop: {}'.format(str(exceptionOnly)))
            break

        except:
            ## Unexpected error in the watcher itself: signal shutdown,
            ## give services a grace period, and exit the loop
            stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
            print('Exception in watcher loop: {}'.format(stacktrace))
            logger.debug('Exception in watcher loop: {}'.format(str(stacktrace)))
            logger.debug('Notifying services to stop...')
            shutdownEvent.set()
            time.sleep(exitWaitCycle)
            break

    ## end serviceLoop
    return