Example #1
0
 def __init__(self, log=None, verbose=False, plot=False):
     """This calculates the brightness temperature of the planets.
        It must be used with atmosphere and alpha

        Parameters
        ----------
        log : optional
            Log destination handed to utils.setupLogFile.
        verbose : bool
            Stored on the instance for later use.
        plot : bool
            Stored on the instance for later use."""
     self.verbose = verbose
     self.plot = plot
     self.log = utils.setupLogFile(log)
     # print() function (Python 3) instead of the Python 2 print statement,
     # which is a syntax error under Python 3.
     print('\n---Brightness---\n')
Example #2
0
    def __init__(self, mode='normal', config=None, log=None, **kwargs):
        """Reads in absorption formalisms
           Note that they are all in GHz

           Parameters
           ----------
           mode : str
               Run mode passed to state_variables.init_state_variables.
           config : str or planetConfig or None
               Config file name (parsed via pcfg.planetConfig) or a
               pre-built config object.
           log : optional
               Log destination for utils.setupLogFile.
           kwargs : state variables (see state_variables.py)."""

        # Merge mode defaults with caller overrides and install them on self.
        kwargs = state_variables.init_state_variables(mode, **kwargs)
        self.state_vars = kwargs.keys()
        self.set_state(set_mode='init', **kwargs)
        self.log = utils.setupLogFile(log)
        self.freqs = None

        # get config
        # isinstance instead of type(...) == str: idiomatic and also accepts
        # str subclasses.
        if isinstance(config, str):
            # NOTE(review): self.planet is read here but never assigned in
            # this constructor -- presumably installed by set_state; confirm.
            config = pcfg.planetConfig(self.planet, configFile=config, log=log)
        self.config = config

        # copy config back into otherPar
        self.otherPar = {}
        self.otherPar['h2state'] = self.config.h2state
        self.otherPar['h2newset'] = self.config.h2newset
        self.otherPar['water'] = self.config.water_p
        self.otherPar['ice'] = self.config.ice_p
        self.otherPar['nh4sh'] = self.config.nh4sh_p
        self.otherPar['nh3ice'] = self.config.nh3ice_p
        self.otherPar['h2sice'] = self.config.h2sice_p
        self.otherPar['ch4'] = self.config.ch4_p
        self.alpha_data = None
        # Either reuse/scale a previously computed alpha table or build the
        # formalisms fresh; optionally kick off alpha generation.
        if self.use_existing_alpha or self.scale_existing_alpha:
            self.existing_alpha_setup()
        else:
            self.formalisms()
        if self.generate_alpha:
            self.start_generate_alpha()
Example #3
0
    def __init__(self, planet, mode='normal', config='config.par', log=None, **kwargs):
        """reads/computes atmospheres.  This returns:
               self.gas
               self.cloud
               self.layerProperty
            on the appropriate grid.

           Parameters
           ----------
           planet : str
               Planet name; capitalized internally.
           mode : str
               Run mode passed to state_variables.init_state_variables.
           config : str or planetConfig
               Config file name (joined under the planet directory) or a
               pre-built planetConfig instance.
           log : optional
               Log destination for utils.setupLogFile.
           kwargs : state variables (see state_variables.py)."""

        self.planet = planet.capitalize()
        # Merge mode defaults with caller overrides and install them on self
        # (self.verbose etc. come from set_state).
        kwargs = state_variables.init_state_variables(mode, **kwargs)
        self.state_vars = kwargs.keys()
        self.set_state(set_mode='init', **kwargs)
        if self.verbose:
            print('\n---Atmosphere of {}---'.format(planet))
        self.logFile = utils.setupLogFile(log)

        # A string config is treated as a filename relative to the planet
        # directory and parsed into a planetConfig object.
        if isinstance(config, six.string_types):
            config = os.path.join(self.planet, config)
            config = pcfg.planetConfig(self.planet, configFile=config, log=log)
        self.config = config

        # ##Create function dictionaries
        # Dispatch tables mapping gasType/cloudType/propType values to the
        # bound methods that produce each dataset.
        self.gasGen = {}
        self.gasGen['read'] = self.readGas
        self.cloudGen = {}
        self.cloudGen['read'] = self.readCloud
        self.propGen = {}
        self.propGen['compute'] = self.computeProp

        if self.verbose == 'loud':
            print('Planet ' + self.planet)
            self.config.display()
        if self.config.gasType == 'read':  # this assumes that cloudType is then also 'read'
            utils.log(self.logFile, '\tReading from: ' + self.config.filename, self.verbose)
            utils.log(self.logFile, '\tAtmosphere file:  ' + self.config.gasFile, self.verbose)
            utils.log(self.logFile, '\tCloud file:  ' + self.config.cloudFile, self.verbose)
def psLocalTest():
	"""Smoke-test a local PowerShell client: open a shell, run a timed-out
	sleep, query OS attributes, then close the shell.

	Side effects only (logging, shell interaction); returns nothing."""
	## Bind client before the try block so the error path can safely
	## close it -- previously client.close() in the handler raised
	## NameError when the failure happened before PwshLocal() was created.
	client = None
	try:
		thisPath = os.path.dirname(os.path.abspath(__file__))
		basePath = os.path.abspath(os.path.join(thisPath, '..'))
		if basePath not in sys.path:
			sys.path.append(basePath)
		import env
		env.addLibPath()
		import utils
		import twisted.logger

		## Setup requested log handlers
		globalSettings = utils.loadSettings(os.path.join(env.configPath, 'globalSettings.json'))
		logFiles = utils.setupLogFile('JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'], directoryName='client')
		logObserver  = utils.setupObservers(logFiles, 'JobDetail', env, globalSettings['fileContainingContentGatheringLogSettings'])
		logger = twisted.logger.Logger(observer=logObserver, namespace='JobDetail')

		client = PwshLocal(logger)
		version = client.open()
		logger.debug('version: {version!r}', version=version)

		logger.debug('sleep should timeout and reinitialize shell...')
		results = client.run('sleep 5', timeout=2)
		logger.debug('sleep output: {results!r}', results=results)

		osAttrDict = {}
		queryOperatingSystem(client, logger, osAttrDict)
		logger.debug('osAttrDict: {osAttrDict!r}', osAttrDict=osAttrDict)
		client.close()

	## Narrowed from a bare except: so SystemExit/KeyboardInterrupt pass through.
	except Exception:
		stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
		print('Main Exception: {}'.format(stacktrace))
		if client is not None:
			client.close()
Example #5
0
 def __init__(self, mode='normal', log=None, **kwargs):
     """This calculates the brightness temperature of the planets.
        It must be used with atmosphere and alpha"""
     # Resolve the state variables for this mode and install them.
     state_kw = state_variables.init_state_variables(mode, **kwargs)
     self.state_vars = state_kw.keys()
     self.set_state(set_mode='init', **state_kw)
     # Layer absorption is filled in later.
     self.layerAlpha = None
     self.log = utils.setupLogFile(log)
Example #6
0
 def __init__(self, mode='normal', log=None, **kwargs):
     """This calculates the brightness temperature of the planets.
        It must be used with atmosphere and alpha

        Parameters
        ----------
        mode : str
            Run mode passed to state_variables.init_state_variables.
        log : optional
            Log destination for utils.setupLogFile.
        kwargs : state variables (see state_variables.py)."""
     # Merge mode defaults with caller overrides and install them on self.
     kwargs = state_variables.init_state_variables(mode, **kwargs)
     self.state_vars = kwargs.keys()
     self.set_state(set_mode='init', **kwargs)
     self.log = utils.setupLogFile(log)
     # Computed later; None until the layer absorption is available.
     self.layerAlpha = None
Example #7
0
def main():
    """Entry point for this utility.

    Usage::

      $ python createApiUser.py

    """
    try:
        ## Setup requested log handlers
        globalSettings = utils.loadSettings(
            os.path.join(env.configPath, "globalSettings.json"))
        logFiles = utils.setupLogFile(
            "ApiApplication",
            env,
            globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        logObserver = utils.setupObservers(
            logFiles, "ApiApplication", env,
            globalSettings['fileContainingServiceLogSettings'])
        logger = twisted.logger.Logger(observer=logObserver,
                                       namespace="ApiApplication")
        logger.info('Starting createApiUser')

        ## Connect to database
        dbClient = DatabaseClient(logger)
        if dbClient is None:
            raise SystemError(
                'Failed to connect to database; unable to initialize tables.')

        ## Get list of valid users
        users = {}
        getUsers(dbClient, logger, users)

        ## Create and insert a new credential
        createUserEntry(dbClient, logger, users)

        ## Cleanup
        dbClient.session.remove()
        dbClient.close()
        ## Message corrected: it previously said 'configureDatabase', a
        ## copy/paste leftover from a sibling utility.
        logger.info('Exiting createApiUser utility.')

    ## Narrowed from a bare except: so SystemExit/KeyboardInterrupt propagate.
    except Exception:
        stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                sys.exc_info()[1],
                                                sys.exc_info()[2])
        ## The basic print is here for a console message in case we weren't
        ## able to use the logging mechanism before encountering the failure.
        print('Failure in createApiUser: {}'.format(stacktrace))
        try:
            logger.debug('Failure in createApiUser: {}'.format(stacktrace))
        except Exception:
            ## Best-effort: logger may not exist if setup itself failed.
            pass

    ## end main
    return
Example #8
0
File: planet.py  Project: emolter/pyplanet
    def __init__(self, name, mode='normal', config='config.par', **kwargs):
        """This is the 'executive function class to compute overall planetary emission.
           For both mode and kwargs look at state_variables.py
           Inputs:
                name:  'Jupiter', 'Saturn', 'Uranus', 'Neptune'
                config:  config file name.  If 'planet' sets to <name>/config.par
                mode:  sets up for various special modes '[normal]/batch/mcmc/scale_alpha/use_alpha'
                kwargs: 'verbose' and 'plot' (and other state_vars - see show_state())"""

        planet_list = ['Jupiter', 'Saturn', 'Neptune', 'Uranus']
        self.planet = name.capitalize()
        self.header = {}
        self.freqs = None
        self.freqUnit = None
        self.b = None

        print('Planetary modeling  (ver {})'.format(version))
        # Bail out early on an unrecognized planet name; the instance is
        # left only partially initialized in that case.
        if self.planet not in planet_list:
            print("{} not found.".format(self.planet))
            return

        # Set up state_variables
        # (self.verbose, self.write_log_file, self.initialize etc. are
        # installed by set_state.)
        kwargs = state_variables.init_state_variables(mode.lower(), **kwargs)
        self.state_vars = kwargs.keys()
        self.set_state(set_mode='init', **kwargs)
        self.mode = mode
        self.kwargs = kwargs

        #  ##Set up log file
        # One log file per run, timestamped to the minute.
        if self.write_log_file:
            runStart = datetime.datetime.now()
            self.logFile = 'Logs/{}_{}.log'.format(
                self.planet, runStart.strftime("%Y%m%d_%H%M"))
            self.log = utils.setupLogFile(self.logFile)
            utils.log(self.log, self.planet + ' start ' + str(runStart),
                      self.verbose)
        else:
            self.log = None

        #  ## Get config
        # Config files live under the planet's own directory.
        config = os.path.join(self.planet, config)
        if self.verbose:
            print('Reading config file:  ', config)
            print("\t'{}.config.display()' to see config parameters.".format(
                name[0].lower()))
        self.config = pcfg.planetConfig(self.planet,
                                        configFile=config,
                                        log=self.log)

        if self.initialize:
            self.initialize_run()
Example #9
0
    def __init__(self, serviceName, globalSettings, canceledEvent,
                 shutdownEvent):
        """Constructor for the Query service.

        Parameters
        ----------
        serviceName : str
            Name used for log files and the logger namespace.
        globalSettings : dict
            Platform-wide settings (log settings file names, kafka topic).
        canceledEvent, shutdownEvent
            Events used to coordinate service teardown.
        """
        self.canceledEvent = canceledEvent
        self.shutdownEvent = shutdownEvent
        # Per-service log file plus twisted observers feeding one Logger.
        self.logFiles = utils.setupLogFile(
            serviceName,
            env,
            globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        self.logObserver = utils.setupObservers(
            self.logFiles, serviceName, env,
            globalSettings['fileContainingServiceLogSettings'])
        self.logger = twisted.logger.Logger(observer=self.logObserver,
                                            namespace=serviceName)
        self.logger.info('Started logger for {serviceName!r}',
                         serviceName=serviceName)

        ## Allow the dbClient to get created in the main thread, to reuse pool
        self.dbClient = None
        super().__init__(serviceName, globalSettings, getDbClient=True)
        # Close the session opened by the base class; this service makes
        # its own connections later.
        self.dbClient.session.close()

        self.localSettings = utils.loadSettings(
            os.path.join(env.configPath,
                         globalSettings['fileContainingQuerySettings']))
        self.logger.info(
            'waitSecondsBetweenCacheCleanupJobs: {secs!r}',
            secs=self.localSettings['waitSecondsBetweenCacheCleanupJobs'])

        ## Twisted import here to avoid issues with epoll on Linux
        from twisted.internet import task, threads

        ## TODO: modify looping calls to use threads.deferToThread(); avoid
        ## time delays/waits from being blocking to the main reactor thread
        self.loopingCleanUpCache = task.LoopingCall(self.cleanUpCache)
        self.loopingCleanUpCache.start(
            self.localSettings['waitSecondsBetweenCacheCleanupJobs'])

        ## Make checking kafka and processing results a looping call, to give a
        ## break to the main reactor thread; otherwise it blocks other looping
        ## calls, like those in coreService for health and environment details:
        self.kafkaConsumer = self.createKafkaConsumer(
            globalSettings['kafkaQueryTopic'])
        self.loopingGetKafkaResults = task.LoopingCall(self.getKafkaResults,
                                                       self.kafkaConsumer)
        ## Give a second break before starting the main LoopingCall
        self.loopingGetKafkaResults.start(1, now=False)
        self.logger.debug('Leaving Query constructor')
Example #10
0
def setupLogging(globalSettings):
    """Logger for the parent process.

    Builds the 'Main' log file handlers and twisted observers and returns
    a namespaced twisted Logger."""
    ## Ensure the log directory exists BEFORE any file handlers are
    ## created -- previously this check ran after setupLogFile/
    ## setupObservers, which could fail on a fresh install.
    if not os.path.exists(env.logPath):
        os.makedirs(env.logPath)
    logFiles = setupLogFile('Main', env,
                            globalSettings['fileContainingServiceLogSettings'])
    logObserver = setupObservers(
        logFiles, 'Main', env,
        globalSettings['fileContainingServiceLogSettings'])
    logger = twisted.logger.Logger(observer=logObserver, namespace='Main')
    logger.info('Starting Open Content Platform.')
    logger.info(' Main-process identifier (PID): {}.'.format(os.getpid()))
    logger.info(' Started on the command line; press Ctrl+C to exit.')

    ## end setupLogging
    return logger
    def __init__(self, serviceName, globalSettings, canceledEvent,
                 shutdownEvent):
        """Constructor for the ResultProcessingFactory.

        Parameters
        ----------
        serviceName : str
            Name used for log files and the logger namespace.
        globalSettings : dict
            Platform-wide settings (log settings file names, etc.).
        canceledEvent, shutdownEvent
            Events used to coordinate service teardown.
        """
        self.canceledEvent = canceledEvent
        self.shutdownEvent = shutdownEvent
        # Per-service log file plus twisted observers feeding one Logger.
        self.logFiles = utils.setupLogFile(
            serviceName,
            env,
            globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        self.logObserver = utils.setupObservers(
            self.logFiles, serviceName, env,
            globalSettings['fileContainingServiceLogSettings'])
        self.logger = twisted.logger.Logger(observer=self.logObserver,
                                            namespace=serviceName)
        self.logger.info('Started logger for {serviceName!r}',
                         serviceName=serviceName)
        self.localSettings = utils.loadSettings(
            os.path.join(
                env.configPath,
                globalSettings['fileContainingResultProcessingSettings']))
        self.globalSettings = globalSettings
        self.clientEndpointTable = platformSchema.ServiceResultProcessingEndpoint
        self.serviceJobTable = None
        self.serviceHealthTable = platformSchema.ServiceResultProcessingHealth
        # Parallel lists: validActions[i] is dispatched to actionMethods[i].
        self.validActions = [
            'connectionRequest', 'healthResponse', 'cacheResponse',
            'getKafkaPartitionCount', 'kafkaHealth'
        ]
        self.actionMethods = [
            'doConnectionRequest', 'doHealthResponse', 'doCacheResponse',
            'doGetKafkaPartitionCount', 'doKafkaHealth'
        ]
        super().__init__(serviceName, globalSettings)
        # The base constructor may have flagged cancel/shutdown; abort startup.
        if self.canceledEvent.is_set() or self.shutdownEvent.is_set():
            self.logger.error('Cancelling startup of {serviceName!r}',
                              serviceName=serviceName)
            return

        ## Twisted import here to avoid issues with epoll on Linux
        from twisted.internet import task

        self.loopingGetKafkaPartitionCount = task.LoopingCall(
            self.doGetKafkaPartitionCount)
        self.loopingGetKafkaPartitionCount.start(
            self.
            localSettings['waitSecondsBetweenRequestingKafkaPartitionCount'])
Example #12
0
    def __init__(self, name, mode='normal', config='config.par', **kwargs):
        """This is the 'executive function class to compute overall planetary emission.
           For both mode and kwargs look at state_variables.py
           Inputs:
                name:  'Jupiter', 'Saturn', 'Uranus', 'Neptune'
                config:  config file name.  If 'planet' sets to <name>/config.par
                mode:  sets up for various special modes '[normal]/batch/mcmc/scale_alpha/use_alpha'
                kwargs: 'verbose' and 'plot' (and other state_vars - see show_state())"""

        planet_list = ['Jupiter', 'Saturn', 'Neptune', 'Uranus']
        self.planet = name.capitalize()
        self.header = {}
        self.freqs = None
        self.freqUnit = None
        self.b = None

        print('Planetary modeling  (ver {})'.format(version))
        # Bail out early on an unrecognized planet name; the instance is
        # left only partially initialized in that case.
        if self.planet not in planet_list:
            print("{} not found.".format(self.planet))
            return

        # Set up state_variables
        # (self.verbose, self.write_log_file, self.initialize etc. are
        # installed by set_state.)
        kwargs = state_variables.init_state_variables(mode.lower(), **kwargs)
        self.state_vars = kwargs.keys()
        self.set_state(set_mode='init', **kwargs)
        self.mode = mode
        self.kwargs = kwargs

        #  ##Set up log file
        # One log file per run, timestamped to the minute.
        if self.write_log_file:
            runStart = datetime.datetime.now()
            self.logFile = 'Logs/{}_{}.log'.format(self.planet, runStart.strftime("%Y%m%d_%H%M"))
            self.log = utils.setupLogFile(self.logFile)
            utils.log(self.log, self.planet + ' start ' + str(runStart), self.verbose)
        else:
            self.log = None

        #  ## Get config
        # Config files live under the planet's own directory.
        config = os.path.join(self.planet, config)
        if self.verbose:
            print('Reading config file:  ', config)
            print("\t'{}.config.display()' to see config parameters.".format(name[0].lower()))
        self.config = pcfg.planetConfig(self.planet, configFile=config, log=self.log)
        self.config.show()

        if self.initialize:
            self.initialize_run()
Example #13
0
    def getLocalLogger(self):
        """Setup a log handler.

        Builds file handlers named after this service, wires twisted
        observers onto them, and stores the resulting Logger on self."""
        handlerFiles = setupLogFile(
            self.serviceName, env,
            self.globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        ## NOTE(review): the observers and namespace use the literal
        ## 'ApiService' while the files use self.serviceName -- confirm
        ## this mismatch is intentional.
        observer = setupObservers(
            handlerFiles, 'ApiService', env,
            self.globalSettings['fileContainingServiceLogSettings'])
        self.logger = twisted.logger.Logger(
            observer=observer, namespace='ApiService')
        self.logger.info('Started logger for {serviceName!r}',
                         serviceName=self.serviceName)

        ## end getLocalLogger
        return
Example #14
0
    def __init__(self,
                 planet,
                 mode='normal',
                 config='config.par',
                 log=None,
                 **kwargs):
        """reads/computes atmospheres.  This returns:
               self.gas
               self.cloud
               self.layerProperty
            on the appropriate grid.

           Parameters
           ----------
           planet : str
               Planet name; capitalized internally.
           mode : str
               Run mode passed to state_variables.init_state_variables.
           config : str or planetConfig
               Config file name (joined under the planet directory) or a
               pre-built planetConfig instance.
           log : optional
               Log destination for utils.setupLogFile.
           kwargs : state variables (see state_variables.py)."""

        self.planet = planet.capitalize()
        # Merge mode defaults with caller overrides and install them on self.
        kwargs = state_variables.init_state_variables(mode, **kwargs)
        self.state_vars = kwargs.keys()
        self.set_state(set_mode='init', **kwargs)
        if self.verbose:
            print('\n---Atmosphere of {}---'.format(planet))
        self.logFile = utils.setupLogFile(log)

        # isinstance instead of type(...) == str: idiomatic, and also accepts
        # str subclasses (matches the check used by the six-based variant).
        if isinstance(config, str):
            config = os.path.join(self.planet, config)
            config = pcfg.planetConfig(self.planet, configFile=config, log=log)
        self.config = config

        # ##Create function dictionaries
        # Dispatch tables mapping config type values to producer methods.
        self.gasGen = {}
        self.gasGen['read'] = self.readGas
        self.cloudGen = {}
        self.cloudGen['read'] = self.readCloud
        self.propGen = {}
        self.propGen['compute'] = self.computeProp

        if self.verbose == 'loud':
            print('Planet ' + self.planet)
            self.config.display()
        if self.config.gasType == 'read':  # this assumes that cloudType is then also 'read'
            utils.log(self.logFile, '\tReading from: ' + self.config.filename,
                      self.verbose)
            utils.log(self.logFile,
                      '\tAtmosphere file:  ' + self.config.gasFile,
                      self.verbose)
            utils.log(self.logFile, '\tCloud file:  ' + self.config.cloudFile,
                      self.verbose)
    def __init__(self, serviceName, globalSettings, canceledEvent,
                 shutdownEvent):
        """Constructor for the LogCollectionForJobs service.

        Parameters
        ----------
        serviceName : str
            Name used for log files and the logger namespace.
        globalSettings : dict
            Platform-wide settings (log settings file names, kafka topic).
        canceledEvent, shutdownEvent
            Events used to coordinate service teardown.
        """
        self.canceledEvent = canceledEvent
        self.shutdownEvent = shutdownEvent
        # Per-service log file plus twisted observers feeding one Logger.
        self.logFiles = setupLogFile(
            serviceName,
            env,
            globalSettings['fileContainingServiceLogSettings'],
            directoryName='service')
        self.logObserver = setupObservers(
            self.logFiles, serviceName, env,
            globalSettings['fileContainingServiceLogSettings'])
        self.logger = twisted.logger.Logger(observer=self.logObserver,
                                            namespace=serviceName)
        self.logger.info('Started logger for {serviceName!r}',
                         serviceName=serviceName)
        super().__init__(serviceName, globalSettings)
        self.localSettings = loadSettings(
            os.path.join(
                env.configPath,
                globalSettings['fileContainingLogCollectionForJobsSettings']))
        # Settings are given in hours; convert to seconds for LoopingCall.
        self.secondsBetweenLogCleanup = int(
            self.localSettings['waitHoursBetweenLogCleanupChecks']) * 60 * 60
        self.secondsToRetainLogFiles = int(
            self.localSettings['numberOfHoursToRetainLogFiles']) * 60 * 60

        ## Twisted import here to avoid issues with epoll on Linux
        from twisted.internet import task

        ## Make checking kafka and processing results a looping call, to give a
        ## break to the main reactor thread; otherwise it blocks other looping
        ## calls, like those in coreService for health and environment details:
        self.kafkaConsumer = self.createKafkaConsumer(
            globalSettings['kafkaLogForJobsTopic'])
        self.loopingGetKafkaResults = task.LoopingCall(self.getKafkaResults,
                                                       self.kafkaConsumer)
        ## Give a second break before starting the main LoopingCall
        self.loopingGetKafkaResults.start(1, now=False)
        ## Regularly check logs for cleanup; avoid filling disk with old logs
        self.loopingCleanupLogs = task.LoopingCall(self.deferCleanupLogs)
        self.loopingCleanupLogs.start(self.secondsBetweenLogCleanup)
        self.logger.debug('Leaving LogCollectionForJobs constructor')
	def __init__(self, serviceName, globalSettings, canceledEvent, shutdownEvent):
		"""Constructor for the ResultProcessingClientFactory.

		Arguments:
		  serviceName (str)     : class name of the client ('ResultProcessingClient')
		  globalSettings (dict) : global globalSettings
		  canceledEvent         : event coordinating cancellation
		  shutdownEvent         : event coordinating shutdown
		"""
		try:
			self.canceledEvent = canceledEvent
			self.shutdownEvent = shutdownEvent
			# Per-client log file plus twisted observers feeding one Logger.
			self.logFiles = utils.setupLogFile(serviceName, env, globalSettings['fileContainingClientLogSettings'], directoryName='client')
			self.logObserver = utils.setupObservers(self.logFiles, serviceName, env, globalSettings['fileContainingClientLogSettings'])
			self.logger = twisted.logger.Logger(observer=self.logObserver, namespace=serviceName)
			self.globalSettings = globalSettings
			self.localSettings = utils.loadSettings(os.path.join(env.configPath, globalSettings['fileContainingResultProcessingClientSettings']))
			self.dbClient = None
			# Parallel lists: validActions[i] dispatches to actionMethods[i].
			self.validActions = ['connectionResponse', 'healthRequest', 'tokenExpired', 'unauthorized', 'partitionCountResponse']
			self.actionMethods = ['doConnectionResponse', 'doHealthRequest', 'doTokenExpired', 'doUnauthorized', 'doPartitionCountResponse']
			# Kafka connection state; give up after kafkaErrorLimit failures.
			self.kafkaErrorCount = 0
			self.kafkaErrorLimit = 5
			self.kafkaConsumer = None
			self.partitionCount = 0
			self.connectedToKafkaConsumer = False
			self.resultProcessingUtility = None
			# Start paused until the object cache is built.
			self.maintenanceMode = True
			self.pauseKafkaProcessing = True
			super().__init__(serviceName, globalSettings)
			self.initialize(True)
			## Looping call to build objectCache and start kafka processing
			self.loopingStartProcessing = task.LoopingCall(self.startProcessing)
			self.loopingStartProcessing.start(int(self.localSettings['waitSecondsBetweenRequestingFullSyncCacheUpdates'])).addErrback(self.logger.error)
			## Looping call to delta update (in-place) the objectCache
			self.loopingDeltaSync = task.LoopingCall(self.updateObjectCache)
			self.loopingDeltaSync.start(int(self.localSettings['waitSecondsBetweenRequestingDeltaSyncCacheUpdates'])).addErrback(self.logger.error)

		except:
			# Top-level boundary: report, notify kafka, and stop the reactor.
			exception = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
			print('Exception in ResultProcessingClientFactory constructor: {}'.format(str(exception)))
			with suppress(Exception):
				self.logger.error('Exception in ResultProcessingClientFactory: {exception!r}', exception=exception)
			self.logToKafka(sys.exc_info()[1])
			self.shutdownEvent.set()
			reactor.stop()
Example #17
0
    def __init__(self,planet,config='config.par',path=None,log=None,verbose=False,plot=True):
        """reads/computes atmospheres.  This should return:
               self.gas
               self.cloud
               self.layerProperty
            on the appropriate grid
            Note that the research is in the input files and modifying the tweak modules
            All of the default config parameters are hard-coded here:  see __init__, setConfig, showConfig."""

        # Legacy Python 2 snippet (print statements, string.capitalize).
        planet = string.capitalize(planet)
        self.planet = planet
        self.verbose=verbose
        self.plot=plot
        self.logFile = utils.setupLogFile(log)
        self.batch = False
        
        print '\n---Atmosphere of %s---' % (planet)
        # A string config is parsed into a planetConfig object.
        if type(config) == str:
            config = pcfg.planetConfig(self.planet,configFile=config,log=log,verbose=verbose)
        self.config = config

        ###Create function dictionaries
        # Dispatch tables mapping gasType/cloudType values to producer methods.
        self.gasGen = {}
        self.gasGen['read'] = self.readGas
        self.gasGen['compute'] = self.computeGas
        self.cloudGen = {}
        self.cloudGen['read'] = self.readCloud
        self.cloudGen['compute'] = self.computeCloud
        self.propGen = {}
        self.propGen['read'] = self.readProp
        self.propGen['compute'] = self.computeProp

        print 'Planet '+self.planet
        if self.config.gasType == 'read':  # this assumes that cloudType is then also 'read'
            utils.log(self.logFile,'\tReading from: '+self.config.path,True)
            utils.log(self.logFile,'\tAtmosphere file:  '+self.config.gasFile,True)
            utils.log(self.logFile,'\tCloud file:  '+self.config.cloudFile,True)
        if verbose:
            print self.config.show()
Example #18
0
    def __init__(self, planet, configFile, log=None, printHelp=False):
        """reads in config file"""
        self.planet = planet.capitalize()
        self.filename = configFile
        self.path = self.planet
        self.logFile = utils.setupLogFile(log)

        # Token definitions come from the bundled config.json.
        with open('config.json', 'r') as f:
            self.toks = json.load(f)['toks']

        # Apply the per-planet default for every token; scalar defaults get
        # their unit attached via set_single_val.
        for spec in self.toks.values():
            default = spec['default'][self.planet]
            if isinstance(default, (six.string_types, int, float)):
                default = set_single_val(default, spec['unit'])
            setattr(self, spec['name'], default)
        # Then let the named config file override the defaults, and log it.
        self.setConfig(configFile)
        pars = self.show()
        utils.log(self.logFile, self.planet, False)
        utils.log(self.logFile, configFile, False)
        utils.log(self.logFile, pars, False)
Example #19
0
File: config.py  Project: emolter/pyplanet
    def __init__(self, planet, configFile, log=None, printHelp=False):
        """reads in config file"""
        # Legacy Python 2 variant (string.capitalize, unicode).
        planet = string.capitalize(planet)
        self.planet = planet
        self.filename = configFile
        self.path = planet
        self.logFile = utils.setupLogFile(log)

        # Token definitions come from the bundled config.json.
        with open('config.json', 'r') as f:
            config_data = json.load(f)
        self.toks = config_data['toks']

        # Set defaults
        # Scalar defaults get their unit attached via set_single_val.
        for tok in self.toks:
            val = self.toks[tok]['default'][self.planet]
            if type(val) in (str, unicode, int, float):
                val = set_single_val(val, self.toks[tok]['unit'])
            setattr(self, self.toks[tok]['name'], val)
        # Then let the named config file override the defaults, and log it.
        self.setConfig(configFile)
        pars = self.show()
        utils.log(self.logFile, planet, False)
        utils.log(self.logFile, configFile, False)
        utils.log(self.logFile, pars, False)
Example #20
0
    def __init__(self,planet,config=None,log=None,verbose=False,plot=False):
        """reads/computes atmospheres.  This should return:
               self.gas
               self.cloud
               self.layerProperty
            on the appropriate grid
            Note that the research is in the input files and modifying the tweak modules"""

        # Legacy Python 2 snippet (print statements, string.capitalize).
        planet = string.capitalize(planet)
        self.planet = planet
        self.verbose=verbose
        self.plot=plot
        self.logFile = utils.setupLogFile(log)
        
        self.LP = lyrProp
        ###Set default input files and Constituent(C)/Particulate(Cl) dictionaries
        # Hard-coded per-planet defaults; only Neptune and Jupiter are known.
        if planet=='Neptune':
            self.path = 'Neptune/'
            self.gasFile = 'neptune.paulCO_cloud21_fletcher_best_dry'
            self.cloudFile = 'nepcloud_CO.cloud21_fletcher_best_dry'
            self.otherFile = None
            self.C = CNeptune
            self.Cl= ClNeptune
            self.gasType = 'read'     ### read vs compute
            self.cloudType = 'read'   ###     "
            self.otherType = 'compute'   ###     "
            self.g_ref = 1130.0         #[cm/s^2]
            self.r_ref = 2.45e9         #[cm]
            self.p_ref = 1.0            #[bars]
        elif planet=='Jupiter':
            self.path = 'Jupiter/'
            self.gasFile = 'jupiter.paulSolar'
            self.cloudFile = 'jupiter.paulclSolar'
            self.otherFile = None
            self.C = CJupiter
            self.Cl= ClJupiter
            self.gasType = 'read'       ### read vs compute
            self.cloudType = 'read'     ###     "
            self.otherType = 'compute'  ###     "
            self.g_ref = 2417.0         #[cm/s^2]
            self.r_ref = 1.0e12         #[cm]
            self.p_ref = 1.0            #[bars]
        else:
            # Unknown planet: leave everything unset and warn.
            self.gasFile = None
            self.cloudFile = None
            self.otherFile = None
            self.path = None
            self.C = None
            self.Cl= None
            print 'No planet values set for '+planet
        self.tweakType = self.planet               ### should be a planet name (default); if not, tweaking gets skipped
        self.regridType = 'z simple lin 1.0 km'    ### see the regrid module to default
        # Optional config file overrides, resolved under the planet path.
        configFile = 'none'
        if config!=None:
            configFile = self.path+config
            self.readConfig(configFile)
        utils.log(self.logFile,planet,False)
        pars = self.dispPar()
        utils.log(self.logFile,configFile,False)
        utils.log(self.logFile,pars,False)

        ###Create function dictionaries
        # Dispatch tables mapping gasType/cloudType/otherType and planet name
        # to the bound methods that produce or tweak each dataset.
        self.gasGen = {}
        self.gasGen['read'] = self.readGas
        self.gasGen['compute'] = self.computeGas
        self.cloudGen = {}
        self.cloudGen['read'] = self.readCloud
        self.cloudGen['compute'] = self.computeCloud
        self.otherGen = {}
        self.otherGen['read'] = self.readOther
        self.otherGen['compute'] = self.computeOther
        self.tweakAtm = {}
        self.tweakAtm['Neptune'] = self.tweakAtmNeptune
        self.tweakAtm['Jupiter'] = self.tweakAtmJupiter

        print 'Planet '+self.planet
        if self.gasType == 'read':  # this assumes that cloudType is then also 'read'
            utils.log(self.logFile,'\tReading from: '+self.path,True)
            utils.log(self.logFile,'\tAtmosphere file:  '+self.gasFile,True)
            utils.log(self.logFile,'\tCloud file:  '+self.cloudFile,True)
        if verbose:
            print self.dispPar()
        print '\tIf in interactive mode, change any parameters in setpar() before run()'
Example #21
0
def main(args):
    """Train gestational-age models from a measurements CSV and write
    per-row predictions to <args.outvar>.csv next to the input file.

    Expected attributes on ``args``:
        in_csv:    path to the input CSV file
        y_var:     name of the target column
        outvar:    name of the prediction column / output file stem
        all_train: if True, train one general model on all rows; otherwise
                   train per-trimester models (plus a trimester classifier
                   when rows from both trimesters are present).

    Side effects: sets up logging in the input file's directory, trains
    TensorFlow models, and writes the predictions CSV.  Errors are logged
    rather than raised.
    """
    in_csv_path = Path(args.in_csv)
    y_name = args.y_var
    utils.setupLogFile(in_csv_path.parent)

    logging.info(' --- RUN for outvar {}, target {} ----- '.format(
        args.outvar, y_name))
    if not in_csv_path.exists():
        logging.error('Could not find the input file')
        return  # nothing to do without input data

    # Shared feature lists (previously duplicated inline).  Continuous
    # inputs get min/max normalization; categorical inputs are binary 0/1.
    numeric_headers = [
        'fl_1', 'bp_1', 'hc_1', 'ac_1', 'mom_age_edd', 'mom_weight_lb',
        'mom_height_in'
    ]
    categorical_headers = [
        'hiv', 'current_smoker', 'former_smoker', 'chronic_htn',
        'preg_induced_htn', 'diabetes', 'gest_diabetes'
    ]

    try:
        dataframe = read_csv(str(in_csv_path))
        # Drop bookkeeping columns (convention: names starting with '_').
        for column_name in dataframe.columns:
            if column_name.startswith('_'):
                dataframe.pop(column_name)

        # Min/max normalize each numeric feature in place.
        # NOTE(review): a constant column would make r == 0 and divide by
        # zero here -- preserved from the original; confirm inputs vary.
        for header in numeric_headers:
            r = max(dataframe[header]) - min(dataframe[header])
            dataframe[header] = (dataframe[header] -
                                 min(dataframe[header])) / r

        # Training rows are those with a usable (non-missing) target;
        # '.' is the dataset's missing-value marker.
        train_whole = dataframe[(dataframe[y_name] != '.')
                                & (notna(dataframe[y_name])) &
                                (notnull(dataframe[y_name]))].copy()
        train_whole = train_whole.astype({y_name: 'int32'})

        logging.info(
            ' Number of trainig samples in the selected set: {}'.format(
                len(train_whole)))

        batch_size = 32  # A small batch sized is used for demonstration purposes
        feature_columns = []
        feature_names = []
        for header in numeric_headers:
            feature_columns.append(feature_column.numeric_column(header))
            feature_names.append(header)

        for header in categorical_headers:
            # Binary category, one-hot encoded via an indicator column.
            col = feature_column.categorical_column_with_identity(header, 2)
            col = feature_column.indicator_column(col)
            feature_columns.append(col)
            feature_names.append(header)

        # Default prediction for every row; overwritten below on success.
        all_pred = [0] * len(dataframe)

        print('****** args.all_train is: {}'.format(args.all_train))
        if not args.all_train:
            trimester = train_whole['trimester'].values.tolist()
            min_trim = min(trimester)
            max_trim = max(trimester)

            model_trim = None   # trimester classifier (only for mixed data)
            model_2trim = None  # 2nd-trimester regressor
            model_3trim = None  # 3rd-trimester regressor

            if min_trim == max_trim:
                # This data is only for one of the trimesters, run the training for one of them.
                logging.info(
                    'Training only for Trimester regression: {}'.format(
                        min_trim + 2))
                if min_trim == 0:
                    model_2trim = train_trimester_2(train_whole,
                                                    feature_columns,
                                                    batch_size, y_name)
                    if model_2trim is None:
                        raise Exception('2nd trimester model empty')
                else:
                    model_3trim = train_trimester_3(train_whole,
                                                    feature_columns,
                                                    batch_size, y_name)
                    if model_3trim is None:
                        raise Exception('3rd trimester model empty')
            else:
                model_trim = train_trimester(train_whole, feature_columns,
                                             batch_size, 'trimester')
                trim_2_df = train_whole[train_whole['trimester'] == 0]
                model_2trim = train_trimester_2(trim_2_df, feature_columns,
                                                batch_size, y_name)
                trim_3_df = train_whole[train_whole['trimester'] == 1]
                model_3trim = train_trimester_3(trim_3_df, feature_columns,
                                                batch_size, y_name)
                logging.info('-- done training for all three ')
                # BUG FIX: the original condition was
                #   a is None or b is None and c is None
                # which, by operator precedence, only raised when BOTH
                # trimester regressors were missing.  Any missing model
                # must abort this branch.
                if model_trim is None or model_2trim is None or model_3trim is None:
                    raise Exception(
                        'One of the models came back empty during the classification/regression phase'
                    )

            # Classify the dataset if this is a multi-trimester dataset
            if model_trim is not None and model_2trim is not None and model_3trim is not None:
                logging.info('Creating predictions for the full dataset')
                ds = df_to_dataset(dataframe,
                                   shuffle=False,
                                   batch_size=32,
                                   labels_name=y_name)
                ga_2trim = model_2trim.predict(ds)
                ga_3trim = model_3trim.predict(ds)

                ds = df_to_dataset(dataframe,
                                   shuffle=False,
                                   batch_size=32,
                                   labels_name='trimester')
                c_p = (model_trim.predict(ds) > 0).astype("int32")

                # Pick the 2nd- or 3rd-trimester regression per row, based
                # on the classifier's output.
                all_pred = [
                    g_2[0] if c == 0 else g_3[0]
                    for (g_2, g_3, c) in zip(ga_2trim, ga_3trim, c_p)
                ]
                logging.info('Length of all predictions list is: {}'.format(
                    len(all_pred)))

            elif min_trim == max_trim:
                # Single-trimester dataset: predict with whichever model
                # was trained above.
                ds = df_to_dataset(dataframe,
                                   shuffle=False,
                                   batch_size=32,
                                   labels_name=y_name)
                if min_trim == 0 and model_2trim is not None:
                    all_pred = model_2trim.predict(ds)
                elif min_trim == 1 and model_3trim is not None:
                    all_pred = model_3trim.predict(ds)
                else:
                    logging.error('Either 2nd or 3rd trimester data is null')
            else:
                logging.error('We are in unknown territory, exiting')

        else:  # Per trimester if/else
            model_g = train_general(train_whole, feature_columns, batch_size,
                                    y_name)
            ds = df_to_dataset(dataframe,
                               shuffle=False,
                               batch_size=32,
                               labels_name=y_name)
            all_pred = model_g.predict(ds)

        logging.info('Creating output dataset')
        out_df = dataframe[['PatientID', 'filename', 'studydate']].copy()
        out_df[args.outvar] = all_pred
        out_path = in_csv_path.parent / (args.outvar + '.csv')
        logging.info('Should output to: {}'.format(out_path))
        out_df.to_csv(out_path)
    except Exception as e:
        # logging.exception records the message and full traceback in one
        # call (the original logged the same exception twice).
        logging.exception('Error: \n{}'.format(e))
예제 #22
0
파일: alpha.py 프로젝트: jtollefs/pyplanet
    def __init__(self,path=None,config=None,log=None,verbose=False,plot=False):
        """Reads in absorption formalisms by dynamically importing one
           absorption module per constituent directory.
           Note that they are all in GHz.

           path:    root of the pyplanet tree (defaults to pyPlanetPath);
                    constituent modules live under <path>/constituents/
           config:  a planetConfig object, or a config filename (str) from
                    which one is built
           log:     log file name/handle passed to utils.setupLogFile
           verbose: echo extra information
           plot:    stored on self for later use"""

        self.verbose = verbose
        self.plot = plot
        self.log = utils.setupLogFile(log)
        print '\n---Alpha---\n'

        #Get possible constituents
        # Every subdirectory of <path>/constituents is a candidate module.
        if path is None:
            path = pyPlanetPath
        possible = []
        self.constituentsAreAt=os.path.join(path,'constituents')
        utils.log(self.log,'Reading in absorption modules from '+self.constituentsAreAt+'\n',True)
        for d in os.listdir(self.constituentsAreAt):
            fnd = os.path.join(self.constituentsAreAt,d)
            if os.path.isdir(fnd):
                possible.append(d)
        #Import used ones - note this dynamically imports the absorption modules.  It checks that directory's use.txt file.
        self.constituent = {}       # maps constituent dir name -> module name
        self.absorptionModule = {}  # maps constituent dir name -> imported module
        for c in possible:
            fn = os.path.join(self.constituentsAreAt,c,'use.txt')
            try:
                fp = open(fn,'r')
            except:
                # No use.txt means this constituent is not enabled; skip it.
                #utils.log(self.log,'No file '+fn,True)
                continue
            # First line of use.txt names the module; strip a trailing
            # extension if one was given (e.g. 'foo.py' -> 'foo').
            absorber = fp.readline().strip()
            testabs = absorber.split('.')
            if len(testabs)==2:
                absorber=testabs[0]
            fp.close()
            constituentPath=os.path.join(self.constituentsAreAt,c)
            # A use.txt containing 'none' disables the constituent.
            if string.lower(absorber) != 'none':
                sys.path.append(constituentPath)
                try:
                    __import__(absorber)
                    self.absorptionModule[c]=sys.modules[absorber]
                    self.constituent[c] = absorber
                except ImportError:
                    utils.log(self.log,"Can't load "+absorber,True)
        utils.log(self.log,'Using modules:',True)
        for k in self.constituent:
            utils.log(self.log,'\t'+k+':  '+self.constituent[k],True)

        # get config
        # NOTE(review): self.planet is read here but never assigned in this
        # __init__ -- presumably set by a subclass or caller beforehand;
        # confirm, otherwise passing a string config raises AttributeError.
        if type(config) == str:
            config = pcfg.planetConfig(self.planet,configFile=config,log=log,verbose=verbose)
        self.config = config

        # copy config back into otherPar
        # (particle sizes and hydrogen-state settings used by the
        # absorption routines)
        self.otherPar = {}
        self.otherPar['h2state'] = self.config.h2state
        self.otherPar['h2newset'] = self.config.h2newset
        self.otherPar['water'] = self.config.water_p
        self.otherPar['ice'] = self.config.ice_p
        self.otherPar['nh4sh'] = self.config.nh4sh_p
        self.otherPar['nh3ice'] = self.config.nh3ice_p
        self.otherPar['h2sice'] = self.config.h2sice_p
        self.otherPar['ch4'] = self.config.ch4_p
예제 #23
0
파일: config.py 프로젝트: jtollefs/pyplanet
    def __init__(self, planet, configFile="config.par", path=None, log=None, verbose=False, printHelp=False):
        """Reads in the planet config file and seeds every config attribute.

           planet:     planet name (capitalized internally)
           configFile: config file name, joined onto the path below
           path:       directory containing the config file (defaults to
                       the planet name)
           log:        log file name/handle passed to utils.setupLogFile
           verbose:    stored on self for later use
           printHelp:  placeholder flag for printing token help

           Attributes are first seeded with their help text (strings), then
           overwritten by planetaryDefaults() and finally by setConfig()
           from the file, so any value the file omits keeps its default."""
        # Maps a lowercase config-file token to [attribute name, type].
        self.toks = {
            "gasfile": ["gasFile", str],
            "cloudfile": ["cloudFile", str],
            "gasfilehdr": ["gasFileHdr", int],
            "cloudfilehdr": ["cloudFileHdr", int],
            "constituents": ["C", str],
            "clouds": ["Cl", str],
            "tweakfile": ["tweakFile", str],
            "regridtype": ["regridType", str],
            "pmin": ["pmin", float],
            "pmax": ["pmax", float],
            "omega": ["omega_m", float],
            "jn": ["Jn", str],
            "gm": ["GM_ref", float],
            "req": ["Req", float],
            "rpol": ["Rpol", float],
            "distance": ["distance", float],
            "rj": ["RJ", float],
            "p_ref": ["p_ref", float],
            "zonal": ["zonal", str],
            "gtype": ["gtype", str],
            "orientation": ["orientation", str],
            "h2state": ["h2state", str],
            "doppler": ["Doppler", str],
            "h2newset": ["h2newset", str],
            "water": ["water_p", float],
            "ice": ["ice_p", float],
            "nh4sh": ["nh4sh_p", float],
            "nh3ice": ["nh3ice_p", float],
            "h2sice": ["h2sice_p", float],
            "ch4": ["ch4_p", float],
            "limb": ["limb", str],
        }
        planet = string.capitalize(planet)
        self.planet = planet
        self.verbose = verbose
        self.logFile = utils.setupLogFile(log)
        # Config file lives under 'path' if given, else under a directory
        # named after the planet.
        if path:
            self.path = path
        else:
            self.path = planet
        configFile = os.path.join(self.path, configFile)
        self.possibleConstituents = ["H2", "HE", "CH4", "NH3", "H2O", "H2S", "PH3", "CO", "CO13", "HCN"]
        self.possibleClouds = ["SOLN", "H2O", "NH4SH", "NH3", "H2S", "CH4", "AR", "PH3"]

        print "\n---Setting config for %s---\n" % (planet)

        # Set "universal" defaults (i.e. things not currently set but still used internally)
        self.gasType = "read"  # "read vs compute -- normally read (currently can't compute)"
        self.cloudType = "read"  # "read vs compute -- normally read (currently can't compute)"
        self.otherType = "compute"  # "read vs compute -- normally compute (currently can't read)"
        self.otherFile = None  # if otherType could 'read', this file would have the data...
        self.LP = {
            "Z": 0,
            "R": 1,
            "P": 2,
            "GM": 3,
            "AMU": 4,
            "REFR": 5,
            "N": 6,
            "H": 7,
            "LAPSE": 8,
            "g": 9,
        }  # ...and this would hold its dictionary properties

        # These are the current config values -- get seeded with help text
        #   note that the tok names largely differ from variable name, hence the tok dictionary (although generally just all lowercase)
        self.gasFile = "atmospheric constituent file - column order set by C (string)"
        self.gasFileHdr = "number of header lines (to ignore) in gasFile (int)"
        self.cloudFile = "atmospheric cloud file - column order set by Cl (string)"
        self.cloudFileHdr = "number of header lines (to ignore) in cloudFile (int)"
        self.tweakFile = "module that tweaks the read atmosphere (string)"
        self.C = "This 'C'onstituent dictionary has atm layer gas data.   Needs to correspond to datafile if not computing {string}"
        self.Cl = "This 'Cl'oud dictionary has the cloud parameters.  Needs to correspond to datafile if not computing {string}"
        self.regridType = "instructions or file to regrid data (see regrid.py) (string/int)"
        self.h2state = "hydrogen state [e or n] (char)"
        self.pmin = "pmin that is used in the regrid (none uses file min) [bars] (float)"
        self.pmax = "pmax that is used in the regrid (none uses file max) [bars] (float)"
        self.distance = "distance to the planet [AU] (float)"
        self.p_ref = "the pressure where the radii are specified (Req/Rpol) [bars] (float)"
        self.Req = "equatorial radius at p_ref [km] (float)"
        self.Rpol = "polar radius at p_ref [km] (float)"
        self.gtype = "type for planet shape [ellipse/reference/gravity/sphere] (string)"
        self.orientation = "position angle and sub-earth latitude (planetographic) [float,float]"
        self.RJ = "radius for gravity terms (usually Req) [km] (float)"
        self.GM_ref = "GM at radius RJ [km3/s2] (float)"
        self.Jn = "gravity terms (float,float,float,float,float,float)"
        self.omega_m = "rotation velocity [rad/s] (float)"
        self.zonal = "file with zonal winds (string)"
        self.h2newset = "related to h2_orton - can be deprecated? (bool)"
        self.water_p = "water particle size [um?] (float)"
        self.ice_p = "ice particle size [um?] (float)"
        self.nh4sh_p = " nh4sh particle size [um?] (float)"
        self.nh3ice_p = "ammonia-ice particle size [um?] (float)"
        self.h2sice_p = "h2s-ice particle size [um?] (float)"
        self.ch4_p = "methane particle size [um?] (float)"
        self.Doppler = "use Doppler or not (bool)"
        self.limb = "limb type - used in compute_ds to test limb darkening [shape/sec] (str)"
        if printHelp:
            print "eventually will print out tok help..."

        self.planetaryDefaults()  # this sets to base defaults, which get overwritten if valid config file
        self.setConfig(configFile)
        pars = self.show()
        # Record the planet, file, and final parameter set in the log.
        utils.log(self.logFile, planet, False)
        utils.log(self.logFile, configFile, False)
        utils.log(self.logFile, pars, True)
예제 #24
0
def powerShell():
    """Manual smoke test for the PowerShell protocol wrapper.

    Sets up the openContentPlatform environment and loggers, manually
    creates a PowerShell protocol entry, opens a client against a
    hard-coded endpoint, runs the OS query, and logs the result.
    Intended to be run interactively; all failures are logged, not raised.
    """
    import sys
    import traceback
    import os
    import re
    # Start with no logger so the exception handler can tell whether
    # logging was configured before the failure occurred.
    logger = None
    try:
        ## Add openContentPlatform directories onto the sys path
        thisPath = os.path.dirname(os.path.abspath(__file__))
        basePath = os.path.abspath(os.path.join(thisPath, '..'))
        if basePath not in sys.path:
            sys.path.append(basePath)
        import env
        env.addLibPath()
        env.addDatabasePath()
        env.addExternalPath()

        ## Setup requested log handlers
        globalSettings = utils.loadSettings(
            os.path.join(env.configPath, 'globalSettings.json'))
        logEntity = 'Protocols'
        logger = utils.setupLogger(logEntity, env, 'logSettingsCore.json')
        logger.info('Starting protocolWrapperPowershell...')

        import twisted.logger
        logFiles = utils.setupLogFile(
            'JobDetail',
            env,
            globalSettings['fileContainingContentGatheringLogSettings'],
            directoryName='client')
        logObserver = utils.setupObservers(
            logFiles, 'JobDetail', env,
            globalSettings['fileContainingContentGatheringLogSettings'])
        logger = twisted.logger.Logger(observer=logObserver,
                                       namespace='JobDetail')

        from remoteRuntime import Runtime
        runtime = Runtime(logger, env, 'TestPkg', 'TestJob', 'endpoint', {},
                          None, {}, None, {}, None)

        ## Manual creation of a protocol via protocolHandler
        externalProtocolHandler = utils.loadExternalLibrary(
            'externalProtocolHandler', env, globalSettings)
        protocolHandler = externalProtocolHandler.ProtocolHandler(
            None, globalSettings, env, logger)
        protocolType = 'ProtocolPowerShell'
        protocolData = {'user': '******', 'password': '******'}
        protocolHandler.createManual(runtime, protocolType, protocolData)
        protocol = externalProtocolHandler.getProtocolObject(runtime, 1)
        print('protocol to use: {}'.format(protocol))
        print('protocols: {}'.format(
            externalProtocolHandler.getProtocolObjects(runtime)))

        ## Hard-coded test target for this manual run
        endpoint = '192.168.1.100'
        client = PowerShell(runtime, logger, endpoint, 1, protocol)
        client.open()

        osAttrDict = {}
        queryOperatingSystem(client, logger, osAttrDict)
        logger.debug('osAttrDict: {osAttrDict!r}', osAttrDict=osAttrDict)
        client.close()

    except Exception:
        ## BUG FIX: was a bare 'except:', which also swallowed
        ## KeyboardInterrupt/SystemExit; it also referenced 'logger' even
        ## when the failure happened before logging was configured,
        ## raising NameError and hiding the real error.
        stacktrace = traceback.format_exception(sys.exc_info()[0],
                                                sys.exc_info()[1],
                                                sys.exc_info()[2])
        msg = str(sys.exc_info()[1])
        ## Cleanup message when we know what it is:
        ## "<x_wmi: The RPC server is unavailable.  (-2147023174, 'The RPC server is unavailable. ', (0, None, 'The RPC server is unavailable. ', None, None, -2147023174), None)>"
        if re.search(
                'The client cannot connect to the destination specified in the request',
                msg, re.I):
            ## Remove the rest of the fluff
            detail = 'The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests.'
        else:
            detail = stacktrace
        if logger is not None:
            logger.debug('Main Exception: {exception!r}', exception=detail)
        else:
            print('Main Exception: {!r}'.format(detail))
예제 #25
0
파일: planet.py 프로젝트: jtollefs/pyplanet
    def __init__(self, name, freqs=None, b=None, freqUnit='GHz', config='config.par', log='auto', verbose=False, plot=True):
        """This is the 'executive function class to compute overall planetary emission
           Arguments here set defaults, however often get set specifically in run. See pyPlanet.pdf for documentation.
           Inputs:
                name:  'Jupiter', 'Saturn', 'Uranus', 'Neptune' [or 'functions' if you just want to load without running]
                freqs: options are:
                    - int/float:  does that one frequency
                    - list of length 3:  assumes it is [start,stop,step]
                    - list not of length 3:   does those frequencies
                b: 'impact parameter' b=1 is the radius of the maximum projected disc.
                   Determines outType from 'spectrum','profile','image' (along with freqs to some extent)
                    - doublet list is one position, [0,0] is the center
                    - float will generate a grid at that spacing, may need to set blocks during run
                    - list of length > 2, assumes a line of those locations at angle of first entry (deg)
                         if the length is four it assumes [angle,start,stop,step]
                    - 'disc' for disc-averaged
                    - 'stamp' for postage stamp (queries values)
                    - list of doublet lists, evaluate at those locations
               freqUnit: unit for above
               config:  config file name [config.par], 'manual' [equivalent none]
               log:  log data from run, either a file name, a 'no', or 'auto' (for auto filename)
               verbose:  True/False
               plot:  True/False"""

        # 'functions' (any name starting with 'func') loads the class
        # without running any setup.
        if name.lower()[0:4] == 'func':
            return

        planetList = ['Jupiter','Saturn','Neptune','Uranus']
        self.planet = string.capitalize(name)

        runStart = datetime.datetime.now()
        self.header = {}

        print 'Planetary modeling  (ver '+version+')\n'
        print "PLANET.PY_L51:  In alpha, clouds_idp need otherPar['refr'] - still?"

        if self.planet in planetList:
            ### Set up log file
            # 'auto' builds a timestamped name; 'no' disables logging.
            if string.lower(log)=='auto':
                self.logFile = '%s_%d%02d%02d_%02d%02d.log' % (self.planet,runStart.year,runStart.month,runStart.day,runStart.hour,runStart.minute)
            elif string.lower(log)=='no':
                self.logFile=None
            else:
                self.logFile = log
            self.log=utils.setupLogFile(self.logFile,path='Logs/')
            utils.log(self.log,self.planet+' start '+str(runStart),True)
            self.plot = plot
            self.verbose = verbose

            ### Some convenience values for the specific Neptune observations
            self.fvla_old = [4.86,8.46,14.94,22.46,43.34]
            self.fvla_new = [1.5,3.0,6.0,10.,15.,22.,33.,45.]
            self.fvla = [3.0, 6.0, 10.0, 15.0, 33.0]
            # Impact-parameter doublets along a line at 'anglecap' degrees.
            anglecap = 13.24
            bvalcap = [0.5,0.6,0.7,0.8,0.9,0.925,0.95]
            self.bvla = []
            for bval in bvalcap:
                self.bvla.append([-bval*math.sin(math.pi*anglecap/180.0),-bval*math.cos(math.pi*anglecap/180.0)])

            ### Get frequencies
            if freqs != None:
                freqs = self.__freqRequest__(freqs, freqUnit)
            else:
                self.freqUnit = freqUnit

            ### Get viewing
            self.imRow = False
            if b!= None:
                b = self.__bRequest__(b,[1,1])

            ### Get config
            # 'manual'/'none' means no config file; planetConfig then uses
            # its built-in defaults.
            if config == 'manual' or config=='none':
                config = None
            self.config = pcfg.planetConfig(self.planet,configFile=config,log=self.log,verbose=verbose)

            ### Create atmosphere:  outputs are self.atm.gas, self.atm.cloud and self.atm.layerProperty
            self.atm = atm.Atmosphere(self.planet,config=self.config,log=self.log,verbose=verbose,plot=plot)
            self.atm.run()
            self.log.flush()
예제 #26
0
def main(args):
    """Walk a directory tree of ultrasound studies, extract the machine
    tag from each study's images, and accumulate per-tag statistics.

    Expected attributes on ``args``: dir, out_dir, debug, server_path,
    greedy, use_threads.

    Studies listed in <out_dir>/finished_studies.txt (from a previous run)
    are skipped; their statistics are re-read from their tags.csv files.
    Results are written to <out_dir>/NumberOfTags.json.
    """
    data_folder = Path(args.dir)
    out_images_dir = Path(args.out_dir)

    utils.checkDir(out_images_dir, False)
    utils.setupLogFile(out_images_dir, args.debug)

    # A "study" is a leaf directory (no subdirectories) under data_folder.
    studies = []
    for dirname, dirnames, __ in os.walk(str(data_folder)):
        if len(dirnames) == 0:
            studies.append(Path(dirname))

    logging.info('Found {} studies '.format(len(studies)))
    print('Found {} studies '.format(len(studies)))

    # read the list of acceptable tags in the ultrasound file
    tag_list = utils.getTagsList()
    tag_statistic = dict.fromkeys(tag_list, 0)
    tag_statistic['Unknown'] = 0
    tag_statistic['Undecided'] = 0
    tag_statistic['No tag'] = 0

    # Approximate bounding box of where the tag is written acoording to the
    # us model
    tag_bounding_box = { 'V830':[[40,75], [255,190]],
                         'LOGIQe':  [[0,55], [200,160]],
                         'Voluson S': [[40,75], [255,190]],
                         'LOGIQeCine': [[0,0],[135,90]],
                         'Turbo': [[75,20,],[230,80]],
                         'Voluson E8': [[40,75], [255,250]]
                        }

    # list of ultrasound image types whose tags we do not care about right now.
    non_tag_us = ['Unknown', 'Secondary capture image report',
                    'Comprehensive SR', '3D Dicom Volume']

    # Also read in study directories that might have been finished by a previous run - do not want to rerun them again
    finished_study_file = out_images_dir/'finished_studies.txt'
    finished_studies = None
    if finished_study_file.exists():
        with open(finished_study_file) as f:
            finished_studies = f.read().splitlines()
            finished_studies = [study for study in finished_studies if study.strip()]
    if finished_studies is not None:
        logging.info('Found {} finished studies'.format(len(finished_studies)))
        cleaned_studies = [study for study in studies if str(study) not in finished_studies]
        # Get statistics for the finished studies
        for study in finished_studies:
            logging.info('Will skip: {}'.format(study))
            try:
                infocsv_dir = getStudyOutputFolder(Path(study), data_folder, out_images_dir)
                logging.info('Opening: {}'.format(infocsv_dir))
                tag_file_man = taginfo.TagInfoFile(infocsv_dir)
                tag_file_man.read()
                if tag_file_man.getNumFiles() > 0:
                    for tag in tag_file_man.tag_statistic:
                        if tag not in tag_statistic:
                            tag_statistic[tag] = 0
                        tag_statistic[tag] += tag_file_man.tag_statistic[tag]
            except (OSError, ValueError) as err:
                logging.warning('Error reading previously created tags.csv for subject: {}: {}'.format(study, err))
            except Exception:
                # BUG FIX: was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit while skipping this study.
                logging.warning('Error reading previously created tags.csv for subject: {}'.format(study))
                logging.warning('Unknown except while reading csvt: {}'.format(sys.exc_info()[0]))
    else:
        cleaned_studies = studies
    del studies  # free the unfiltered list; only cleaned_studies is used below

    if args.use_threads:
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            # Start the load operations and mark each future with its URL
            future_tags = {executor.submit(extractTagForStudy, study,
                                            data_folder, out_images_dir, tag_list,
                                            non_tag_us, tag_bounding_box,
                                            Path(args.server_path), args.greedy ): study for study in cleaned_studies}
            for future in concurrent.futures.as_completed(future_tags):
                d = future_tags[future]
                logging.info('Finished processing: {}'.format(d))
                this_tag_statistic = future.result()
                #logging.info(future.result())
                for key, value in this_tag_statistic.items():
                    tag_statistic[key] += value
                # Record completion so a rerun skips this study.
                with open(finished_study_file, "a+") as f:
                    f.write(str(d)+os.linesep)
    else:
        i=1
        for study in cleaned_studies:
            this_tag_statistic = extractTagForStudy(study, data_folder, out_images_dir,
                                                    tag_list, non_tag_us, tag_bounding_box,
                                                    Path(args.server_path), args.greedy)
            logging.info('Finished processing: {}'.format(study))
            for key, value in this_tag_statistic.items():
                tag_statistic[key] += value
            # Progress dots on the console, newline every 50 studies.
            endstr = "\n" if i%50 == 0 else "."
            print("",end=endstr)
            with open(finished_study_file, "a+") as f:
                f.write(str(study)+os.linesep)
            i+=1

    pprint(tag_statistic)
    with open(out_images_dir/"NumberOfTags.json", "w") as outfile:
        json.dump(tag_statistic, outfile, indent=4)
    logging.info(pformat(tag_statistic))
    logging.info('---- DONE ----')
    print('------DONE-----------')
예제 #27
0
def _read_gt_ga(gt_ga_list):
    """Read the ground-truth gestational-age (GA) CSV file.

    The file must have columns 'StudyID', 'ga_boe' and 'ga_avua'; a '.'
    marks a missing value and is stored as -1.

    Returns a dict mapping StudyID -> {'ga_boe': int, 'ga_avua': int},
    or an empty dict if the file could not be read.
    """
    gt_ga = {}
    try:
        with open(gt_ga_list) as f:
            for line in csv.DictReader(f):
                gt_ga[line['StudyID']] = {
                    'ga_boe':
                        int(line['ga_boe']) if line['ga_boe'] != "." else -1,
                    'ga_avua':
                        int(line['ga_avua']) if line['ga_avua'] != "." else -1,
                }
    except OSError as e:
        logging.error(
            'Error reading the gt ga file {} \n Error: {}'.format(
                gt_ga_list, e))
        gt_ga = {}
    return gt_ga


def main(args):
    """Collect ultrasound image files by tag into per-tag output folders.

    Walks ``args.dir`` for tag-info CSV files and, for every file whose tag
    is in the requested tag list, either copies it (plus an optionally
    cropped jpg preview) into ``args.out_dir/<tag>/``, appends its source
    path to a per-tag list file (``--create_only_lists``), or — in
    ``--create_global_list`` mode — gathers one CSV row per already-copied
    file with study id/date and ground-truth GA, written at the end to
    ``all_files_gt_ga.csv``.
    """
    data_folder = Path(args.dir)
    out_folder = Path(args.out_dir)
    utils.checkDir(out_folder, False)

    #  Setup logging:
    utils.setupLogFile(out_folder, args.debug)
    if args.cine_mode:
        tags = utils.getCineTagsList(args.tags)
    else:
        tags = utils.getTagsList(args.tags)
    print('Tags: {}'.format(tags))

    try:
        # Pre-create one output directory per tag (optionally wiping any
        # existing contents when --delete_existing is set).
        for tag in tags:
            out_folder_tag, out_tag_list_file_path, out_tag = getTagDirPathListFile(
                out_folder, tag)
            utils.checkDir(out_folder_tag, args.delete_existing)

    except Exception as e:
        logging.error("Couldn't split the tags string: {}".format(e))
        return

    gt_ga = {}
    if args.gt_ga_list:
        gt_ga = _read_gt_ga(args.gt_ga_list)
        print('Found {} studies with GT Ga'.format(len(gt_ga)))

    # Crop window for the jpg previews: upper-left offset and lower-boundary
    # size passed to sitk.Crop below.
    bounding_box = [[0, 0], [255, 250]]

    # Find all the info.csv files:
    tag_file_names = list(
        data_folder.glob('**/' + taginfo.TagInfoFile.file_name))
    tag_file_list_rows = []

    for tag_file in tag_file_names:
        logging.info('--- PROCESSING: {}'.format(tag_file))
        tag_file_info = taginfo.TagInfoFile(tag_file.parent)
        tag_file_info.read()
        file_tag_pairs = tag_file_info.getFileNamesWithTags(tags)

        if len(file_tag_pairs) == 0:
            continue

        for file_tag_dict in file_tag_pairs:
            file_name = Path(file_tag_dict['File']).name
            name_no_suffix = Path(file_name).stem
            jpg_file_name = tag_file.parent / (name_no_suffix + '.jpg')

            # Load (and optionally crop) the sibling jpg preview, if any.
            cropped = None
            if jpg_file_name.exists():
                simage = sitk.ReadImage(str(jpg_file_name))
                if args.crop_images:
                    size = simage.GetSize()
                    cropped = sitk.Crop(
                        simage, bounding_box[0],
                        [size[i] - bounding_box[1][i] for i in range(2)])
                else:
                    cropped = simage

            tag = file_tag_dict['tag']
            tag_folder, out_tag_list_file_path, out_tag = getTagDirPathListFile(
                out_folder, tag)

            target_simlink_name = tag_folder / file_name

            # Get the data for the global list
            if args.create_global_list:
                if target_simlink_name.exists():
                    tag_file_row = {}
                    study_name = (tag_file.parent).name
                    pos = study_name.find('_')
                    if pos == -1:
                        # BUGFIX: the original formatted an undefined name
                        # `study_path` here, which raised NameError instead
                        # of logging the malformed study directory.
                        logging.warning(
                            "Study name in path {} not in the correct format for a valid study"
                            .format(study_name))
                        continue

                    # Study dirs are named '<id>_<YYYYMMDD>...'.
                    study_id = study_name[:pos]
                    study_date = study_name[pos + 1:pos + 9]
                    tag_file_row['study_id'] = study_id
                    tag_file_row['study_date'] = study_date
                    if len(gt_ga) > 0 and study_id in gt_ga:
                        # -1 means "missing" in gt_ga; export as empty cell.
                        tag_file_row['ga_boe'] = str(
                            gt_ga[study_id]['ga_boe']
                        ) if gt_ga[study_id]['ga_boe'] > 0 else ''
                        tag_file_row['ga_avua'] = str(
                            gt_ga[study_id]['ga_avua']
                        ) if gt_ga[study_id]['ga_avua'] > 0 else ''
                    else:
                        tag_file_row['ga_boe'] = ''
                        tag_file_row['ga_avua'] = ''

                    tag_file_row['file_path'] = target_simlink_name
                    tag_file_row['tag'] = out_tag
                    tag_file_list_rows.append(tag_file_row)
                else:
                    logging.info(
                        'The file: {}, study id: {} does not exist'.format(
                            target_simlink_name, (tag_file.parent).name))
                continue

            # If not in global list generation mode, deal with the file based on what has been requested.
            out_jpg_name = tag_folder / (name_no_suffix + '.jpg')
            if os.path.exists(target_simlink_name):
                # A file with this name was copied before: disambiguate with
                # a numeric suffix based on how many same-stem files already
                # exist in the tag folder.
                logging.info('<---Found duplicates! ----> ')
                ext = Path(file_name).suffix
                # BUGFIX: the original used an undefined name `stem` in this
                # branch (NameError); the stem of the current file is
                # name_no_suffix.
                all_target_simlink_files = list(
                    Path(tag_folder).glob(name_no_suffix + '*' + ext))
                dup_suffix = '_' + str(len(all_target_simlink_files))
                target_simlink_name = tag_folder / (
                    name_no_suffix + dup_suffix + ext)
                # BUGFIX: the original appended '.jpg' twice, producing
                # '<name>_<n>.jpg.jpg'.
                out_jpg_name = tag_folder / (
                    name_no_suffix + dup_suffix + '.jpg')

            if cropped is not None:
                logging.info('Writing jpg image: {}'.format(out_jpg_name))
                sitk.WriteImage(cropped, str(out_jpg_name))

            source_file = Path(args.som_home_dir) / Path(file_tag_dict['File'])
            if not args.create_only_lists:
                logging.info('Copying file: {} -> {}, study:{}'.format(
                    file_name, target_simlink_name, (tag_file.parent).stem))
                try:
                    shutil.copyfile(source_file, target_simlink_name)
                except FileNotFoundError:
                    # BUGFIX: the original formatted an undefined name
                    # `file` here (NameError); log the missing source path.
                    logging.warning(
                        "Couldn't find file: {}".format(source_file))
                    continue
                except PermissionError:
                    logging.warning(
                        "Didn't have enough permissions to copy to target: {}".
                        format(target_simlink_name))
                    continue
            else:
                # List-only mode: record the source path instead of copying.
                with open(out_tag_list_file_path, "a") as fh:
                    fh.write(str(source_file) + "\n")

    if args.create_global_list and len(tag_file_list_rows) > 0:
        logging.info('Number of tag file rows: {}, writing'.format(
            len(tag_file_list_rows)))
        outfilepath = out_folder / 'all_files_gt_ga.csv'
        try:
            csvwrap.CSVWrap.writeCSV(tag_file_list_rows, outfilepath)
        except IOError as e:
            logging.error(
                'Error writing the output file: {} \n Error: {}'.format(
                    outfilepath, e))
    logging.info('----- DONE -----')