def main(): """ *The main function used when ``plotting`` run as a single script from the cl* """ ################ > IMPORTS ################ ## STANDARD LIB ## ## THIRD PARTY ## ## LOCAL APPLICATION ## import dryxPython.commonutils as cu dbConn, log = settings( dbConn=False, log=True) ## START LOGGING ## startTime = cu.get_now_sql_datetime() log.info('--- STARTING TO RUN THE plotting AT %s' % (startTime,)) # WRITE CODE HERE if dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = cu.get_now_sql_datetime() runningTime = cu.calculate_time_difference(startTime, endTime) log.info('-- FINISHED ATTEMPT TO RUN THE plotting AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
def main(arguments=None): """ *The main function used when ``add_mavericks_tags_to_voodoopad.py`` is run as a single script from the cl, or when installed as a cl command* """ ########## IMPORTS ########## ## STANDARD LIB ## ## THIRD PARTY ## ## LOCAL APPLICATION ## ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECIEVED ## # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED if arguments == None: arguments = docopt(__doc__) # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS if "--logger" not in arguments or arguments["--logger"] is None: log = dl.console_logger( level="DEBUG" ) log.debug('logger setup') # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): varname = arg.replace("--", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.info( '--- STARTING TO RUN THE add_mavericks_tags_to_voodoopad.py AT %s' % (startTime,)) # call the worker function add_mavericks_tags_to_voodoopad( log=log, pathToVpspotlight=pathToVpspotlight, ) if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = dcu.get_now_sql_datetime() runningTime = dcu.calculate_time_difference(startTime, endTime) log.info( '-- FINISHED ATTEMPT TO RUN THE add_mavericks_tags_to_voodoopad.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
def main(): """ *Used for debugging Key Arguments: - - dbConn -- mysql database connection - log -- logger Return: - None* """ ################ > IMPORTS ################ ## STANDARD LIB ## ## THIRD PARTY ## ## LOCAL APPLICATION ## import pesstoMarshallPythonPath as pp pp.set_python_path() import pmCommonUtils as p import dryxPython.commonutils as cu ################ > SETUP ################## # SETUP DB CONNECTION AND A LOGGER dbConn, log = p.settings() ## START LOGGING ## startTime = cu.get_now_sql_datetime() log.info('--- STARTING TO RUN THE fitstools AT %s' % (startTime,)) ################ > VARIABLE SETTINGS ###### ################ >ACTION(S) ############### dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = cu.get_now_sql_datetime() runningTime = cu.calculate_time_difference(startTime, endTime) log.info('-- FINISHED ATTEMPT TO RUN THE fitstools AT %s (RUNTIME: %s) --' % (endTime, runningTime,)) return
def main(): """ *The main function used when ``csvtools.py`` run as a single script from the cl* """ ########## PRE-IMPORT SETUP ########## relativePathToProjectRoot = "../../../" import dryxPython.projectsetup as dps projectSetup = dps.projectSetup( dbConn=False, relativePathToProjectRoot=relativePathToProjectRoot ) global settings, contentPaths dbConn, log, settings, contentPaths = projectSetup.get_project_atrributes() ########## IMPORTS ########## ## STANDARD LIB ## ## THIRD PARTY ## ## LOCAL APPLICATION ## import dryxPython.commonutils as cu ## START LOGGING ## startTime = cu.get_now_sql_datetime() log.info('--- STARTING TO RUN THE csvtools.py AT %s' % (startTime,)) # SET GLOBAL VARIABLES # WRITE CODE HERE if dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = cu.get_now_sql_datetime() runningTime = cu.calculate_time_difference(startTime, endTime) log.info( '-- FINISHED ATTEMPT TO RUN THE csvtools.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
def main(arguments=None): """ The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command """ # setup the command-line util settings su = setup_main_clutil( arguments=arguments, docString=__doc__, logLevel="DEBUG", options_first=False, projectName="sherlock" ) arguments, settings, log, dbConn = su.setup() # tab completion for raw_input readline.set_completer_delims(' \t\n;') readline.parse_and_bind("tab: complete") readline.set_completer(tab_complete) # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.debug( '--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime,)) # set options interactively if user requests if "interactiveFlag" in locals() and interactiveFlag: # load previous settings moduleDirectory = os.path.dirname(__file__) + "/resources" pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals() try: with open(pathToPickleFile): pass previousSettingsExist = True except: previousSettingsExist = False previousSettings = {} if previousSettingsExist: previousSettings = pickle.load(open(pathToPickleFile, "rb")) # x-raw-input # x-boolean-raw-input # x-raw-input-with-default-value-from-previous-settings # save the most recently used requests pickleMeObjects = [] pickleMe = {} theseLocals = locals() for k in pickleMeObjects: pickleMe[k] = theseLocals[k] pickle.dump(pickleMe, open(pathToPickleFile, "wb")) # call the worker function # x-if-settings-or-database-credientials if match: sherlock = classifier( log=log, settings=settings, update=updateFlag, transientIdList=[] ) sherlock.get() if clean: cleaner = cleanup_database_tables( log=log, settings=settings ) cleaner.get() if wiki: updateWiki = update_wiki_pages( log=log, settings=settings ) updateWiki.get() if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = dcu.get_now_sql_datetime() runningTime = dcu.calculate_time_difference(startTime, endTime) log.debug('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
def main(arguments=None): """ The main function used when ``importers_clutils.py`` is run as a single script from the cl, or when installed as a cl command """ # setup the command-line util settings su = setup_main_clutil( arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName="sherlock" ) arguments, settings, log, dbConn = su.setup() # tab completion for raw_input readline.set_completer_delims(" \t\n;") readline.parse_and_bind("tab: complete") readline.set_completer(tab_complete) # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug("%s = %s" % (varname, val)) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.info("--- STARTING TO RUN THE importers_clutils.py AT %s" % (startTime,)) # set options interactively if user requests if "interactiveFlag" in locals() and interactiveFlag: # load previous settings moduleDirectory = os.path.dirname(__file__) + "/resources" pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals() try: with open(pathToPickleFile): pass previousSettingsExist = True except: previousSettingsExist = False previousSettings = {} if previousSettingsExist: previousSettings = pickle.load(open(pathToPickleFile, "rb")) # x-raw-input # x-boolean-raw-input # x-raw-input-with-default-value-from-previous-settings # save the most recently used requests pickleMeObjects = [] pickleMe = {} theseLocals = locals() for k in pickleMeObjects: pickleMe[k] = theseLocals[k] pickle.dump(pickleMe, open(pathToPickleFile, "wb")) # call the worker function # x-if-settings-or-database-credientials if cat: if cat_name == "milliquas": testObject = milliquasImporter( log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name ) testObject.get() if cat_name == "veron": testObject = veronImporter( log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name ) testObject.get() if "sdss" in cat_name: testObject = sdssImporter( log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name ) testObject.get() if "ned_d" in cat_name: testObject = nedImporter( log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name ) testObject.get() elif stream: if "pessto" in stream_name: testObject = pesstoImporter( log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name ) testObject.get() if "ifs" in stream_name: testObject = ifsImporter( log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name ) testObject.get() if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = dcu.get_now_sql_datetime() runningTime = dcu.calculate_time_difference(startTime, endTime) log.info("-- FINISHED ATTEMPT TO RUN THE importers_clutils.py AT %s (RUNTIME: %s) --" % (endTime, runningTime)) return
def main(arguments=None): """ *The main function used when ``createpythonpackage.py`` is run as a single script from the cl, or when installed as a cl command* """ ########## IMPORTS ########## ## STANDARD LIB ## ## THIRD PARTY ## ## LOCAL APPLICATION ## ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECIEVED ## # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED if arguments == None: arguments = docopt(__doc__) # x-unpackge-settings-in-main-function # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS if 'settings' in locals() and "logging settings" in settings: log = dl.setup_dryx_logging( yaml_file=arguments["--settingsFile"] ) elif "--logger" not in arguments or arguments["--logger"] is None: log = dl.console_logger( level="WARNING" ) log.debug('logger setup') # x-setup-database-connection-in-main-function # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): varname = arg.replace("--", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.info( '--- STARTING TO RUN THE createpythonpackage.py AT %s' % (startTime,)) log.debug('locals(): %s' % (locals(),)) # call the worker function if "packageName" in locals() and packageName and "location" in locals(): createpythonpackage( log=log, packageName=packageName, location=location, ) elif "subPackageName" in locals() and subPackageName: createpythonsubpackage( log=log, subPackageName=subPackageName, pathToHostDirectory=pathToHostDirectory, ) elif "moduleName" in locals() and moduleName: createpythonmodule( log=log, moduleName=moduleName, pathToHostDirectory=pathToHostDirectory, ) if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = dcu.get_now_sql_datetime() runningTime = dcu.calculate_time_difference(startTime, endTime) log.info( '-- FINISHED ATTEMPT TO RUN THE createpythonpackage.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
def qubits(clArgs=None): """ *qubits ====================== :Summary: The main MCS project file. A Monte Carlo Simulator of a PS1 supernova survey Many parameters can be set and customised in the yaml settings (see settings file) :Author: David Young :Date Created: April 18, 2013 :dryx syntax: - ``xxx`` = come back here and do some more work - ``_someObject`` = a 'private' object that should only be changed for debugging :Notes: - If you have any questions requiring this script please email me: [email protected] Usage: qubits -s <pathToSettingsFile> -o <pathToOutputDirectory> -d <pathToSpectralDatabase> -h, --help show this help message -v, --version print version -s, --settings provide a path to the settings file -d, --database provide the path to the root directory containing your nested-folders and files spectral database -o, --output provide a path to an output directory for the results of the simulations* """ ################ > IMPORTS ################ ## STANDARD LIB ## import sys import os from datetime import datetime, date, time ## THIRD PARTY ## from docopt import docopt import yaml ## LOCAL APPLICATION ## from . import commonutils as cu from . import surveysim as ss from . import datagenerator as dg from . import results as r import dryxPython.commonutils as dcu from . import universe as u import dryxPython.mmd.mmd as dmd # SETUP AN EMPTY LOGGER (IF REQUIRED) log = _set_up_command_line_tool() if clArgs == None: clArgs = docopt(qubits.__doc__) pathToOutputDirectory = clArgs["<pathToOutputDirectory>"] pathToSettingsFile = clArgs["<pathToSettingsFile>"] pathToSpectralDatabase = clArgs["<pathToSpectralDatabase>"] pathToOutputDirectory = os.path.abspath(pathToOutputDirectory) + "/" pathToSettingsFile = os.path.abspath(pathToSettingsFile) pathToSpectralDatabase = os.path.abspath(pathToSpectralDatabase) + "/" # IMPORT THE SIMULATION SETTINGS (allSettings, programSettings, limitingMags, sampleNumber, peakMagnitudeDistributions, explosionDaysFromSettings, extendLightCurveTail, relativeSNRates, lowerRedshiftLimit, upperRedshiftLimit, redshiftResolution, restFrameFilter, kCorrectionTemporalResolution, kCorPolyOrder, kCorMinimumDataPoints, extinctionType, extinctionConstant, hostExtinctionDistributions, galacticExtinctionDistribution, surveyCadenceSettings, snLightCurves, surveyArea, CCSNRateFraction, transientToCCSNRateFraction, extraSurveyConstraints, lightCurvePolyOrder, logLevel) = cu.read_in_survey_parameters( log, pathToSettingsFile=pathToSettingsFile) logFilePath = pathToOutputDirectory + "/qubits.log" del log log = _set_up_command_line_tool(level=str(logLevel), logFilePath=logFilePath) # dbConn, log = cu.settings( # pathToSettingsFile=pathToSettingsFile, # dbConn=False, # log=True # ) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime, )) resultsDict = {} pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/" dcu.dryx_mkdir(log, directoryPath=pathToOutputPlotDirectory) pathToResultsFolder = pathToOutputDirectory + "/results/" dcu.dryx_mkdir(log, directoryPath=pathToResultsFolder) # GENERATE THE DATA FOR SIMULATIONS if programSettings['Extract Lightcurves from Spectra']: log.info('generating the Lightcurves') dg.generate_model_lightcurves( log=log, pathToSpectralDatabase=pathToSpectralDatabase, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, explosionDaysFromSettings=explosionDaysFromSettings, extendLightCurveTail=extendLightCurveTail, 
polyOrder=lightCurvePolyOrder) if programSettings['Generate KCorrection Database']: log.info('generating the kcorrection data') dg.generate_kcorrection_listing_database( log, pathToOutputDirectory=pathToOutputDirectory, pathToSpectralDatabase=pathToSpectralDatabase, restFrameFilter=restFrameFilter, temporalResolution=kCorrectionTemporalResolution, redshiftResolution=redshiftResolution, redshiftLower=lowerRedshiftLimit, redshiftUpper=upperRedshiftLimit + redshiftResolution) log.info('generating the kcorrection polynomials') dg.generate_kcorrection_polynomial_database( log, pathToOutputDirectory=pathToOutputDirectory, restFrameFilter=restFrameFilter, kCorPolyOrder=kCorPolyOrder, # ORDER OF THE POLYNOMIAL TO FIT kCorMinimumDataPoints=kCorMinimumDataPoints, redshiftResolution=redshiftResolution, redshiftLower=lowerRedshiftLimit, redshiftUpper=upperRedshiftLimit + redshiftResolution, plot=programSettings['Generate KCorrection Plots']) if programSettings['Run the Simulation']: # CREATE THE OBSERVABLE UNIVERSE! log.info('generating the redshift array') redshiftArray = u.random_redshift_array( log, sampleNumber, lowerRedshiftLimit, upperRedshiftLimit, redshiftResolution=redshiftResolution, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) resultsDict['Redshifts'] = redshiftArray.tolist() log.info('generating the SN type array') snTypesArray = u.random_sn_types_array( log, sampleNumber, relativeSNRates, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) resultsDict['SN Types'] = snTypesArray.tolist() log.info('generating peak magnitudes for the SNe') peakMagnitudesArray = u.random_peak_magnitudes( log, peakMagnitudeDistributions, snTypesArray, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the SN host extictions array') hostExtinctionArray = u.random_host_extinction( log, sampleNumber, extinctionType, extinctionConstant, hostExtinctionDistributions, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the SN galactic extictions array') galacticExtinctionArray = u.random_galactic_extinction( log, sampleNumber, extinctionType, extinctionConstant, galacticExtinctionDistribution, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the raw lightcurves for the SNe') rawLightCurveDict = u.generate_numpy_polynomial_lightcurves( log, snLightCurves=snLightCurves, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the k-correction array for the SNe') kCorrectionArray = u.build_kcorrection_array( log, redshiftArray, snTypesArray, snLightCurves, pathToOutputDirectory=pathToOutputDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the observed lightcurves for the SNe') observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame( log, snLightCurves=snLightCurves, rawLightCurveDict=rawLightCurveDict, redshiftArray=redshiftArray, snTypesArray=snTypesArray, peakMagnitudesArray=peakMagnitudesArray, kCorrectionArray=kCorrectionArray, hostExtinctionArray=hostExtinctionArray, galacticExtinctionArray=galacticExtinctionArray, restFrameFilter=restFrameFilter, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, polyOrder=lightCurvePolyOrder, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating 
the survey observation cadence') cadenceDictionary = ss.survey_cadence_arrays( log, surveyCadenceSettings, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info('determining if the SNe are discoverable by the survey') discoverableList = ss.determine_if_sne_are_discoverable( log, redshiftArray=redshiftArray, limitingMags=limitingMags, observedFrameLightCurveInfo=observedFrameLightCurveInfo, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info( 'determining the day (if and) when each SN is first discoverable by the survey' ) ripeDayList = ss.determine_when_sne_are_ripe_for_discovery( log, redshiftArray=redshiftArray, limitingMags=limitingMags, discoverableList=discoverableList, observedFrameLightCurveInfo=observedFrameLightCurveInfo, plot=programSettings['Plot Simulation Helper Plots']) # log.info('determining the day when each SN is disappears fainter than the survey limiting mags') # disappearDayList = determine_when_discovered_sne_disappear( # log, # redshiftArray=redshiftArray, # limitingMags=limitingMags, # ripeDayList=ripeDayList, # observedFrameLightCurveInfo=observedFrameLightCurveInfo, # plot=programSettings['Plot Simulation Helper Plots']) log.info('determining if and when each SN is discovered by the survey') lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered( log, limitingMags=limitingMags, ripeDayList=ripeDayList, cadenceDictionary=cadenceDictionary, observedFrameLightCurveInfo=observedFrameLightCurveInfo, extraSurveyConstraints=extraSurveyConstraints, plot=programSettings['Plot Simulation Helper Plots']) resultsDict[ 'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList resultsDict[ 'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList resultsDict['Campaign Length'] = snCampaignLengthList resultsDict['Cadence Dictionary'] = cadenceDictionary resultsDict['Peak Apparent Magnitudes'] = peakAppMagList now = datetime.now() now = now.strftime("%Y%m%dt%H%M%S") fileName = pathToOutputDirectory + \ "simulation_results_%s.yaml" % (now,) stream = file(fileName, 'w') yamlContent = dict(allSettings.items() + resultsDict.items()) yaml.dump(yamlContent, stream, default_flow_style=False) stream.close() # COMPILE AND PLOT THE RESULTS if programSettings['Compile and Plot Results']: pathToYamlFile = pathToOutputDirectory + \ programSettings['Simulation Results File Used for Plots'] result_log = r.log_the_survey_settings(log, pathToYamlFile) snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results( log, pathToYamlFile) snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate( log, lightCurveDiscoveryTimes, snSurveyDiscoveryTimes, redshifts, surveyCadenceSettings=surveyCadenceSettings, lowerRedshiftLimit=lowerRedshiftLimit, upperRedshiftLimit=upperRedshiftLimit, redshiftResolution=redshiftResolution, surveyArea=surveyArea, CCSNRateFraction=CCSNRateFraction, transientToCCSNRateFraction=transientToCCSNRateFraction, peakAppMagList=peakAppMagList, snCampaignLengthList=snCampaignLengthList, extraSurveyConstraints=extraSurveyConstraints, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """ ## Results ## This simulated survey discovered a total of **%s** transients per year. 
An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification and a further **%s** transients where detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots. """ % ( totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints["Observable for at least ? number of days"]) cadenceWheelLink = r.plot_cadence_wheel( log, cadenceDictionary, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """%s""" % (cadenceWheelLink, ) discoveryMapLink = r.plot_sn_discovery_map( log, snSurveyDiscoveryTimes, peakAppMagList, snCampaignLengthList, redshifts, extraSurveyConstraints, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """%s""" % (discoveryMapLink, ) ratioMapLink = r.plot_sn_discovery_ratio_map( log, snSurveyDiscoveryTimes, redshifts, peakAppMagList, snCampaignLengthList, extraSurveyConstraints, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """%s""" % (ratioMapLink, ) result_log += """%s""" % (snRatePlotLink, ) now = datetime.now() now = now.strftime("%Y%m%dt%H%M%S") mdLogPath = pathToResultsFolder + \ "simulation_result_log_%s.md" % (now,) mdLog = open(mdLogPath, 'w') mdLog.write(result_log) mdLog.close() dmd.convert_to_html(log=log, pathToMMDFile=mdLogPath, css="amblin") # if dbConn: # dbConn.commit() # dbConn.close() ## FINISH LOGGING ## endTime = dcu.get_now_sql_datetime() runningTime = dcu.calculate_time_difference(startTime, endTime) log.info('-- FINISHED ATTEMPT TO RUN THE qubits AT %s (RUNTIME: %s) --' % ( endTime, runningTime, )) # TEST THE ARGUMENTS ## VARIABLES ## return None
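# A minimal sketch (an assumption, not part of the source) of calling qubits()
# directly, bypassing docopt, by passing the three path arguments the function
# reads from clArgs. All paths below are hypothetical placeholders.
if __name__ == "__main__":
    qubits(clArgs={
        "<pathToSettingsFile>": "settings.yaml",            # hypothetical settings file
        "<pathToOutputDirectory>": "simulation_output/",    # hypothetical output directory
        "<pathToSpectralDatabase>": "spectral_database/",   # hypothetical spectra directory
    })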
def main(arguments=None): """ *The main function used when ``xy_scatter.py`` is run as a single script from the cl, or when installed as a cl command* """ ########## IMPORTS ########## ## STANDARD LIB ## ## THIRD PARTY ## ## LOCAL APPLICATION ## ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECIEVED ## # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED if arguments == None: arguments = docopt(__doc__) # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS if 'settings' in locals() and "logging settings" in settings: log = dl.setup_dryx_logging( yaml_file=arguments["--settingsFile"] ) elif "--logger" not in arguments or arguments["--logger"] is None: log = dl.console_logger( level="DEBUG" ) log.debug('logger setup') # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): varname = arg.replace("--", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.info( '--- STARTING TO RUN THE xy_scatter.py AT %s' % (startTime,)) # if "axisLabels" not in globals(): # axisLabels = False # if "title" not in globals(): # title = False # if "dataLabels" not in globals(): # dataLabels = False # call the worker function xy_scatter( log=log, x=x, y=y, axisLabels=axisLabels, title=title, dataLabels=dataLabels, colors=colors ) if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = dcu.get_now_sql_datetime() runningTime = dcu.calculate_time_difference(startTime, endTime) log.info( '-- FINISHED ATTEMPT TO RUN THE xy_scatter.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
def main(arguments=None): """ *The main function used when ``update_git_repos.py`` is run as a single script from the cl, or when installed as a cl command* """ ########## IMPORTS ########## ## STANDARD LIB ## ## THIRD PARTY ## ## LOCAL APPLICATION ## import dryxPython.commonutils as dcu ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECIEVED ## # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED if arguments == None: arguments = docopt(__doc__) # UNPACK SETTINGS if "--settingsFile" in arguments and arguments["--settingsFile"]: import yaml stream = file(arguments["--settingsFile"], 'r') settings = yaml.load(stream) stream.close() # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS if 'settings' in locals() and "logging settings" in settings: log = dl.setup_dryx_logging( yaml_file=arguments["--settingsFile"] ) elif "--logger" not in arguments or arguments["--logger"] is None: log = dl.console_logger( level="DEBUG" ) log.debug('logger setup') # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): varname = arg.replace("--", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.info( '--- STARTING TO RUN THE git_update_script.py AT %s' % (startTime,)) # call the worker function # x-if-settings-or-database-credientials if "git repos" in settings: for repo in settings["git repos"]: log.debug('repo["path"]: %s' % (repo["path"],)) log.debug('repo["branchToUpdate"]: %s' % (repo["branchToUpdate"],)) update_git_repos( log=log, gitProjectRoot=repo["path"], branchToUpdate=repo["branchToUpdate"] ) ## FINISH LOGGING ## endTime = dcu.get_now_sql_datetime() runningTime = dcu.calculate_time_difference(startTime, endTime) log.info( '-- FINISHED ATTEMPT TO RUN THE git_update_script.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return