Example #1
def main():
    """
    *The main function used when ``plotting`` is run as a single script from the cl*
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    ## LOCAL APPLICATION ##
    import dryxPython.commonutils as cu

    dbConn, log = settings(
        dbConn=False,
        log=True)

    ## START LOGGING ##
    startTime = cu.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE plotting AT %s' % (startTime,))

    # WRITE CODE HERE

    if dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = cu.get_now_sql_datetime()
    runningTime = cu.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE plotting AT %s (RUNTIME: %s) --' %
             (endTime, runningTime, ))

    return
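The start/finish timing pattern above leans on dryxPython's ``get_now_sql_datetime`` and ``calculate_time_difference`` helpers. A minimal standard-library sketch of the same idea (names are illustrative, not the dryxPython API):

import logging
from datetime import datetime

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("plotting")

startTime = datetime.now()
log.info('--- STARTING TO RUN THE plotting AT %s' % (startTime,))

# ... do the work ...

endTime = datetime.now()
runningTime = endTime - startTime  # a timedelta
log.info('-- FINISHED ATTEMPT TO RUN THE plotting AT %s (RUNTIME: %s) --' %
         (endTime, runningTime))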
def main(arguments=None):
    """
    *The main function used when ``add_mavericks_tags_to_voodoopad.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    ########## IMPORTS ##########
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    from docopt import docopt
    ## LOCAL APPLICATION ##
    import dryxPython.commonutils as dcu

    ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECEIVED ##
    # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED
    if arguments is None:
        arguments = docopt(__doc__)

    # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS
    if "--logger" not in arguments or arguments["--logger"] is None:
        log = dl.console_logger(
            level="DEBUG"
        )
        log.debug('logger setup')

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        varname = arg.replace("--", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE add_mavericks_tags_to_voodoopad.py AT %s' %
        (startTime,))

    # call the worker function
    add_mavericks_tags_to_voodoopad(
        log=log,
        pathToVpspotlight=pathToVpspotlight,
    )

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = dcu.get_now_sql_datetime()
    runningTime = dcu.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE add_mavericks_tags_to_voodoopad.py AT %s (RUNTIME: %s) --' %
        (endTime, runningTime, ))

    return
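The ``exec``-based loop above turns every docopt key into a local variable. A sketch of the same unpacking into an ordinary dict, which avoids ``exec`` entirely (the helper name is illustrative):

from docopt import docopt

def unpack_arguments(arguments):
    # strip the leading "--" and any "<"/">" markers from docopt keys and
    # collect the values in a plain dict instead of exec'ing new locals
    unpacked = {}
    for arg, val in arguments.items():
        varname = arg.replace("--", "").replace("<", "").replace(">", "")
        unpacked[varname] = val
    return unpacked

# e.g. arguments = docopt(__doc__); options = unpack_arguments(arguments)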
Example #3
def main():
    """
    *Used for debugging

    Key Arguments:
        - dbConn -- mysql database connection
        - log -- logger

    Return:
        - None*
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    ## LOCAL APPLICATION ##
    import pesstoMarshallPythonPath as pp
    pp.set_python_path()
    import pmCommonUtils as p
    import dryxPython.commonutils as cu

    ################ > SETUP ##################
    # SETUP DB CONNECTION AND A LOGGER
    dbConn, log = p.settings()
    ## START LOGGING ##
    startTime = cu.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE fitstools AT %s' % (startTime,))

    ################ > VARIABLE SETTINGS ######
    ################ >ACTION(S) ###############

    dbConn.commit()
    dbConn.close()
    ## FINISH LOGGING ##
    endTime = cu.get_now_sql_datetime()
    runningTime = cu.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE fitstools AT %s (RUNTIME: %s) --' %
             (endTime, runningTime,))
    return
Example #4
def main():
    """
    *The main function used when ``csvtools.py`` is run as a single script from the cl*
    """
    ########## PRE-IMPORT SETUP ##########
    relativePathToProjectRoot = "../../../"
    import dryxPython.projectsetup as dps
    projectSetup = dps.projectSetup(
        dbConn=False,
        relativePathToProjectRoot=relativePathToProjectRoot
    )
    global settings, contentPaths
    dbConn, log, settings, contentPaths = projectSetup.get_project_atrributes()

    ########## IMPORTS ##########
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    ## LOCAL APPLICATION ##
    import dryxPython.commonutils as cu

    ## START LOGGING ##
    startTime = cu.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE csvtools.py AT %s' % (startTime,))

    # SET GLOBAL VARIABLES

    # WRITE CODE HERE

    if dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = cu.get_now_sql_datetime()
    runningTime = cu.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE csvtools.py AT %s (RUNTIME: %s) --' %
        (endTime, runningTime, ))

    return
Example #5
def main(arguments=None):
    """
    The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command
    """
    # setup the command-line util settings
    su = setup_main_clutil(
        arguments=arguments,
        docString=__doc__,
        logLevel="DEBUG",
        options_first=False,
        projectName="sherlock"
    )
    arguments, settings, log, dbConn = su.setup()

    # tab completion for raw_input
    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(tab_complete)

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.debug(
        '--- STARTING TO RUN THE cl_utils.py AT %s' %
        (startTime,))

    # set options interactively if user requests
    if "interactiveFlag" in locals() and interactiveFlag:

        # load previous settings
        moduleDirectory = os.path.dirname(__file__) + "/resources"
        pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals()
        try:
            with open(pathToPickleFile):
                pass
            previousSettingsExist = True
        except:
            previousSettingsExist = False
        previousSettings = {}
        if previousSettingsExist:
            previousSettings = pickle.load(open(pathToPickleFile, "rb"))

        # x-raw-input
        # x-boolean-raw-input
        # x-raw-input-with-default-value-from-previous-settings

        # save the most recently used requests
        pickleMeObjects = []
        pickleMe = {}
        theseLocals = locals()
        for k in pickleMeObjects:
            pickleMe[k] = theseLocals[k]
        pickle.dump(pickleMe, open(pathToPickleFile, "wb"))

    # call the worker function
    # x-if-settings-or-database-credientials

    if match:
        sherlock = classifier(
            log=log,
            settings=settings,
            update=updateFlag,
            transientIdList=[]
        )
        sherlock.get()
    if clean:
        cleaner = cleanup_database_tables(
            log=log,
            settings=settings
        )
        cleaner.get()
    if wiki:
        updateWiki = update_wiki_pages(
            log=log,
            settings=settings
        )
        updateWiki.get()

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = dcu.get_now_sql_datetime()
    runningTime = dcu.calculate_time_difference(startTime, endTime)
    log.debug('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
              (endTime, runningTime, ))

    return
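Example #5 registers a ``tab_complete`` completer that is defined elsewhere in the module. A minimal completer compatible with the ``readline`` calls used above might look like this (the vocabulary is illustrative; a real completer would usually offer file paths or sub-command names):

import readline

def tab_complete(text, state):
    # return the state-th completion of `text` from a fixed vocabulary,
    # or None when there are no more matches
    vocabulary = ["match", "clean", "wiki", "dbmatch"]
    matches = [word for word in vocabulary if word.startswith(text)]
    return matches[state] if state < len(matches) else None

readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(tab_complete)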
Example #6
def main(arguments=None):
    """
    *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    # setup the command-line util settings
    su = tools(arguments=arguments,
               docString=__doc__,
               logLevel="WARNING",
               options_first=False,
               projectName="qubits")
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if varname == "import":
            varname = "iimport"
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val, ))
        else:
            exec(varname + " = %s" % (val, ))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (
            varname,
            val,
        ))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime, ))

    if init:
        from . import workspace
        ws = workspace(log=log, pathToWorkspace=pathToWorkspace)
        ws.setup()
        return

    # IMPORT THE SIMULATION SETTINGS
    (allSettings, programSettings, limitingMags, sampleNumber,
     peakMagnitudeDistributions, explosionDaysFromSettings,
     extendLightCurveTail, relativeSNRates, lowerRedshiftLimit,
     upperRedshiftLimit, redshiftResolution, restFrameFilter,
     kCorrectionTemporalResolution, kCorPolyOrder, kCorMinimumDataPoints,
     extinctionType, extinctionConstant, hostExtinctionDistributions,
     galacticExtinctionDistribution, surveyCadenceSettings, snLightCurves,
     surveyArea, CCSNRateFraction, transientToCCSNRateFraction,
     extraSurveyConstraints, lightCurvePolyOrder,
     logLevel) = cu.read_in_survey_parameters(
         log, pathToSettingsFile=pathToSettingsFile)

    logFilePath = pathToOutputDirectory + "/qubits.log"
    del log
    log = _set_up_command_line_tool(level=str(logLevel),
                                    logFilePath=logFilePath)

    # dbConn, log = cu.settings(
    #     pathToSettingsFile=pathToSettingsFile,
    #     dbConn=False,
    #     log=True
    # )

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime, ))

    resultsDict = {}

    pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/"
    dcu.dryx_mkdir(log, directoryPath=pathToOutputPlotDirectory)

    pathToResultsFolder = pathToOutputDirectory + "/results/"
    dcu.dryx_mkdir(log, directoryPath=pathToResultsFolder)

    if not programSettings[
            'Extract Lightcurves from Spectra'] and not programSettings[
                'Generate KCorrection Database'] and not programSettings[
                    'Run the Simulation'] and not programSettings[
                        'Compile and Plot Results']:
        print "All stages of the simulatation have been switched off. Please switch on at least one stage of the simulation under the 'Programming Settings' in the settings file `%(pathToSettingsFile)s`" % locals(
        )

    # GENERATE THE DATA FOR SIMULATIONS
    if programSettings['Extract Lightcurves from Spectra']:
        log.info('generating the Lightcurves')
        dg.generate_model_lightcurves(
            log=log,
            pathToSpectralDatabase=pathToSpectralDatabase,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            explosionDaysFromSettings=explosionDaysFromSettings,
            extendLightCurveTail=extendLightCurveTail,
            polyOrder=lightCurvePolyOrder)
        print "The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml" % locals(
        )
        print "The lightcurve plots can be found in %(pathToOutputPlotDirectory)s" % locals(
        )

    if programSettings['Generate KCorrection Database']:
        log.info('generating the kcorrection data')
        dg.generate_kcorrection_listing_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToSpectralDatabase=pathToSpectralDatabase,
            restFrameFilter=restFrameFilter,
            temporalResolution=kCorrectionTemporalResolution,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution)
        log.info('generating the kcorrection polynomials')
        dg.generate_kcorrection_polynomial_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            restFrameFilter=restFrameFilter,
            kCorPolyOrder=kCorPolyOrder,  # ORDER OF THE POLYNOMIAL TO FIT
            kCorMinimumDataPoints=kCorMinimumDataPoints,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution,
            plot=programSettings['Generate KCorrection Plots'])

        print "The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections" % locals(
        )
        if programSettings['Generate KCorrection Plots']:
            print "The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections" % locals(
            )

    if programSettings['Run the Simulation']:
        # CREATE THE OBSERVABLE UNIVERSE!
        log.info('generating the redshift array')
        redshiftArray = u.random_redshift_array(
            log,
            sampleNumber,
            lowerRedshiftLimit,
            upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])
        resultsDict['Redshifts'] = redshiftArray.tolist()

        log.info('generating the SN type array')
        snTypesArray = u.random_sn_types_array(
            log,
            sampleNumber,
            relativeSNRates,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])
        resultsDict['SN Types'] = snTypesArray.tolist()

        log.info('generating peak magnitudes for the SNe')
        peakMagnitudesArray = u.random_peak_magnitudes(
            log,
            peakMagnitudeDistributions,
            snTypesArray,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the SN host extinctions array')
        hostExtinctionArray = u.random_host_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            hostExtinctionDistributions,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the SN galactic extinctions array')
        galacticExtinctionArray = u.random_galactic_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            galacticExtinctionDistribution,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the raw lightcurves for the SNe')
        rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(
            log,
            snLightCurves=snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the k-correction array for the SNe')
        kCorrectionArray = u.build_kcorrection_array(
            log,
            redshiftArray,
            snTypesArray,
            snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the observed lightcurves for the SNe')
        observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(
            log,
            snLightCurves=snLightCurves,
            rawLightCurveDict=rawLightCurveDict,
            redshiftArray=redshiftArray,
            snTypesArray=snTypesArray,
            peakMagnitudesArray=peakMagnitudesArray,
            kCorrectionArray=kCorrectionArray,
            hostExtinctionArray=hostExtinctionArray,
            galacticExtinctionArray=galacticExtinctionArray,
            restFrameFilter=restFrameFilter,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            polyOrder=lightCurvePolyOrder,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the survey observation cadence')
        cadenceDictionary = ss.survey_cadence_arrays(
            log,
            surveyCadenceSettings,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('determining if the SNe are discoverable by the survey')
        discoverableList = ss.determine_if_sne_are_discoverable(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info(
            'determining the day (if any) when each SN is first discoverable by the survey'
        )
        ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            discoverableList=discoverableList,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            plot=programSettings['Plot Simulation Helper Plots'])

        # log.info('determining the day when each SN becomes fainter than the survey limiting mags')
        # disappearDayList = determine_when_discovered_sne_disappear(
        #     log,
        #     redshiftArray=redshiftArray,
        #     limitingMags=limitingMags,
        #     ripeDayList=ripeDayList,
        #     observedFrameLightCurveInfo=observedFrameLightCurveInfo,
        #     plot=programSettings['Plot Simulation Helper Plots'])

        log.info('determining if and when each SN is discovered by the survey')
        lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(
            log,
            limitingMags=limitingMags,
            ripeDayList=ripeDayList,
            cadenceDictionary=cadenceDictionary,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            extraSurveyConstraints=extraSurveyConstraints,
            plot=programSettings['Plot Simulation Helper Plots'])

        resultsDict[
            'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList
        resultsDict[
            'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList
        resultsDict['Campaign Length'] = snCampaignLengthList
        resultsDict['Cadence Dictionary'] = cadenceDictionary
        resultsDict['Peak Apparent Magnitudes'] = peakAppMagList

        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        fileName = pathToOutputDirectory + \
            "/simulation_results_%s.yaml" % (now,)
        stream = open(fileName, 'w')
        yamlContent = dict(allSettings.items() + resultsDict.items())
        yaml.dump(yamlContent, stream, default_flow_style=False)
        stream.close()

        print "The simulation output file can be found here: %(fileName)s. Remember to update your settings file 'Simulation Results File Used for Plots' parameter with this filename before compiling the results." % locals(
        )
        if programSettings['Plot Simulation Helper Plots']:
            print "The simulation helper-plots found in %(pathToOutputPlotDirectory)s" % locals(
            )

    # COMPILE AND PLOT THE RESULTS
    if programSettings['Compile and Plot Results']:
        pathToYamlFile = pathToOutputDirectory + "/" + \
            programSettings['Simulation Results File Used for Plots']
        result_log = r.log_the_survey_settings(log, pathToYamlFile)
        snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(
            log, pathToYamlFile)
        snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(
            log,
            lightCurveDiscoveryTimes,
            snSurveyDiscoveryTimes,
            redshifts,
            surveyCadenceSettings=surveyCadenceSettings,
            lowerRedshiftLimit=lowerRedshiftLimit,
            upperRedshiftLimit=upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            surveyArea=surveyArea,
            CCSNRateFraction=CCSNRateFraction,
            transientToCCSNRateFraction=transientToCCSNRateFraction,
            peakAppMagList=peakAppMagList,
            snCampaignLengthList=snCampaignLengthList,
            extraSurveyConstraints=extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """
## Results ##

This simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification, and a further **%s** transients were detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots.

        """ % (
            totalRate, tooFaintRate, shortCampaignRate,
            extraSurveyConstraints["Observable for at least ? number of days"])
        cadenceWheelLink = r.plot_cadence_wheel(
            log,
            cadenceDictionary,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (cadenceWheelLink, )
        discoveryMapLink = r.plot_sn_discovery_map(
            log,
            snSurveyDiscoveryTimes,
            peakAppMagList,
            snCampaignLengthList,
            redshifts,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (discoveryMapLink, )
        ratioMapLink = r.plot_sn_discovery_ratio_map(
            log,
            snSurveyDiscoveryTimes,
            redshifts,
            peakAppMagList,
            snCampaignLengthList,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (ratioMapLink, )
        result_log += """%s""" % (snRatePlotLink, )

        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        mdLogPath = pathToResultsFolder + \
            "simulation_result_log_%s.md" % (now,)
        mdLog = open(mdLogPath, 'w')
        mdLog.write(result_log)
        mdLog.close()

        dmd.convert_to_html(log=log, pathToMMDFile=mdLogPath, css="amblin")

        print "Results can be found here: %(pathToResultsFolder)s" % locals()
        html = mdLogPath.replace(".md", ".html")
        print "Open this file in your browser: %(html)s" % locals()

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (
            endTime,
            runningTime,
        ))

    return
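The results dump above merges two dicts with ``dict(allSettings.items() + resultsDict.items())``, which only works on Python 2 where ``items()`` returns lists. A version-agnostic sketch of the same merge-and-dump step (file name and keys are illustrative):

import yaml
from datetime import datetime

allSettings = {"Survey Area": 70.0}
resultsDict = {"Redshifts": [0.1, 0.2]}

# merge the two dictionaries (resultsDict wins on key collisions)
yamlContent = dict(allSettings)
yamlContent.update(resultsDict)

now = datetime.now().strftime("%Y%m%dt%H%M%S")
fileName = "simulation_results_%s.yaml" % (now,)
with open(fileName, "w") as stream:
    yaml.dump(yamlContent, stream, default_flow_style=False)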
Example #7
def main(arguments=None):
    """
    The main function used when ``importers_clutils.py`` is run as a single script from the cl, or when installed as a cl command
    """
    # setup the command-line util settings
    su = setup_main_clutil(
        arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName="sherlock"
    )
    arguments, settings, log, dbConn = su.setup()

    # tab completion for raw_input
    readline.set_completer_delims(" \t\n;")
    readline.parse_and_bind("tab: complete")
    readline.set_completer(tab_complete)

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug("%s = %s" % (varname, val))

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info("--- STARTING TO RUN THE importers_clutils.py AT %s" % (startTime,))

    # set options interactively if user requests
    if "interactiveFlag" in locals() and interactiveFlag:

        # load previous settings
        moduleDirectory = os.path.dirname(__file__) + "/resources"
        pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals()
        try:
            with open(pathToPickleFile):
                pass
            previousSettingsExist = True
        except:
            previousSettingsExist = False
        previousSettings = {}
        if previousSettingsExist:
            previousSettings = pickle.load(open(pathToPickleFile, "rb"))

        # x-raw-input
        # x-boolean-raw-input
        # x-raw-input-with-default-value-from-previous-settings

        # save the most recently used requests
        pickleMeObjects = []
        pickleMe = {}
        theseLocals = locals()
        for k in pickleMeObjects:
            pickleMe[k] = theseLocals[k]
        pickle.dump(pickleMe, open(pathToPickleFile, "wb"))

    # call the worker function
    # x-if-settings-or-database-credientials
    if cat:
        if cat_name == "milliquas":
            testObject = milliquasImporter(
                log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name
            )
            testObject.get()
        if cat_name == "veron":
            testObject = veronImporter(
                log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name
            )
            testObject.get()
        if "sdss" in cat_name:
            testObject = sdssImporter(
                log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name
            )
            testObject.get()
        if "ned_d" in cat_name:
            testObject = nedImporter(
                log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name
            )
            testObject.get()

    elif stream:
        if "pessto" in stream_name:
            testObject = pesstoImporter(
                log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name
            )
            testObject.get()
        if "ifs" in stream_name:
            testObject = ifsImporter(
                log=log, settings=settings, pathToDataFile=pathToDataFile, version=cat_version, catalogueName=cat_name
            )
            testObject.get()

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = dcu.get_now_sql_datetime()
    runningTime = dcu.calculate_time_difference(startTime, endTime)
    log.info("-- FINISHED ATTEMPT TO RUN THE importers_clutils.py AT %s (RUNTIME: %s) --" % (endTime, runningTime))

    return
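Both Example #5 and Example #7 persist the most recently used interactive answers with ``pickle``. A standalone sketch of that load/save round-trip (path and keys are illustrative):

import os
import pickle

pathToPickleFile = "previousSettings.p"

# load previous settings if the pickle file exists
previousSettings = {}
if os.path.exists(pathToPickleFile):
    with open(pathToPickleFile, "rb") as pf:
        previousSettings = pickle.load(pf)

# ... prompt the user, falling back to previousSettings for defaults ...

# save the most recently used answers for next time
pickleMe = {"cat_name": "milliquas"}
with open(pathToPickleFile, "wb") as pf:
    pickle.dump(pickleMe, pf)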
def main(arguments=None):
    """
    *The main function used when ``createpythonpackage.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    ########## IMPORTS ##########
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    from docopt import docopt
    ## LOCAL APPLICATION ##
    import dryxPython.commonutils as dcu

    ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECEIVED ##
    # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED
    if arguments is None:
        arguments = docopt(__doc__)

    # x-unpackge-settings-in-main-function
    # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS
    if 'settings' in locals() and "logging settings" in settings:
        log = dl.setup_dryx_logging(
            yaml_file=arguments["--settingsFile"]
        )
    elif "--logger" not in arguments or arguments["--logger"] is None:
        log = dl.console_logger(
            level="WARNING"
        )
        log.debug('logger setup')
    # x-setup-database-connection-in-main-function

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        varname = arg.replace("--", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE createpythonpackage.py AT %s' %
        (startTime,))

    log.debug('locals(): %s' % (locals(),))

    # call the worker function
    if "packageName" in locals() and packageName and "location" in locals():
        createpythonpackage(
            log=log,
            packageName=packageName,
            location=location,
        )
    elif "subPackageName" in locals() and subPackageName:
        createpythonsubpackage(
            log=log,
            subPackageName=subPackageName,
            pathToHostDirectory=pathToHostDirectory,
        )
    elif "moduleName" in locals() and moduleName:
        createpythonmodule(
            log=log,
            moduleName=moduleName,
            pathToHostDirectory=pathToHostDirectory,
        )

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = dcu.get_now_sql_datetime()
    runningTime = dcu.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE createpythonpackage.py AT %s (RUNTIME: %s) --' %
        (endTime, runningTime, ))

    return
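Several of these examples fall back to ``dl.console_logger`` when no logging settings are provided. When dryxPython is not available, a rough standard-library stand-in can be sketched like this (the function name mirrors the examples but the implementation is an assumption, not the dryxPython code):

import logging

def console_logger(level="WARNING"):
    # log to the console with a timestamped format at the requested level
    logging.basicConfig(
        format="%(asctime)s %(levelname)s: %(message)s",
        level=getattr(logging, level))
    return logging.getLogger(__name__)

log = console_logger(level="DEBUG")
log.debug('logger setup')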
Example #9
def qubits(clArgs=None):
    """
    *qubits
    ======================
    :Summary:
        The main MCS project file.
        A Monte Carlo Simulator of a PS1 supernova survey
        Many parameters can be set and customised in the yaml settings (see settings file)

    :Author:
        David Young

    :Date Created:
        April 18, 2013

    :dryx syntax:
        - ``xxx`` = come back here and do some more work
        - ``_someObject`` = a 'private' object that should only be changed for debugging

    :Notes:
        - If you have any questions regarding this script please email me: [email protected]

    Usage:
        qubits -s <pathToSettingsFile> -o <pathToOutputDirectory> -d <pathToSpectralDatabase>

        -h, --help      show this help message
        -v, --version   print version
        -s, --settings  provide a path to the settings file
        -d, --database  provide the path to the root directory containing your nested-folders and files spectral database
        -o, --output    provide a path to an output directory for the results of the simulations*
    """

    ################ > IMPORTS ################
    ## STANDARD LIB ##
    import sys
    import os
    from datetime import datetime, date, time
    ## THIRD PARTY ##
    from docopt import docopt
    import yaml
    ## LOCAL APPLICATION ##
    from . import commonutils as cu
    from . import surveysim as ss
    from . import datagenerator as dg
    from . import results as r
    import dryxPython.commonutils as dcu
    from . import universe as u
    import dryxPython.mmd.mmd as dmd

    # SETUP AN EMPTY LOGGER (IF REQUIRED)
    log = _set_up_command_line_tool()
    if clArgs is None:
        clArgs = docopt(qubits.__doc__)

    pathToOutputDirectory = clArgs["<pathToOutputDirectory>"]
    pathToSettingsFile = clArgs["<pathToSettingsFile>"]
    pathToSpectralDatabase = clArgs["<pathToSpectralDatabase>"]

    pathToOutputDirectory = os.path.abspath(pathToOutputDirectory) + "/"
    pathToSettingsFile = os.path.abspath(pathToSettingsFile)
    pathToSpectralDatabase = os.path.abspath(pathToSpectralDatabase) + "/"

    # IMPORT THE SIMULATION SETTINGS
    (allSettings, programSettings, limitingMags, sampleNumber,
     peakMagnitudeDistributions, explosionDaysFromSettings,
     extendLightCurveTail, relativeSNRates, lowerRedshiftLimit,
     upperRedshiftLimit, redshiftResolution, restFrameFilter,
     kCorrectionTemporalResolution, kCorPolyOrder, kCorMinimumDataPoints,
     extinctionType, extinctionConstant, hostExtinctionDistributions,
     galacticExtinctionDistribution, surveyCadenceSettings, snLightCurves,
     surveyArea, CCSNRateFraction, transientToCCSNRateFraction,
     extraSurveyConstraints, lightCurvePolyOrder,
     logLevel) = cu.read_in_survey_parameters(
         log, pathToSettingsFile=pathToSettingsFile)

    logFilePath = pathToOutputDirectory + "/qubits.log"
    del log
    log = _set_up_command_line_tool(level=str(logLevel),
                                    logFilePath=logFilePath)

    # dbConn, log = cu.settings(
    #     pathToSettingsFile=pathToSettingsFile,
    #     dbConn=False,
    #     log=True
    # )

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime, ))

    resultsDict = {}

    pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/"
    dcu.dryx_mkdir(log, directoryPath=pathToOutputPlotDirectory)

    pathToResultsFolder = pathToOutputDirectory + "/results/"
    dcu.dryx_mkdir(log, directoryPath=pathToResultsFolder)

    # GENERATE THE DATA FOR SIMULATIONS
    if programSettings['Extract Lightcurves from Spectra']:
        log.info('generating the Lightcurves')
        dg.generate_model_lightcurves(
            log=log,
            pathToSpectralDatabase=pathToSpectralDatabase,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            explosionDaysFromSettings=explosionDaysFromSettings,
            extendLightCurveTail=extendLightCurveTail,
            polyOrder=lightCurvePolyOrder)

    if programSettings['Generate KCorrection Database']:
        log.info('generating the kcorrection data')
        dg.generate_kcorrection_listing_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToSpectralDatabase=pathToSpectralDatabase,
            restFrameFilter=restFrameFilter,
            temporalResolution=kCorrectionTemporalResolution,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution)
        log.info('generating the kcorrection polynomials')
        dg.generate_kcorrection_polynomial_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            restFrameFilter=restFrameFilter,
            kCorPolyOrder=kCorPolyOrder,  # ORDER OF THE POLYNOMIAL TO FIT
            kCorMinimumDataPoints=kCorMinimumDataPoints,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution,
            plot=programSettings['Generate KCorrection Plots'])

    if programSettings['Run the Simulation']:
        # CREATE THE OBSERVABLE UNIVERSE!
        log.info('generating the redshift array')
        redshiftArray = u.random_redshift_array(
            log,
            sampleNumber,
            lowerRedshiftLimit,
            upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])
        resultsDict['Redshifts'] = redshiftArray.tolist()

        log.info('generating the SN type array')
        snTypesArray = u.random_sn_types_array(
            log,
            sampleNumber,
            relativeSNRates,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])
        resultsDict['SN Types'] = snTypesArray.tolist()

        log.info('generating peak magnitudes for the SNe')
        peakMagnitudesArray = u.random_peak_magnitudes(
            log,
            peakMagnitudeDistributions,
            snTypesArray,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the SN host extinctions array')
        hostExtinctionArray = u.random_host_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            hostExtinctionDistributions,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the SN galactic extinctions array')
        galacticExtinctionArray = u.random_galactic_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            galacticExtinctionDistribution,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the raw lightcurves for the SNe')
        rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(
            log,
            snLightCurves=snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the k-correction array for the SNe')
        kCorrectionArray = u.build_kcorrection_array(
            log,
            redshiftArray,
            snTypesArray,
            snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the observed lightcurves for the SNe')
        observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(
            log,
            snLightCurves=snLightCurves,
            rawLightCurveDict=rawLightCurveDict,
            redshiftArray=redshiftArray,
            snTypesArray=snTypesArray,
            peakMagnitudesArray=peakMagnitudesArray,
            kCorrectionArray=kCorrectionArray,
            hostExtinctionArray=hostExtinctionArray,
            galacticExtinctionArray=galacticExtinctionArray,
            restFrameFilter=restFrameFilter,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            polyOrder=lightCurvePolyOrder,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the survey observation cadence')
        cadenceDictionary = ss.survey_cadence_arrays(
            log,
            surveyCadenceSettings,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('determining if the SNe are discoverable by the survey')
        discoverableList = ss.determine_if_sne_are_discoverable(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info(
            'determining the day (if any) when each SN is first discoverable by the survey'
        )
        ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            discoverableList=discoverableList,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            plot=programSettings['Plot Simulation Helper Plots'])

        # log.info('determining the day when each SN becomes fainter than the survey limiting mags')
        # disappearDayList = determine_when_discovered_sne_disappear(
        #     log,
        #     redshiftArray=redshiftArray,
        #     limitingMags=limitingMags,
        #     ripeDayList=ripeDayList,
        #     observedFrameLightCurveInfo=observedFrameLightCurveInfo,
        #     plot=programSettings['Plot Simulation Helper Plots'])

        log.info('determining if and when each SN is discovered by the survey')
        lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(
            log,
            limitingMags=limitingMags,
            ripeDayList=ripeDayList,
            cadenceDictionary=cadenceDictionary,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            extraSurveyConstraints=extraSurveyConstraints,
            plot=programSettings['Plot Simulation Helper Plots'])

        resultsDict[
            'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList
        resultsDict[
            'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList
        resultsDict['Campaign Length'] = snCampaignLengthList
        resultsDict['Cadence Dictionary'] = cadenceDictionary
        resultsDict['Peak Apparent Magnitudes'] = peakAppMagList

        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        fileName = pathToOutputDirectory + \
            "simulation_results_%s.yaml" % (now,)
        stream = open(fileName, 'w')
        yamlContent = dict(allSettings.items() + resultsDict.items())
        yaml.dump(yamlContent, stream, default_flow_style=False)
        stream.close()

    # COMPILE AND PLOT THE RESULTS
    if programSettings['Compile and Plot Results']:
        pathToYamlFile = pathToOutputDirectory + \
            programSettings['Simulation Results File Used for Plots']
        result_log = r.log_the_survey_settings(log, pathToYamlFile)
        snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(
            log, pathToYamlFile)
        snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(
            log,
            lightCurveDiscoveryTimes,
            snSurveyDiscoveryTimes,
            redshifts,
            surveyCadenceSettings=surveyCadenceSettings,
            lowerRedshiftLimit=lowerRedshiftLimit,
            upperRedshiftLimit=upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            surveyArea=surveyArea,
            CCSNRateFraction=CCSNRateFraction,
            transientToCCSNRateFraction=transientToCCSNRateFraction,
            peakAppMagList=peakAppMagList,
            snCampaignLengthList=snCampaignLengthList,
            extraSurveyConstraints=extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """
## Results ##

This simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification, and a further **%s** transients were detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots.

        """ % (
            totalRate, tooFaintRate, shortCampaignRate,
            extraSurveyConstraints["Observable for at least ? number of days"])
        cadenceWheelLink = r.plot_cadence_wheel(
            log,
            cadenceDictionary,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (cadenceWheelLink, )
        discoveryMapLink = r.plot_sn_discovery_map(
            log,
            snSurveyDiscoveryTimes,
            peakAppMagList,
            snCampaignLengthList,
            redshifts,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (discoveryMapLink, )
        ratioMapLink = r.plot_sn_discovery_ratio_map(
            log,
            snSurveyDiscoveryTimes,
            redshifts,
            peakAppMagList,
            snCampaignLengthList,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (ratioMapLink, )
        result_log += """%s""" % (snRatePlotLink, )

        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        mdLogPath = pathToResultsFolder + \
            "simulation_result_log_%s.md" % (now,)
        mdLog = open(mdLogPath, 'w')
        mdLog.write(result_log)
        mdLog.close()

        dmd.convert_to_html(log=log, pathToMMDFile=mdLogPath, css="amblin")

    # if dbConn:
    #     dbConn.commit()
    #     dbConn.close()
    ## FINISH LOGGING ##
    endTime = dcu.get_now_sql_datetime()
    runningTime = dcu.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE qubits AT %s (RUNTIME: %s) --' % (
        endTime,
        runningTime,
    ))

    # TEST THE ARGUMENTS

    ## VARIABLES ##

    return None
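``qubits`` parses its own docstring with docopt, so ``clArgs`` is simply a dict keyed by the option and argument names from the Usage section. A small self-contained sketch of that behaviour (the usage string here is illustrative, not the full qubits interface):

from docopt import docopt

usage = """
Usage:
    qubits <pathToSettingsFile> <pathToOutputDirectory>
"""

clArgs = docopt(usage, argv=["settings.yaml", "output/"])
# clArgs is a plain dict, e.g.:
# {'<pathToSettingsFile>': 'settings.yaml',
#  '<pathToOutputDirectory>': 'output/'}
print(clArgs["<pathToSettingsFile>"])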
Example #10
def convert_dictionary_to_mysql_table(
        dbConn,
        log,
        dictionary,
        dbTableName,
        uniqueKeyList=[],
        createHelperTables=False,
        dateModified=False,
        returnInsertOnly=False,
        replace=False):
    """ *Convert a python dictionary into a mysql table*

    NOTE: ADDED TO FUNDAMENTALS

    **Key Arguments:**
        - ``log`` -- logger
        - ``dictionary`` -- python dictionary
        - ``dbConn`` -- the db connection
        - ``dbTableName`` -- name of the table you wish to add the data to (or create if it does not exist)
        - ``uniqueKeyList`` -- a list of column names that are combined to create the primary key
        - ``createHelperTables`` -- create some helper tables with the main table, detailing original keywords etc
        - ``returnInsertOnly`` -- returns only the insert command (does not execute it)
        - ``replace`` -- use replace instead of insert statement

    **Return:**
        - ``None`` """

    # # >IMPORTS ##
    import pymysql as mdb
    import re
    import yaml
    import time
    import datetime
    from dryxPython import commonutils as dcu
    # import ordereddict as c  # REMOVE WHEN PYTHON 2.7 INSTALLED ON PSDB
    import collections as c
    import dryxPython.mysql as dms

    log.debug('starting convert_dictionary_to_mysql_table')

    if replace:
        insertVerb = "INSERT"
    else:
        insertVerb = "INSERT IGNORE"

    if returnInsertOnly == False:
        # TEST THE ARGUMENTS
        if str(type(dbConn).__name__) != "Connection":
            message = 'Please use a valid MySQL DB connection.'
            log.critical(message)
            raise TypeError(message)

        if not isinstance(dictionary, dict):
            message = 'Please make sure "dictionary" argument is a dict type.'
            log.critical(message)
            raise TypeError(message)

        if not isinstance(uniqueKeyList, list):
            message = 'Please make sure "uniqueKeyList" is a list'
            log.critical(message)
            raise TypeError(message)

        for i in uniqueKeyList:
            if i not in dictionary.keys():
                message = 'Please make sure values in "uniqueKeyList" are present in the "dictionary" you are tring to convert'
                log.critical(message)
                raise ValueError(message)

        for k, v in dictionary.iteritems():
            # log.debug('k: %s, v: %s' % (k, v,))
            if isinstance(v, list) and len(v) != 2:
                message = 'Please make sure the list values in "dictionary" are 2 items in length'
                log.critical("%s: in %s we have a %s (%s)" %
                             (message, k, v, type(v)))
                raise ValueError(message)
            if isinstance(v, list):
                if not (isinstance(v[0], str) or isinstance(v[0], int) or isinstance(v[0], bool) or isinstance(v[0], float) or isinstance(v[0], long) or isinstance(v[0], datetime.date) or v[0] == None):
                    message = 'Please make sure values in "dictionary" are of an appropriate value to add to the database, must be str, float, int or bool'
                    log.critical("%s: in %s we have a %s (%s)" %
                                 (message, k, v, type(v)))
                    raise ValueError(message)
            else:
                if not (isinstance(v, str) or isinstance(v, int) or isinstance(v, bool) or isinstance(v, float) or isinstance(v, long) or isinstance(v, unicode) or isinstance(v, datetime.date) or v == None):
                    this = type(v)
                    message = 'Please make sure values in "dictionary" are of an appropriate value to add to the database, must be str, float, int or bool : %(k)s is a %(this)s' % locals(
                    )
                    log.critical("%s: in %s we have a %s (%s)" %
                                 (message, k, v, type(v)))
                    raise ValueError(message)

        if not isinstance(createHelperTables, bool):
            message = 'Please make sure "createHelperTables" is a True or False'
            log.critical(message)
            raise TypeError(message)

        # TEST IF TABLE EXISTS
        tableExists = does_mysql_table_exist(dbConn, log, dbTableName)

        # CREATE THE TABLE IF IT DOES NOT EXIST
        if tableExists is False:
            sqlQuery = """
                CREATE TABLE `%(dbTableName)s`
                (`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
                PRIMARY KEY (`primaryId`))
                ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
            """ % locals()
            dms.execute_mysql_write_query(
                sqlQuery=sqlQuery,
                dbConn=dbConn,
                log=log
            )

    reFeedParserClass = re.compile('FeedParserDict')
    reDatetime = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T')
    reTypeTime = re.compile('struct_time')
    qCreateColumn = ''
    formattedKey = ''
    formattedKeyList = []
    myValues = []

    # ADD EXTRA COLUMNS TO THE DICTIONARY
    dictionary['dateCreated'] = [
        str(dcu.get_now_sql_datetime()), "date row was created"]
    if dateModified:
        dictionary['dateModified'] = [
            str(dcu.get_now_sql_datetime()), "date row was modified"]

    # ITERATE THROUGH THE DICTIONARY AND GENERATE A TABLE COLUMN WITH THE
    # NAME OF THE KEY, IF IT DOES NOT EXIST
    count = len(dictionary)
    i = 1
    for (key, value) in dictionary.items():
        if (isinstance(value, list) and value[0] is None):
            del dictionary[key]
    # SORT THE DICTIONARY BY KEY
    odictionary = c.OrderedDict(sorted(dictionary.items()))
    for (key, value) in odictionary.iteritems():

        formattedKey = key.replace(" ", "_").replace("-", "_")
        # DEC A KEYWORD IN MYSQL - NEED TO CHANGE BEFORE INGEST
        if formattedKey == "dec":
            formattedKey = "decl"
        if formattedKey == "DEC":
            formattedKey = "DECL"

        formattedKeyList.extend([formattedKey])
        if len(key) > 0:
            # CONVERT LIST AND FEEDPARSER VALUES TO YAML (SO I CAN PASS IT AS A
            # STRING TO MYSQL)
            if isinstance(value, list) and (isinstance(value[0], list) or reFeedParserClass.search(str(type(value[0])))):
                value[0] = yaml.dump(value[0])
                value[0] = str(value[0])
            # REMOVE CHARACTERS THAT COLLIDE WITH MYSQL
            # if type(value[0]) == str or type(value[0]) == unicode:
            #     value[0] = value[0].replace('"', """'""")
            # JOIN THE VALUES TOGETHER IN A LIST - EASIER TO GENERATE THE MYSQL
            # COMMAND LATER
            if isinstance(value, str):
                value = value.replace('\\', '\\\\')
                value = value.replace('"', '\\"')
                try:
                    udata = value.decode("utf-8", "ignore")
                    value = udata.encode("ascii", "ignore")
                except:
                    log.error('could not decode value %(value)s' % locals())

                # log.debug('udata: %(udata)s' % locals())

            if isinstance(value, unicode):
                value = value.replace('"', '\\"')
                value = value.encode("ascii", "ignore")

            if isinstance(value, list) and isinstance(value[0], unicode):
                myValues.extend(['%s' % value[0].strip()])
            elif isinstance(value, list):
                myValues.extend(['%s' % (value[0], )])
            else:
                myValues.extend(['%s' % (value, )])

            if returnInsertOnly == False:
                # CHECK IF COLUMN EXISTS YET
                colExists = \
                    "SELECT *\
                                    FROM information_schema.COLUMNS\
                                    WHERE TABLE_SCHEMA=DATABASE()\
                                        AND COLUMN_NAME='" \
                    + formattedKey + "'\
                                        AND TABLE_NAME='" + dbTableName + """'"""
                try:
                    # log.debug('checking if the column '+formattedKey+' exists
                    # in the '+dbTableName+' table')
                    rows = execute_mysql_read_query(
                        colExists,
                        dbConn,
                        log,
                    )
                except Exception as e:
                    log.error('something went wrong' + str(e) + '\n')

                # IF COLUMN DOESN'T EXIST - GENERATE IT
                if len(rows) == 0:
                    qCreateColumn = """ALTER TABLE %s ADD %s""" % (
                        dbTableName, formattedKey)
                    if not isinstance(value, list):
                        value = [value]
                    if reDatetime.search(str(value[0])):
                        # log.debug('Ok - a datetime string was found')
                        qCreateColumn += ' datetime DEFAULT NULL'
                    elif formattedKey == 'updated_parsed' or formattedKey == 'published_parsed' or formattedKey \
                            == 'feedName' or formattedKey == 'title':
                        qCreateColumn += ' varchar(100) DEFAULT NULL'
                    elif (isinstance(value[0], str) or isinstance(value[0], unicode)) and len(value[0]) < 30:
                        qCreateColumn += ' varchar(100) DEFAULT NULL'
                    elif (isinstance(value[0], str) or isinstance(value[0], unicode)) and len(value[0]) >= 30 and len(value[0]) < 80:
                        qCreateColumn += ' varchar(100) DEFAULT NULL'
                    elif isinstance(value[0], str) or isinstance(value[0], unicode):
                        columnLength = 450 + len(value[0]) * 2
                        qCreateColumn += ' varchar(' + str(
                            columnLength) + ') DEFAULT NULL'
                    elif isinstance(value[0], int) and abs(value[0]) <= 9:
                        qCreateColumn += ' tinyint DEFAULT NULL'
                    elif isinstance(value[0], int):
                        qCreateColumn += ' int DEFAULT NULL'
                    elif isinstance(value[0], float) or isinstance(value[0], long):
                        qCreateColumn += ' double DEFAULT NULL'
                    elif isinstance(value[0], bool):
                        qCreateColumn += ' tinyint DEFAULT NULL'
                    elif isinstance(value[0], list):
                        qCreateColumn += ' varchar(1024) DEFAULT NULL'
                    else:
                        # log.debug('Do not know what format to add this key in MySQL - removing from dictionary: %s, %s' % (key, type(value[0])))
                        formattedKeyList.pop()
                        myValues.pop()
                        qCreateColumn = None
                    if qCreateColumn:
                        # ADD COMMENT TO RECORD THE ORIGINAL KEYWORD IF IT WAS
                        # REFORMATTED FOR MYSQL
                        if key != formattedKey:
                            qCreateColumn += " COMMENT 'original keyword: " + \
                                key + """'"""
                        # CREATE THE COLUMN IF IT DOES NOT EXIST
                        try:
                            log.info('creating the ' +
                                     formattedKey + ' column in the ' + dbTableName + ' table')
                            message = execute_mysql_write_query(
                                qCreateColumn,
                                dbConn,
                                log,
                            )
                        except Exception as e:
                            # log.debug('qCreateColumn: %s' % (qCreateColumn,
                            # ))
                            log.error('could not create the ' + formattedKey + ' column in the ' + dbTableName
                                      + ' table -- ' + str(e) + '\n')

    if returnInsertOnly == False:
        # GENERATE THE INDEX NAME - THEN CREATE INDEX IF IT DOES NOT YET EXIST
        if len(uniqueKeyList):
            for i in range(len(uniqueKeyList)):
                uniqueKeyList[i] = uniqueKeyList[
                    i].replace(" ", "_").replace("-", "_")
                if uniqueKeyList[i] == "dec":
                    uniqueKeyList[i] = "decl"
                if uniqueKeyList[i] == "DEC":
                    uniqueKeyList[i] = "DECL"

            indexName = uniqueKeyList[0].replace(" ", "_").replace("-", "_")
            for i in range(len(uniqueKeyList) - 1):
                indexName += '_' + uniqueKeyList[i + 1]

            indexName = dcu.make_lowercase_nospace(indexName)
            rows = execute_mysql_read_query(
                """SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND
                                        TABLE_NAME = '"""
                + dbTableName + """' AND INDEX_NAME = '""" +
                indexName + """'""",
                dbConn,
                log,
            )
            exists = rows[0]['COUNT(*)']
            # log.debug('uniqueKeyList: %s' % (uniqueKeyList,))
            if exists == 0:
                if isinstance(uniqueKeyList, list):
                    uniqueKeyList = ','.join(uniqueKeyList)

                addUniqueKey = 'ALTER TABLE ' + dbTableName + \
                    ' ADD unique ' + indexName + """ (""" + uniqueKeyList + ')'
                # log.debug('HERE IS THE COMMAND:'+addUniqueKey)
                message = execute_mysql_write_query(
                    addUniqueKey,
                    dbConn,
                    log,
                )

    if returnInsertOnly == True:
        myKeys = ','.join(formattedKeyList)
        valueString = ("%s, " * len(myValues))[:-2]
        insertCommand = insertVerb + """ INTO `""" + dbTableName + \
            """` (""" + myKeys + """) VALUES (""" + valueString + """)"""
        mv = []
        mv[:] = [None if m == "None" else m for m in myValues]
        valueTuple = tuple(mv)

        dup = ""
        if replace:
            dup = " ON DUPLICATE KEY UPDATE "
            for k, v in zip(formattedKeyList, mv):
                dup = """%(dup)s %(k)s=values(%(k)s),""" % locals()
        dup = dup[:-1]

        insertCommand = insertCommand + dup

        return insertCommand, valueTuple

    # GENERATE THE INSERT COMMAND - IGNORE DUPLICATE ENTRIES
    myKeys = ','.join(formattedKeyList)
    myValues = '" ,"'.join(myValues)
    # log.debug(myValues+" ------ PRESTRIP")
    # REMOVE SOME CONVERSION NOISE
    myValues = myValues.replace('time.struct_time', '')
    myValues = myValues.replace(
        '- !!python/object/new:feedparser.FeedParserDict', '')
    myValues = myValues.replace(
        '!!python/object/new:feedparser.FeedParserDict', '')
    myValues = myValues.replace('dictitems:', '')
    myValues = myValues.replace('dictitems', '')
    myValues = myValues.replace('!!python/unicode:', '')
    myValues = myValues.replace('!!python/unicode', '')
    myValues = myValues.replace('"None"', 'null')
    myValues = myValues.replace('"None', 'null')

    if myValues[-4:] != 'null':
        myValues += '"'

    dup = ""
    if replace:
        dup = " ON DUPLICATE KEY UPDATE "
        dupValues = ('"' + myValues).split(" ,")
        dupKeys = myKeys.split(",")

        for k, v in zip(dupKeys, dupValues):
            dup = """%(dup)s %(k)s=%(v)s,""" % locals()
    dup = dup[:-1]

    # log.debug(myValues+" ------ POSTSTRIP")
    addValue = insertVerb + """ INTO `""" + dbTableName + \
        """` (""" + myKeys + """) VALUES (\"""" + \
        myValues + """) %(dup)s """ % locals()
    # log.debug(addValue)

    message = ""
    try:
        # log.debug('adding new data to the %s table; query: %s' %
        # (dbTableName, addValue))
        message = execute_mysql_write_query(
            addValue,
            dbConn,
            log
        )

    except Exception as e:
        log.error("could not add new data added to the table '" +
                  dbTableName + "' : " + str(e) + '\n')

    log.debug('finished convert_dictionary_to_mysql_table')

    return message
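
# A minimal usage sketch of the function above (hedged: the exact signature is
# not shown here, so the keyword names below -- dictionary, dbTableName,
# uniqueKeyList, returnInsertOnly, replace -- are inferred from the function
# body and may differ; the table name and row values are purely illustrative).
rowDict = {
    "title": "a new transient",
    "published_parsed": "2014-01-01 00:00:00",
    "magnitude": 18.2,
}
insertCommand, valueTuple = convert_dictionary_to_mysql_table(
    dbConn=dbConn,
    log=log,
    dictionary=rowDict,
    dbTableName="transient_feed",   # illustrative table name
    uniqueKeyList=["title"],
    returnInsertOnly=True,          # build the INSERT only, skip the schema checks
    replace=True,                   # appends an ON DUPLICATE KEY UPDATE clause
)
# the command and value tuple can then be executed later, e.g.
# cursor.execute(insertCommand, valueTuple)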
Example #11
0
                feedURL, self._downloadDirectory, 0)
        except Exception as e:
            log.error("could not download %s xml file : %s" %
                      (rssFeedName, str(e)))
            return -1

        # INSTANTIATE THE XML FILE OBJECT
        xf = xml_file()
        xf.feedUrl = localUrl
        xf.rssFeedName = rssFeedName

        # CHANNEL ELEMENTS = TOP LEVEL XML FEED METADATA
        # GRAB THE DICTIONARY OF ELEMENTS AND CREATE/APPEND TO MYSQL TABLE
        xfce = xf.get_channel_elements()
        # ADD EXTRA COLUMNS TO THE DICTIONARY
        now = str(cu.get_now_sql_datetime())
        xfce['dateCreated'] = now
        xfce['dateLastModified'] = now
        xfce['dateLastRead'] = now
        xfce['rssFeedName'] = rssFeedName
        xfce['feedURL'] = feedURL
        xfce['rssFeedSource'] = rssFeedSource
        if isinstance(uniqueColumns, list):  # I.E. A LIST OF COLUMN NAMES
            xfce['feedTableUniqueKeyName'] = "_".join(uniqueColumns)
            xfce['uniqueKeyCols'] = ",".join(uniqueColumns)
        else:
            xfce['feedTableUniqueKeyName'] = uniqueColumns
            xfce['uniqueKeyCols'] = uniqueColumns

        # APPEND TO MYSQL SUBSCRIPTION TABLE
        try:
Example #12
0
def main(arguments=None):
    """
    *The main function used when ``xy_scatter.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    ########## IMPORTS ##########
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    ## LOCAL APPLICATION ##

    ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECEIVED ##
    # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED
    if arguments is None:
        arguments = docopt(__doc__)

    # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS
    if 'settings' in locals() and "logging settings" in settings:
        log = dl.setup_dryx_logging(
            yaml_file=arguments["--settingsFile"]
        )
    elif "--logger" not in arguments or arguments["--logger"] is None:
        log = dl.console_logger(
            level="DEBUG"
        )
        log.debug('logger setup')

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        varname = arg.replace("--", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE xy_scatter.py AT %s' %
        (startTime,))

    # if "axisLabels" not in globals():
    #     axisLabels = False
    # if "title" not in globals():
    #     title = False
    # if "dataLabels" not in globals():
    #     dataLabels = False

    # call the worker function
    xy_scatter(
        log=log,
        x=x,
        y=y,
        axisLabels=axisLabels,
        title=title,
        dataLabels=dataLabels,
        colors=colors
    )

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = dcu.get_now_sql_datetime()
    runningTime = dcu.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE xy_scatter.py AT %s (RUNTIME: %s) --' %
        (endTime, runningTime, ))

    return
Example #13
0
def main(arguments=None):
    """
    *The main function used when ``update_git_repos.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    ########## IMPORTS ##########
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    ## LOCAL APPLICATION ##
    import dryxPython.commonutils as dcu

    ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECEIVED ##
    # PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED
    if arguments is None:
        arguments = docopt(__doc__)

    # UNPACK SETTINGS
    if "--settingsFile" in arguments and arguments["--settingsFile"]:
        import yaml
        stream = open(arguments["--settingsFile"], 'r')
        settings = yaml.load(stream)
        stream.close()
    # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS
    if 'settings' in locals() and "logging settings" in settings:
        log = dl.setup_dryx_logging(
            yaml_file=arguments["--settingsFile"]
        )
    elif "--logger" not in arguments or arguments["--logger"] is None:
        log = dl.console_logger(
            level="DEBUG"
        )
        log.debug('logger setup')

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        varname = arg.replace("--", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE git_update_script.py AT %s' %
        (startTime,))

    # call the worker function
    # x-if-settings-or-database-credentials
    if "settings" in locals() and "git repos" in settings:
        for repo in settings["git repos"]:
            log.debug('repo["path"]: %s' % (repo["path"],))
            log.debug('repo["branchToUpdate"]: %s' % (repo["branchToUpdate"],))
            update_git_repos(
                log=log,
                gitProjectRoot=repo["path"],
                branchToUpdate=repo["branchToUpdate"]
            )

    ## FINISH LOGGING ##
    endTime = dcu.get_now_sql_datetime()
    runningTime = dcu.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE git_update_script.py AT %s (RUNTIME: %s) --' %
        (endTime, runningTime, ))

    return
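
# For reference, a sketch of the settings structure the "git repos" loop above
# expects once the YAML settings file has been parsed (the paths and branch
# names here are purely illustrative).
settings = {
    "git repos": [
        {"path": "/path/to/projectA", "branchToUpdate": "master"},
        {"path": "/path/to/projectB", "branchToUpdate": "develop"},
    ]
}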
Example #14
0
def main(arguments=None):
    """
    *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    # setup the command-line util settings
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="WARNING",
        options_first=False,
        projectName="qubits"
    )
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if varname == "import":
            varname = "iimport"
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE cl_utils.py AT %s' %
        (startTime,))

    if init:
        from . import workspace
        ws = workspace(
            log=log,
            pathToWorkspace=pathToWorkspace
        )
        ws.setup()
        return

    # IMPORT THE SIMULATION SETTINGS
    (allSettings,
     programSettings,
     limitingMags,
     sampleNumber,
     peakMagnitudeDistributions,
     explosionDaysFromSettings,
     extendLightCurveTail,
     relativeSNRates,
     lowerRedshiftLimit,
     upperRedshiftLimit,
     redshiftResolution,
     restFrameFilter,
     kCorrectionTemporalResolution,
     kCorPolyOrder,
     kCorMinimumDataPoints,
     extinctionType,
     extinctionConstant,
     hostExtinctionDistributions,
     galacticExtinctionDistribution,
     surveyCadenceSettings,
     snLightCurves,
     surveyArea,
     CCSNRateFraction,
     transientToCCSNRateFraction,
     extraSurveyConstraints,
     lightCurvePolyOrder,
     logLevel) = cu.read_in_survey_parameters(
        log,
        pathToSettingsFile=pathToSettingsFile
    )

    logFilePath = pathToOutputDirectory + "/qubits.log"
    del log
    log = _set_up_command_line_tool(
        level=str(logLevel),
        logFilePath=logFilePath
    )

    # dbConn, log = cu.settings(
    #     pathToSettingsFile=pathToSettingsFile,
    #     dbConn=False,
    #     log=True
    # )

    ## START LOGGING ##
    startTime = dcu.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime,))

    resultsDict = {}

    pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/"
    dcu.dryx_mkdir(
        log,
        directoryPath=pathToOutputPlotDirectory
    )

    pathToResultsFolder = pathToOutputDirectory + "/results/"
    dcu.dryx_mkdir(
        log,
        directoryPath=pathToResultsFolder
    )

    if not programSettings['Extract Lightcurves from Spectra'] and not programSettings['Generate KCorrection Database'] and not programSettings['Run the Simulation'] and not programSettings['Compile and Plot Results']:
        print "All stages of the simulatation have been switched off. Please switch on at least one stage of the simulation under the 'Programming Settings' in the settings file `%(pathToSettingsFile)s`" % locals()

    # GENERATE THE DATA FOR SIMULATIONS
    if programSettings['Extract Lightcurves from Spectra']:
        log.info('generating the Lightcurves')
        dg.generate_model_lightcurves(
            log=log,
            pathToSpectralDatabase=pathToSpectralDatabase,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            explosionDaysFromSettings=explosionDaysFromSettings,
            extendLightCurveTail=extendLightCurveTail,
            polyOrder=lightCurvePolyOrder
        )
        print "The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml" % locals()
        print "The lightcurve plots can be found in %(pathToOutputPlotDirectory)s" % locals()

    if programSettings['Generate KCorrection Database']:
        log.info('generating the kcorrection data')
        dg.generate_kcorrection_listing_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToSpectralDatabase=pathToSpectralDatabase,
            restFrameFilter=restFrameFilter,
            temporalResolution=kCorrectionTemporalResolution,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution)
        log.info('generating the kcorrection polynomials')
        dg.generate_kcorrection_polynomial_database(
            log,
            pathToOutputDirectory=pathToOutputDirectory,
            restFrameFilter=restFrameFilter,
            kCorPolyOrder=kCorPolyOrder,  # ORDER OF THE POLYNOMIAL TO FIT
            kCorMinimumDataPoints=kCorMinimumDataPoints,
            redshiftResolution=redshiftResolution,
            redshiftLower=lowerRedshiftLimit,
            redshiftUpper=upperRedshiftLimit + redshiftResolution,
            plot=programSettings['Generate KCorrection Plots'])

        print "The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections" % locals()
        if programSettings['Generate KCorrection Plots']:
            print "The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections" % locals()

    if programSettings['Run the Simulation']:
        # CREATE THE OBSERVABLE UNIVERSE!
        log.info('generating the redshift array')
        redshiftArray = u.random_redshift_array(
            log,
            sampleNumber,
            lowerRedshiftLimit,
            upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])
        resultsDict['Redshifts'] = redshiftArray.tolist()

        log.info('generating the SN type array')
        snTypesArray = u.random_sn_types_array(
            log,
            sampleNumber,
            relativeSNRates,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])
        resultsDict['SN Types'] = snTypesArray.tolist()

        log.info('generating peak magnitudes for the SNe')
        peakMagnitudesArray = u.random_peak_magnitudes(
            log,
            peakMagnitudeDistributions,
            snTypesArray,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the SN host extinctions array')
        hostExtinctionArray = u.random_host_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            hostExtinctionDistributions,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the SN galactic extinctions array')
        galacticExtinctionArray = u.random_galactic_extinction(
            log,
            sampleNumber,
            extinctionType,
            extinctionConstant,
            galacticExtinctionDistribution,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the raw lightcurves for the SNe')
        rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(
            log,
            snLightCurves=snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the k-correction array for the SNe')
        kCorrectionArray = u.build_kcorrection_array(
            log,
            redshiftArray,
            snTypesArray,
            snLightCurves,
            pathToOutputDirectory=pathToOutputDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the observed lightcurves for the SNe')
        observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(
            log,
            snLightCurves=snLightCurves,
            rawLightCurveDict=rawLightCurveDict,
            redshiftArray=redshiftArray,
            snTypesArray=snTypesArray,
            peakMagnitudesArray=peakMagnitudesArray,
            kCorrectionArray=kCorrectionArray,
            hostExtinctionArray=hostExtinctionArray,
            galacticExtinctionArray=galacticExtinctionArray,
            restFrameFilter=restFrameFilter,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            polyOrder=lightCurvePolyOrder,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('generating the survey observation cadence')
        cadenceDictionary = ss.survey_cadence_arrays(
            log,
            surveyCadenceSettings,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info('determining if the SNe are discoverable by the survey')
        discoverableList = ss.determine_if_sne_are_discoverable(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            pathToOutputDirectory=pathToOutputDirectory,
            pathToOutputPlotDirectory=pathToOutputPlotDirectory,
            plot=programSettings['Plot Simulation Helper Plots'])

        log.info(
            'determining the day (if and) when each SN is first discoverable by the survey')
        ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(
            log,
            redshiftArray=redshiftArray,
            limitingMags=limitingMags,
            discoverableList=discoverableList,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            plot=programSettings['Plot Simulation Helper Plots'])

        # log.info('determining the day when each SN disappears fainter than the survey limiting mags')
        # disappearDayList = determine_when_discovered_sne_disappear(
        #     log,
        #     redshiftArray=redshiftArray,
        #     limitingMags=limitingMags,
        #     ripeDayList=ripeDayList,
        #     observedFrameLightCurveInfo=observedFrameLightCurveInfo,
        #     plot=programSettings['Plot Simulation Helper Plots'])

        log.info('determining if and when each SN is discovered by the survey')
        lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(
            log,
            limitingMags=limitingMags,
            ripeDayList=ripeDayList,
            cadenceDictionary=cadenceDictionary,
            observedFrameLightCurveInfo=observedFrameLightCurveInfo,
            extraSurveyConstraints=extraSurveyConstraints,
            plot=programSettings['Plot Simulation Helper Plots'])

        resultsDict[
            'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList
        resultsDict[
            'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList
        resultsDict['Campaign Length'] = snCampaignLengthList
        resultsDict['Cadence Dictionary'] = cadenceDictionary
        resultsDict['Peak Apparent Magnitudes'] = peakAppMagList

        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        fileName = pathToOutputDirectory + \
            "/simulation_results_%s.yaml" % (now,)
        stream = file(fileName, 'w')
        yamlContent = dict(allSettings.items() + resultsDict.items())
        yaml.dump(yamlContent, stream, default_flow_style=False)
        stream.close()

        print "The simulation output file can be found here: %(fileName)s. Remember to update your settings file 'Simulation Results File Used for Plots' parameter with this filename before compiling the results." % locals()
        if programSettings['Plot Simulation Helper Plots']:
            print "The simulation helper-plots found in %(pathToOutputPlotDirectory)s" % locals()

    # COMPILE AND PLOT THE RESULTS
    if programSettings['Compile and Plot Results']:
        pathToYamlFile = pathToOutputDirectory + "/" + \
            programSettings['Simulation Results File Used for Plots']
        result_log = r.log_the_survey_settings(log, pathToYamlFile)
        snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(
            log, pathToYamlFile)
        snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(
            log,
            lightCurveDiscoveryTimes,
            snSurveyDiscoveryTimes,
            redshifts,
            surveyCadenceSettings=surveyCadenceSettings,
            lowerRedshiftLimit=lowerRedshiftLimit,
            upperRedshiftLimit=upperRedshiftLimit,
            redshiftResolution=redshiftResolution,
            surveyArea=surveyArea,
            CCSNRateFraction=CCSNRateFraction,
            transientToCCSNRateFraction=transientToCCSNRateFraction,
            peakAppMagList=peakAppMagList,
            snCampaignLengthList=snCampaignLengthList,
            extraSurveyConstraints=extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """
## Results ##

This simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification, and a further **%s** transients were detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots.

        """ % (totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints["Observable for at least ? number of days"])
        cadenceWheelLink = r.plot_cadence_wheel(
            log,
            cadenceDictionary,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (cadenceWheelLink,)
        discoveryMapLink = r.plot_sn_discovery_map(
            log,
            snSurveyDiscoveryTimes,
            peakAppMagList,
            snCampaignLengthList,
            redshifts,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (discoveryMapLink,)
        ratioMapLink = r.plot_sn_discovery_ratio_map(
            log,
            snSurveyDiscoveryTimes,
            redshifts,
            peakAppMagList,
            snCampaignLengthList,
            extraSurveyConstraints,
            pathToOutputPlotFolder=pathToOutputPlotDirectory)
        result_log += """%s""" % (ratioMapLink,)
        result_log += """%s""" % (snRatePlotLink,)

        now = datetime.now()
        now = now.strftime("%Y%m%dt%H%M%S")
        mdLogPath = pathToResultsFolder + \
            "simulation_result_log_%s.md" % (now,)
        mdLog = open(mdLogPath, 'w')
        mdLog.write(result_log)
        mdLog.close()

        dmd.convert_to_html(
            log=log,
            pathToMMDFile=mdLogPath,
            css="amblin"
        )

        print "Results can be found here: %(pathToResultsFolder)s" % locals()
        html = mdLogPath.replace(".md", ".html")
        print "Open this file in your browser: %(html)s" % locals()

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
             (endTime, runningTime, ))

    return
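
# For orientation, an illustrative Python equivalent of the 'Programming
# Settings' toggles consumed above (read from the settings file via
# read_in_survey_parameters); the real file is YAML and may contain further
# keys, so treat this purely as a sketch of the flags referenced in the code.
programSettings = {
    "Extract Lightcurves from Spectra": True,
    "Generate KCorrection Database": True,
    "Generate KCorrection Plots": False,
    "Run the Simulation": True,
    "Plot Simulation Helper Plots": False,
    "Compile and Plot Results": True,
    "Simulation Results File Used for Plots": "simulation_results_20140101t120000.yaml",  # illustrative filename
}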