def test_list_of_dictionaries_function_02(self):

        dataList = [{
            "owner": "daisy",
            "pet": "dog",
            "address": "belfast, uk"
        }, {
            "owner": "john",
            "pet": "snake",
            "address": "the moon"
        }, {
            "owner": "susan",
            "pet": "crocodile",
            "address": "larne"
        }]

        from fundamentals.renderer import list_of_dictionaries
        dataSet = list_of_dictionaries(log=log, listOfDictionaries=dataList)
        print dataSet.table()
        dataSet.table(filepath=pathToOutputDir + "myData.dat")
        print dataSet.csv()
        dataSet.csv(filepath=pathToOutputDir + "myData.csv")
        print dataSet.mysql(tableName="TNS")
        dataSet.mysql(tableName="TNS",
                      filepath=pathToOutputDir + "myData.mysql")
        print dataSet.json()
        dataSet.json(filepath=pathToOutputDir + "myData.json")
        print dataSet.yaml()
        dataSet.yaml(filepath=pathToOutputDir + "myData.yaml")
        print dataSet.markdown()
        dataSet.markdown(filepath=pathToOutputDir + "myData.md")
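
# NOTE: the test above assumes module-level `log` and `pathToOutputDir` fixtures
# defined elsewhere in the original test module. The snippet below is an
# illustrative assumption of how equivalent fixtures could be set up; it is not
# the package's own test scaffolding.
import logging
import os
log = logging.getLogger(__name__)
pathToOutputDir = os.path.join(os.path.dirname(__file__), "output") + "/"
if not os.path.exists(pathToOutputDir):
    os.makedirs(pathToOutputDir)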
Example #2
    def search(self):
        """
        *Return the results of the database conesearch*

        **Return**

        - ``conesearch``


        **Usage**

        See class usage.

        """
        self.log.debug('starting the ``search`` method')

        sqlQuery = self._get_on_trixel_sources_from_database_query()

        databaseRows = self._execute_query(sqlQuery)
        matchIndies, matches = self._list_crossmatch(databaseRows)

        from fundamentals.renderer import list_of_dictionaries
        matches = list_of_dictionaries(log=self.log,
                                       listOfDictionaries=matches,
                                       reDatetime=self.reDatetime)

        self.log.debug('completed the ``search`` method')
        return matchIndies, matches
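
# A hedged usage sketch (the conesearch class and its constructor are not shown
# in this snippet, so the instance name below is an assumption; the renderer
# calls themselves are taken from the other examples on this page):
#
#     matchIndies, matches = conesearcher.search()
#     print matches.table()                    # render matches as an ascii table
#     matches.csv(filepath="matches.csv")      # or write them to file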
Example #3
    def search(self):
        """
        *Return the results of the database conesearch*

        **Return:**
            - ``conesearch``

        **Usage:**

            See class usage.
        """
        self.log.info('starting the ``search`` method')

        sqlQuery = self._get_on_trixel_sources_from_database_query()
        databaseRows = self._execute_query(sqlQuery)
        matchIndies, matches = self._list_crossmatch(databaseRows)

        from fundamentals.renderer import list_of_dictionaries
        matches = list_of_dictionaries(
            log=self.log,
            listOfDictionaries=matches
        )

        self.log.info('completed the ``search`` method')
        return matchIndies, matches
    def test_list_of_dictionaries_function_exception(self):

        from fundamentals.renderer import list_of_dictionaries
        try:
            this = list_of_dictionaries(log=log,
                                        listOfDictionaries=listOfDictionaries,
                                        fakeKey="break the code")
            this.get()
            assert False
        except Exception, e:
            assert True
            print str(e)
    def test_list_of_dictionaries_function_exception(self):

        from fundamentals.renderer import list_of_dictionaries
        try:
            this = list_of_dictionaries(
                log=log,
                listOfDictionaries=listOfDictionaries,
                fakeKey="break the code"
            )
            this.get()
            assert False
        except Exception, e:
            assert True
            print str(e)
    def test_list_of_dictionaries_function(self):

        from fundamentals.renderer import list_of_dictionaries
        dataSet = list_of_dictionaries(log=log,
                                       listOfDictionaries=listOfDictionaries)
        print dataSet.table()
        dataSet.table(filepath=pathToOutputDir + "myData.dat")
        print dataSet.csv()
        dataSet.csv(filepath=pathToOutputDir + "myData.csv")
        print dataSet.mysql(tableName="TNS")
        dataSet.mysql(tableName="TNS",
                      filepath=pathToOutputDir + "myData.mysql")

        print dataSet.json()
        dataSet.json(filepath=pathToOutputDir + "myData.json")
        print dataSet.yaml()
        dataSet.yaml(filepath=pathToOutputDir + "myData.yaml")
        print dataSet.markdown()
        dataSet.markdown(filepath=pathToOutputDir + "myData.md")
    def test_list_of_dictionaries_function(self):

        from fundamentals.renderer import list_of_dictionaries
        dataSet = list_of_dictionaries(
            log=log,
            listOfDictionaries=listOfDictionaries
        )
        print dataSet.table()
        dataSet.table(filepath=pathToOutputDir + "myData.dat")
        print dataSet.csv()
        dataSet.csv(filepath=pathToOutputDir + "myData.csv")
        print dataSet.mysql(tableName="TNS")
        dataSet.mysql(tableName="TNS",
                      filepath=pathToOutputDir + "myData.mysql")

        print dataSet.json()
        dataSet.json(filepath=pathToOutputDir + "myData.json")
        print dataSet.yaml()
        dataSet.yaml(filepath=pathToOutputDir + "myData.yaml")
        print dataSet.markdown()
        dataSet.markdown(filepath=pathToOutputDir + "myData.md")
    def test_list_of_dictionaries_function_02(self):

        dataList = [
            {
                "owner": "daisy",
                "pet": "dog",
                "address": "belfast, uk"
            },
            {
                "owner": "john",
                "pet": "snake",
                "address": "the moon"
            },
            {
                "owner": "susan",
                "pet": "crocodile",
                "address": "larne"
            }

        ]

        from fundamentals.renderer import list_of_dictionaries
        dataSet = list_of_dictionaries(
            log=log,
            listOfDictionaries=dataList
        )
        print dataSet.table()
        dataSet.table(filepath=pathToOutputDir + "myData.dat")
        print dataSet.csv()
        dataSet.csv(filepath=pathToOutputDir + "myData.csv")
        print dataSet.mysql(tableName="TNS")
        dataSet.mysql(tableName="TNS",
                      filepath=pathToOutputDir + "myData.mysql")
        print dataSet.json()
        dataSet.json(filepath=pathToOutputDir + "myData.json")
        print dataSet.yaml()
        dataSet.yaml(filepath=pathToOutputDir + "myData.yaml")
        print dataSet.markdown()
        dataSet.markdown(filepath=pathToOutputDir + "myData.md")
Example #9
    def get(self):
        """
        *get the cone_search object*

        **Return**

        - ``results`` -- the results of the conesearch
        
        """
        self.log.debug('starting the ``get`` method')

        # sort results by angular separation
        from operator import itemgetter
        results = list(self.squareResults)
        results = sorted(results,
                         key=itemgetter('separation_arcsec'),
                         reverse=True)

        # order of results
        headers = [
            "sdss_name", "type", "ra", "dec", "specz", "specz_err", "photoz",
            "photoz_err", "separation_arcsec", "separation_north_arcsec",
            "separation_east_arcsec"
        ]

        import collections
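        # an empty ordered dictionary is used as a template so that columns
        # are always added in the `headers` order when the results are rendered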
        orderDict = collections.OrderedDict(sorted({}.items()))

        # filter out results greater than the search radius
        filteredResults = []
        for row in results:
            if float(row["separation_arcsec"]) < self.searchRadius:
                orderDict = collections.OrderedDict(sorted({}.items()))
                for h in headers:
                    if h in list(row.keys()):
                        orderDict[h] = row[h]
                filteredResults.append(orderDict)
            else:
                pass

        if self.nearest and len(filteredResults):
            orderDict = collections.OrderedDict(sorted({}.items()))
            for h in headers:
                if h in list(filteredResults[0].keys()):
                    orderDict[h] = filteredResults[0][h]
            filteredResults = [orderDict]
            # filteredResults = [filteredResults[0]]

        if not len(filteredResults):
            orderDict = collections.OrderedDict(sorted({}.items()))
            for h in headers:
                if self.galaxyType == "all" or self.galaxyType == False or (
                        self.galaxyType == "specz"
                        and h not in ["photoz_err", "photoz"]) or (
                            self.galaxyType == "photoz"
                            and h not in ["specz", "specz_err"]):
                    orderDict[h] = ""
            filteredResults = [orderDict]

        # pretty format print
        dataSet = list_of_dictionaries(log=self.log,
                                       listOfDictionaries=list(
                                           reversed(filteredResults)))
        if self.outputFormat == "csv":
            results = dataSet.csv()
        else:
            results = dataSet.table()

        # sdss only allows 60 hits per minute
        sleep(1)

        self.log.debug('completed the ``get`` method')
        return results
Example #10
class workspace():
    """
    *tools for sorting, archiving and indexing tasks and maintaining the contents of all taskpaper files within a given workspace*

    **Key Arguments:**
        - ``log`` -- logger
        - ``fileOrWorkspacePath`` -- the root path of the workspace you wish to sort the taskpaper docs within, or the path to a single taskpaper file
        - ``settings`` -- the settings dictionary

    **Usage:**

        To setup your logger, settings and database connections, please use the ``fundamentals`` package (`see tutorial here <http://fundamentals.readthedocs.io/en/latest/#tutorial>`_). 

        To initiate a taskpaper workspace object, use the following:

        .. code-block:: python 

            from tastic.workspace import workspace
            ws = workspace(
                log=log,
                settings=settings,
                fileOrWorkspacePath="/path/to/root/of/workspace"
            )

        or to target a single taskpaper document use instead the path to the file:

        .. code-block:: python 

            from tastic.workspace import workspace
            ws = workspace(
                log=log,
                settings=settings,
                fileOrWorkspacePath="/path/to/doc.taskpaper"
            )
    """

    # Initialisation

    def __init__(self, log, fileOrWorkspacePath, settings=False):
        self.log = log
        log.debug("instansiating a new 'sort' object")
        self.settings = settings
        self.taskpaperPath = False
        self.workspaceRoot = False
        # xt-self-arg-tmpx

        # INITIAL ACTIONS
        # ARE WE DEALING WITH A WORKSPACE DIRECTORY OR SINGLE FILE
        if os.path.isfile(fileOrWorkspacePath):
            self.taskpaperPath = fileOrWorkspacePath
        else:
            self.workspaceRoot = fileOrWorkspacePath

        self.taskpaperFiles = self._get_all_taskpaper_files()

        return None

    def sort(self):
        """
        *sort the workspace or individual taskpaper document via the workflow tags found in the settings file*

        **Usage:**

            To sort all of the taskpaper documents in the workspace via the workflow tag set with the settings file, for example:

            .. code-block:: yaml

                workflowTags: "@due, @flag, @hold, @next, @someday, @wait" 

            use the ``sort()`` method:

            .. code-block:: python 

                ws.sort()
        """
        self.log.info('starting the ``sort`` method')

        for f in self.taskpaperFiles:
            self._sort_tp_file(f)

        self.log.info('completed the ``sort`` method')
        return None

    def archive_done(self):
        """*move done tasks from the document's 'Archive' project into an adjacent markdown tasklog file*

        **Usage:**

            To move the archived tasks within a workspace's taskpaper docs into ``-tasklog.md`` files use the ``archive_done()`` method:

            .. code-block:: python 

                ws.archive_done()
        """
        self.log.info('starting the ``archive_done`` method')

        for f in self.taskpaperFiles:
            self._archive_tp_file_done_tasks(f)

        self.log.info('completed the ``archive_done`` method')
        return None

    def _get_all_taskpaper_files(self):
        """*get a list of all the taskpaper filepaths in the workspace*

        **Return:**
            - ``taskpaperFiles`` -- a list of paths to all the taskpaper files within the workspace
        """
        self.log.info('starting the ``_get_all_taskpaper_files`` method')

        if self.workspaceRoot:
            from fundamentals.files import recursive_directory_listing
            theseFiles = recursive_directory_listing(
                log=self.log,
                baseFolderPath=self.workspaceRoot,
                whatToList="files"  # all | files | dirs
            )

            taskpaperFiles = []
            taskpaperFiles[:] = [
                f for f in theseFiles if os.path.splitext(f)[1] == ".taskpaper"
            ]
        else:
            taskpaperFiles = [self.taskpaperPath]

        self.log.info('completed the ``_get_all_taskpaper_files`` method')
        return taskpaperFiles

    def _sort_tp_file(self, taskpaperPath):
        """*sort individual taskpaper documents*

        **Key Arguments:**
            - ``taskpaperPath`` -- path to a taskpaper file

        **Return:**
            - None
        """
        self.log.info('starting the ``_sort_tp_file`` method')

        # OPEN TASKPAPER FILE

        self.log.info("sorting taskpaper file %(taskpaperPath)s" % locals())
        doc = document(taskpaperPath)
        doc.tidy()
        doc.sort_tasks(self.settings["workflowTags"])
        doc.sort_projects(self.settings["workflowTags"])
        doc.save()

        self.log.info('completed the ``_sort_tp_file`` method')
        return None

    def _archive_tp_file_done_tasks(self, taskpaperPath):
        """* archive tp file done tasks*

        **Key Arguments:**
            - ``taskpaperPath`` -- path to a taskpaper file

        **Return:**
            - None
        """
        self.log.info('starting the ``_archive_tp_file_done_tasks`` method')
        self.log.info("archiving taskpaper file %(taskpaperPath)s" % locals())
        taskLog = {}
        mdArchiveFile = taskpaperPath.replace(".taskpaper", "-tasklog.md")
        exists = os.path.exists(mdArchiveFile)
        if exists:
            pathToReadFile = mdArchiveFile
            try:
                self.log.debug("attempting to open the file %s" %
                               (pathToReadFile, ))
                readFile = codecs.open(pathToReadFile,
                                       encoding='utf-8',
                                       mode='r')
                thisData = readFile.read()
                readFile.close()
            except IOError, e:
                message = 'could not open the file %s' % (pathToReadFile, )
                self.log.critical(message)
                raise IOError(message)
            readFile.close()
            table = False
            for l in thisData.split("\n"):
                l = l.encode("utf-8")
                if ":---" in l:
                    table = True
                    continue
                if table == True and len(l) and l[0] == "|":
                    dictt = collections.OrderedDict(sorted({}.items()))
                    columns = l.split("|")

                    dictt["task"] = columns[1].strip().decode("utf-8")
                    dictt["completed"] = columns[2].strip().decode("utf-8")
                    dictt["project"] = columns[3].strip().decode("utf-8")
                    taskLog[dictt["task"] + dictt["completed"] +
                            dictt["project"]] = dictt

        doc = document(taskpaperPath)
        aProject = doc.get_project("Archive")
        if not aProject:
            return

        doneTasks = aProject.tagged_tasks("@done")

        for task in doneTasks:
            dateCompleted = ""
            project = ""
            for t in task.tags:
                if "done" in t:
                    dateCompleted = t.replace("done",
                                              "").replace("(",
                                                          "").replace(")", "")
                if "project(" in t:
                    project = t.replace("project",
                                        "").replace("(", "").replace(")", "")

            dictt = collections.OrderedDict(sorted({}.items()))

            notes = ""
            if task.notes:
                for n in task.notes:
                    if len(notes) and notes[-2:] != ". ":
                        if notes[-1] == ".":
                            notes += " "
                        else:
                            notes += ". "
                    notes += n.title
            if len(notes):
                notes = "<br><br>**NOTES:**<br>" + \
                    "<br>".join(textwrap.wrap(
                        notes, 120, break_long_words=True))

            dictt["task"] = "<br>".join(
                textwrap.wrap(task.title[2:], 120,
                              break_long_words=True)) + notes
            dictt["task"] = dictt["task"].encode("utf-8")
            dictt["completed"] = dateCompleted
            dictt["project"] = project

            # SET ENCODE ERROR RETURN VALUE

            # RECODE INTO ASCII
            dictt["task"] = dictt["task"].decode("utf-8")
            dictt["completed"] = dictt["completed"].decode("utf-8")
            dictt["project"] = dictt["project"].decode("utf-8")
            taskLog[dictt["task"] + dictt["completed"] +
                    dictt["project"]] = dictt

        taskLog = taskLog.values()

        taskLog = sorted(taskLog, key=itemgetter('task'), reverse=True)
        taskLog = sorted(taskLog, key=itemgetter('project'), reverse=True)
        taskLog = sorted(taskLog, key=itemgetter('completed'), reverse=True)

        dataSet = list_of_dictionaries(log=self.log,
                                       listOfDictionaries=taskLog)

        markdownData = dataSet.markdown(filepath=None)

        try:
            self.log.debug("attempting to open the file %s" %
                           (mdArchiveFile, ))
            writeFile = codecs.open(mdArchiveFile, encoding='utf-8', mode='w')
        except IOError, e:
            message = 'could not open the file %s' % (mdArchiveFile, )
            self.log.critical(message)
            raise IOError(message)
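
        # NOTE: the snippet is truncated here in the original source; the
        # rendered markdown table is presumably then written out to the
        # tasklog file, along the lines of:
        #
        #     writeFile.write(markdownData)
        #     writeFile.close()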
Example #11
def main(arguments=None):
    """
    *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
    """

    # setup the command-line util settings
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="WARNING",
        options_first=False,
        projectName="rockfinder",
        defaultSettingsFile=True
    )
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE cl_utils.py AT %s' %
        (startTime,))

    # CALL FUNCTIONS/OBJECTS

    if init:
        from os.path import expanduser
        home = expanduser("~")
        filepath = home + "/.config/rockfinder/rockfinder.yaml"
        cmd = """open %(filepath)s""" % locals()
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        try:
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        try:
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        return

    if where and orbfitFlag:
        from rockfinder import orbfit_ephemeris
        eph = orbfit_ephemeris(
            log=log,
            objectId=objectId,
            mjd=mjd,
            obscode=obscode,
            settings=settings,
            verbose=extraFlag
        )
    else:
        from rockfinder import jpl_horizons_ephemeris
        eph = jpl_horizons_ephemeris(
            log=log,
            objectId=objectId,
            mjd=mjd,
            obscode=obscode,
            verbose=extraFlag
        )

    dataSet = list_of_dictionaries(
        log=log,
        listOfDictionaries=eph
    )
    # xfundamentals-render-list-of-dictionaries

    output = dataSet.table(filepath=None)
    if csv:
        output = dataSet.csv(filepath=None)
    elif json:
        output = dataSet.json(filepath=None)
    elif yaml:
        output = dataSet.yaml(filepath=None)
    elif md:
        output = dataSet.markdown(filepath=None)
    elif rst:
        output = dataSet.reST(filepath=None)

    print output

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
             (endTime, runningTime, ))

    return
    def parse_panstarrs_nightlogs(
            self,
            updateAll=False):
        """*download and parse the ps1 night logs from the range of time a wave survey campaign is active*

        The night-log data is added to the ps1_nightlogs table

        **Key Arguments:**
            - ``updateAll`` -- update all of the PS1 nightlogs. This will take some time; the default is to lift only the logs from the last 7 days. Default *False*.

        **Return:**
            - None

        **Usage:**
            ..  todo::

                - add usage info
                - create a sublime snippet for usage
                - update package tutorial if needed

            .. code-block:: python

                usage code

        """
        self.log.debug('starting the ``parse_panstarrs_nightlogs`` method')

        # CONVERTER TO CONVERT MJD TO DATE
        converter = conversions(
            log=self.log
        )

        createStatement = """
CREATE TABLE `ps1_nightlogs` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
  `airm` double DEFAULT NULL,
  `comments` varchar(200) DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `etime` double DEFAULT NULL,
  `f` varchar(10) DEFAULT NULL,
  `filesetID` varchar(100) DEFAULT NULL,
  `raDeg` double DEFAULT NULL,
  `telescope_pointing` varchar(200) DEFAULT NULL,
  `time_registered` datetime DEFAULT NULL,
  `type` varchar(100) DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
  `updated` varchar(45) DEFAULT '0',
  PRIMARY KEY (`primaryId`),
  UNIQUE KEY `filesetid` (`filesetID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
"""

        from astrocalc.times import now
        mjdNow = now(
            log=self.log
        ).get_mjd()

        # WAVE METADATA FOUND IN SETTINGS FILE
        for wave in self.settings["gravitational waves"]:
            # GIVE A 3 DAY WINDOW EITHER SIDE OF WAVE TIME-RANGE
            mjdLower = int(self.settings["gravitational waves"][
                wave]["mjd"] - 21. - 3.)
            mjdUpper = int(self.settings["gravitational waves"][
                wave]["mjd"] + 31. + 3.)

            if updateAll == False:
                if mjdUpper < mjdNow - 7.:
                    continue
                if mjdUpper > mjdNow:
                    mjdUpper = int(mjdNow)
                if mjdLower < mjdNow - 7.:
                    mjdLower = int(mjdNow - 7.)

            # METRIC NIGHT LOGS FOR EACH NIGHT FOUND AT A URL SIMILAR TO :
            # "http://ipp0022.ifa.hawaii.edu/ps1sc/metrics/2016-12-14/index.html"
            urls = []
            for i in range(mjdUpper - mjdLower + 3):
                mjd = i + mjdLower
                utDate = converter.mjd_to_ut_datetime(
                    mjd=mjd,
                    sqlDate=False,
                    datetimeObject=True
                )
                utDate = utDate.strftime("%Y-%m-%d")
                urls.append("http://ipp0022.ifa.hawaii.edu/ps1sc/metrics/%(utDate)s/index.html" % locals(
                ))

            localUrls = multiobject_download(
                urlList=urls,
                downloadDirectory="/tmp",
                log=self.log,
                timeStamp=True,
                timeout=180,
                concurrentDownloads=2,
                resetFilename=False,
                credentials=False,  # { 'username': "...", 'password': "..." }
                longTime=True,
                indexFilenames=False
            )

            for url in localUrls:
                if not url:
                    continue
                pathToReadFile = url
                try:
                    self.log.debug("attempting to open the file %s" %
                                   (pathToReadFile,))
                    readFile = codecs.open(
                        pathToReadFile, encoding='utf-8', mode='r')
                    thisData = readFile.read()
                    readFile.close()
                except IOError, e:
                    message = 'could not open the file %s' % (pathToReadFile,)
                    self.log.critical(message)
                    raise IOError(message)
                readFile.close()
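
                # EXTRACT THE PIPE-DELIMITED NIGHT-LOG TABLE EMBEDDED IN THE
                # PAGE'S <pre> BLOCK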

                regex = re.compile(r'<pre>\s*# (filesetID.*?)</pre>', re.S)
                matchObject = re.finditer(
                    regex,
                    thisData
                )

                for match in matchObject:
                    csvReader = csv.DictReader(
                        io.StringIO(match.group(1)), delimiter='|')
                    nightLog = []
                    for row in csvReader:
                        cleanDict = {}
                        for k, v in row.iteritems():
                            cleanDict[k.strip().replace(" ", "_")] = v.strip()
                        if "telescope_pointing" in cleanDict:
                            cleanDict["raDeg"] = cleanDict["telescope_pointing"].split()[
                                0]
                            cleanDict["decDeg"] = cleanDict["telescope_pointing"].split()[
                                1]
                        if "time_registered" in cleanDict:
                            cleanDict["time_registered"] = cleanDict[
                                "time_registered"].replace("Z", "")
                        nightLog.append(cleanDict)

                dataSet = list_of_dictionaries(
                    log=self.log,
                    listOfDictionaries=nightLog
                )
                # Recursively create missing directories
                if not os.path.exists("/tmp/ps1_nightlogs"):
                    os.makedirs("/tmp/ps1_nightlogs")
                mysqlData = dataSet.mysql(
                    tableName="ps1_nightlogs", filepath="/tmp/ps1_nightlogs/ps1_nightlog_%(utDate)s.sql" % locals(), createStatement=createStatement)

                directory_script_runner(
                    log=self.log,
                    pathToScriptDirectory="/tmp/ps1_nightlogs",
                    databaseName=self.settings["database settings"][
                        "ligo_virgo_waves"]["db"],
                    loginPath=self.settings["database settings"][
                        "ligo_virgo_waves"]["loginPath"],
                    successRule="delete",
                    failureRule="failed"
                )
Example #13
def main(arguments=None):
    """
    *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
    """

    # setup the command-line util settings
    su = tools(arguments=arguments,
               docString=__doc__,
               logLevel="WARNING",
               options_first=False,
               projectName="rockfinder",
               defaultSettingsFile=True)
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val, ))
        else:
            exec(varname + " = %s" % (val, ))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (
            varname,
            val,
        ))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime, ))

    # CALL FUNCTIONS/OBJECTS

    if init:
        from os.path import expanduser
        home = expanduser("~")
        filepath = home + "/.config/rockfinder/rockfinder.yaml"
        cmd = """open %(filepath)s""" % locals()
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        try:
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        try:
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        return

    if where and orbfitFlag:
        from rockfinder import orbfit_ephemeris
        eph = orbfit_ephemeris(log=log,
                               objectId=objectId,
                               mjd=mjd,
                               obscode=obscode,
                               settings=settings,
                               verbose=extraFlag)
    else:
        from rockfinder import jpl_horizons_ephemeris
        eph = jpl_horizons_ephemeris(log=log,
                                     objectId=objectId,
                                     mjd=mjd,
                                     obscode=obscode,
                                     verbose=extraFlag)

    dataSet = list_of_dictionaries(log=log, listOfDictionaries=eph)
    # xfundamentals-render-list-of-dictionaries

    output = dataSet.table(filepath=None)
    if csv:
        output = dataSet.csv(filepath=None)
    elif json:
        output = dataSet.json(filepath=None)
    elif yaml:
        output = dataSet.yaml(filepath=None)
    elif md:
        output = dataSet.markdown(filepath=None)
    elif rst:
        output = dataSet.reST(filepath=None)
    print(output)

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (
            endTime,
            runningTime,
        ))

    return
Example #14
    def get(self):
        """
        *get the cone_search object*

        **Return:**
            - ``results`` -- the results of the conesearch

        """
        self.log.info('starting the ``get`` method')

        # sort results by angular separation
        from operator import itemgetter
        results = list(self.squareResults)
        results = sorted(
            results, key=itemgetter('separation_arcsec'), reverse=True)

        # order of results
        headers = ["sdss_name", "type", "ra", "dec", "specz", "specz_err", "photoz",
                   "photoz_err", "separation_arcsec", "separation_north_arcsec", "separation_east_arcsec"]

        import collections
        orderDict = collections.OrderedDict(sorted({}.items()))

        # filter out results greater than the search radius
        filteredResults = []
        for row in results:
            if float(row["separation_arcsec"]) < self.searchRadius:
                orderDict = collections.OrderedDict(sorted({}.items()))
                for h in headers:
                    if h in row.keys():
                        orderDict[h] = row[h]
                filteredResults.append(orderDict)
            else:
                pass

        if self.nearest and len(filteredResults):
            orderDict = collections.OrderedDict(sorted({}.items()))
            for h in headers:
                if h in filteredResults[0].keys():
                    orderDict[h] = filteredResults[0][h]
            filteredResults = [orderDict]
            # filteredResults = [filteredResults[0]]

        if not len(filteredResults):
            orderDict = collections.OrderedDict(sorted({}.items()))
            for h in headers:
                if self.galaxyType == "all" or self.galaxyType == False or (self.galaxyType == "specz" and h not in ["photoz_err", "photoz"]) or (self.galaxyType == "photoz" and h not in ["specz", "specz_err"]):
                    orderDict[h] = ""
            filteredResults = [orderDict]

        # pretty format print
        dataSet = list_of_dictionaries(
            log=self.log,
            listOfDictionaries=list(reversed(filteredResults))
        )
        if self.outputFormat == "csv":
            results = dataSet.csv()
        else:
            results = dataSet.table()

        # sdss only allows 60 hits per minute
        sleep(1)

        self.log.info('completed the ``get`` method')
        return results
Example #15
def main(arguments=None):
    """
    The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command

    .. todo::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    # setup the command-line util settings

    su = tools(arguments=arguments,
               docString=__doc__,
               logLevel="WARNING",
               options_first=False,
               projectName="sherlock",
               distributionName="qub-sherlock")
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
            if varname == "import":
                varname = "iimport"
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val, ))
        else:
            exec(varname + " = %s" % (val, ))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (
            varname,
            val,
        ))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.debug('--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime, ))

    # call the worker function
    # x-if-settings-or-database-credientials
    if init:
        from os.path import expanduser
        home = expanduser("~")
        filepath = home + "/.config/sherlock/sherlock.yaml"
        cmd = """open %(filepath)s""" % locals()
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        try:
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        try:
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass

    if match or dbmatch:
        if verboseFlag:
            verbose = 2
        else:
            verbose = 1

        if skipNedUpdateFlag:
            updateNed = False
        else:
            updateNed = True

        if skipMagUpdateFlag:
            updatePeakMags = False
        else:
            updatePeakMags = True

        classifier = transient_classifier.transient_classifier(
            log=log,
            settings=settings,
            ra=ra,
            dec=dec,
            name=False,
            verbose=verbose,
            update=updateFlag,
            updateNed=updateNed,
            updatePeakMags=updatePeakMags)
        classifier.classify()

    if clean:
        cleaner = database_cleaner(log=log, settings=settings)
        cleaner.clean()
    if wiki:
        updateWiki = update_wiki_pages(log=log, settings=settings)
        updateWiki.update()

    if iimport and ned:
        ned = nedStreamImporter(log=log,
                                settings=settings,
                                coordinateList=["%(ra)s %(dec)s" % locals()],
                                radiusArcsec=radiusArcsec)
        ned.ingest()
    if iimport and cat:

        if cat_name == "veron":
            catalogue = veronImporter(log=log,
                                      settings=settings,
                                      pathToDataFile=pathToDataFile,
                                      version=cat_version,
                                      catalogueName=cat_name)
            catalogue.ingest()

        if "ned_d" in cat_name:
            catalogue = nedImporter(log=log,
                                    settings=settings,
                                    pathToDataFile=pathToDataFile,
                                    version=cat_version,
                                    catalogueName=cat_name)
            catalogue.ingest()
    if iimport and stream:
        if "marshall" in stream_name:
            stream = marshallImporter(
                log=log,
                settings=settings,
            )
            stream.ingest()
        if "ifs" in stream_name:
            stream = ifsImporter(log=log, settings=settings)
            stream.ingest()
    if not init and not match and not clean and not wiki and not iimport and ra:

        classifier = transient_classifier.transient_classifier(
            log=log,
            settings=settings,
            ra=ra,
            dec=dec,
            name=False,
            verbose=verboseFlag)
        classifier.classify()

    if info:
        print "sherlock-catalogues"
        wiki = update_wiki_pages(log=log, settings=settings)
        table = list(wiki._get_table_infos(trimmed=True))

        dataSet = list_of_dictionaries(log=log, listOfDictionaries=table)
        tableData = dataSet.reST(filepath=None)
        print tableData
        print

        print "Crossmatch Streams"
        table = list(wiki._get_stream_view_infos(trimmed=True))
        dataSet = list_of_dictionaries(log=log, listOfDictionaries=table)
        tableData = dataSet.reST(filepath=None)
        print tableData
        print

        print "Views on Catalogues and Streams"

        table = list(wiki._get_view_infos(trimmed=True))
        dataSet = list_of_dictionaries(log=log, listOfDictionaries=table)
        tableData = dataSet.reST(filepath=None)
        print tableData

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.debug(
        '-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (
            endTime,
            runningTime,
        ))

    return
Example #16
    def get(self):
        """
        *get the survey footprint stats and print to screen/file*

        **Return:**
            - ``None``
        """
        self.log.debug('starting the ``get`` method')

        # GRAB METADATA FROM THE DATABASES
        this = plot_wave_observational_timelines(
            log=self.log, settings=self.settings)
        plotParameters, ps1Transients, ps1Pointings, atlasPointings, atlasTransients = this.get_gw_parameters_from_settings(
            gwid=self.gwid,
            stackOnly=False)

        if self.telescope == "atlas":
            pointings = atlasPointings
            pointingSide = 5.46
        if self.telescope == "ps1":
            pointings = ps1Pointings
            pointingSide = 0.4
        telescope = self.telescope.upper()

        # SORT ALL POINTINGS VIA MJD
        pointings = sorted(list(pointings),
                           key=itemgetter('mjd'), reverse=False)

        nside, hpixArea, aMap, healpixIds, wr, wd = self._create_healpixid_coordinate_grid()

        print "EXPID, RA, DEC, MJD, EXPTIME, FILTER, LIM-MAG, EXP-AREA, EXP-LIKELIHOOD, CUM-AREA, CUM-LIKELIHOOD" % locals()

        allHealpixIds = np.array([])
        dictList = []
        iindex = 0
        count = len(pointings)
        cumArea = 0
        cumProb = 0
        for pti, pt in enumerate(pointings):
            pti = pti + 1

            if pti > 1:
                # Cursor up one line and clear line
                sys.stdout.write("\x1b[1A\x1b[2K")

            percent = (float(pti) / float(count)) * 100.
            print '%(pti)s/%(count)s (%(percent)1.1f%% done): summing total area and likelihood covered by %(telescope)s' % locals()

            thisDict = collections.OrderedDict(sorted({}.items()))

            pra = pt["raDeg"]
            pdec = pt["decDeg"]
            pmjd = pt["mjd"]
            pexpid = pt["exp_id"]
            pexptime = pt["exp_time"]
            pfilter = pt["filter"]
            plim = pt["limiting_magnitude"]

            # DETERMINE THE CORNERS FOR EACH ATLAS EXPOSURE AS MAPPED TO THE
            # SKY
            decCorners = (pdec - pointingSide / 2,
                          pdec + pointingSide / 2)
            corners = []
            for d in decCorners:
                if d > 90.:
                    d = 180. - d
                elif d < -90.:
                    d = -180 - d
                raCorners = (pra - (pointingSide / 2) / np.cos(d * self.DEG_TO_RAD_FACTOR),
                             pra + (pointingSide / 2) / np.cos(d * self.DEG_TO_RAD_FACTOR))
                for r in raCorners:
                    if r > 360.:
                        r = 720. - r
                    elif r < 0.:
                        r = 360. + r
                    corners.append(hp.ang2vec(r, d, lonlat=True))

            # FLIP CORNERS 3 & 4 SO HEALPY UNDERSTANDS POLYGON SHAPE
            corners = [corners[0], corners[1],
                       corners[3], corners[2]]

            # RETURN HEALPIXELS IN EXPOSURE AREA
            expPixels = hp.query_polygon(nside, np.array(
                corners))

            expProb = []
            expProb[:] = [aMap[i] for i in expPixels]
            expProb = sum(expProb)
            expArea = len(expPixels) * hpixArea
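            # SKIP EXPOSURES THAT CONTRIBUTE ONLY A NEGLIGIBLE MEAN LIKELIHOOD
            # DENSITY TO THE CUMULATIVE COVERAGE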
            if expProb / expArea < 2e-6:
                continue

            pindex = "%(iindex)05d" % locals()
            iindex += 1

            allHealpixIds = np.append(allHealpixIds, expPixels)
            allHealpixIds = np.unique(allHealpixIds)
            cumProb = []
            cumProb[:] = [aMap[int(i)] for i in allHealpixIds]
            cumProb = sum(cumProb)
            cumArea = len(allHealpixIds) * hpixArea
            thisDict["INDEX"] = pindex
            thisDict["EXPID"] = pexpid
            thisDict["RA"] = "%(pra)5.5f" % locals()
            thisDict["DEC"] = "%(pdec)5.5f" % locals()
            thisDict["MJD"] = "%(pmjd)6.6f" % locals()
            thisDict["EXPTIME"] = "%(pexptime)02.1f" % locals()
            thisDict["FILTER"] = pfilter
            try:
                thisDict["LIM-MAG"] = "%(plim)5.2f" % locals()
            except:
                thisDict["LIM-MAG"] = "NaN"
            # thisDict["EXP-AREA"] = expArea
            # thisDict["EXP-LIKELIHOOD"] = expProb
            thisDict["CUM-AREA"] = "%(cumArea)05.2f" % locals()
            thisDict["CUM-LIKELIHOOD"] = "%(cumProb)05.2f" % locals()
            dictList.append(thisDict)

        if not len(dictList):
            thisDict = {}
            thisDict["INDEX"] = "NULL"
            thisDict["EXPID"] = "NULL"
            thisDict["RA"] = "NULL"
            thisDict["DEC"] = "NULL"
            thisDict["MJD"] = "NULL"
            thisDict["EXPTIME"] = "NULL"
            thisDict["FILTER"] = "NULL"
            thisDict["LIM-MAG"] = "NULL"
            dictList.append(thisDict)

        print "AREA: %(cumArea)0.2f. PROB: %(cumProb)0.5f" % locals()

        printFile = self.settings["output directory"] + "/" + \
            self.gwid + "/" + self.gwid + "-" + self.telescope + "-coverage-stats.csv"

        # RECURSIVELY CREATE MISSING DIRECTORIES
        if not os.path.exists(self.settings["output directory"] + "/" + self.gwid):
            os.makedirs(self.settings["output directory"] + "/" + self.gwid)

        dataSet = list_of_dictionaries(
            log=self.log,
            listOfDictionaries=dictList,
        )
        csvData = dataSet.csv(filepath=printFile)

        print "The coverage stats file was written to `%(printFile)s`" % locals()

        self.log.debug('completed the ``get`` method')
        return None
Example #17
    def convert_sqlite_to_mysql(self):
        """*copy the contents of the sqlite database into the mysql database*

        See class docstring for usage
        """
        from fundamentals.renderer import list_of_dictionaries
        from fundamentals.mysql import directory_script_runner
        self.log.debug('starting the ``convert_sqlite_to_mysql`` method')

        con = lite.connect(self.pathToSqlite)
        con.row_factory = lite.Row
        cur = con.cursor()

        # GET ALL TABLE NAMES
        cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = cur.fetchall()

        createStatements = []
        inserts = []
        for table in tables:
            table = table['name']
            if table == "sqlite_sequence":
                continue

            # CREATE TABLE collection_books (folder_id, fingerprint, primary key(folder_id, fingerprint));
            # GENERATE THE MYSQL CREATE STATEMENTS FOR EACH TABLE
            cur.execute(
                "SELECT sql FROM sqlite_master WHERE name = '%(table)s';" %
                locals())
            createStatement = cur.fetchone()
            createStatement = createStatement[0].replace('"', '`') + ";"
            if "DEFAULT" not in createStatement:
                if "primary key(" in createStatement:
                    tmp = createStatement.split("primary key(")
                    tmp[0] = tmp[0].replace(",", " varchar(150) DEFAULT NULL,")
                    createStatement = ("primary key(").join(tmp)
                if "primary key," in createStatement:
                    tmp = createStatement.split("primary key,")
                    tmp[1] = tmp[1].replace(",", " varchar(150) DEFAULT NULL,")
                    tmp[1] = tmp[1].replace(");",
                                            " varchar(150) DEFAULT NULL);")
                    createStatement = ("primary key,").join(tmp)
            createStatement = createStatement.replace(
                "INTEGER PRIMARY KEY", "INTEGER AUTO_INCREMENT PRIMARY KEY")
            createStatement = createStatement.replace("AUTOINCREMENT",
                                                      "AUTO_INCREMENT")
            createStatement = createStatement.replace("DEFAULT 't'",
                                                      "DEFAULT '1'")
            createStatement = createStatement.replace("DEFAULT 'f'",
                                                      "DEFAULT '0'")
            createStatement = createStatement.replace(",'t'", ",'1'")
            createStatement = createStatement.replace(",'f'", ",'0'")
            if "CREATE TABLE `" in createStatement:
                createStatement = createStatement.replace(
                    "CREATE TABLE `",
                    "CREATE TABLE IF NOT EXISTS `" + self.tablePrefix)
            else:
                createStatement = createStatement.replace(
                    "CREATE TABLE ",
                    "CREATE TABLE IF NOT EXISTS " + self.tablePrefix)
            if ", primary key(" in createStatement:
                createStatement = createStatement.replace(
                    ", primary key(", """,
`dateLastModified` datetime DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
primary key(""")
            else:
                createStatement = createStatement.replace(
                    ");", """,
    `dateLastModified` datetime DEFAULT NULL,
    `updated` tinyint(4) DEFAULT '0');
                """)
            createStatement = createStatement.replace(
                " text primary key", " varchar(100) primary key")
            createStatement = createStatement.replace(
                "`EntryText` TEXT NOT NULL,", "`EntryText` TEXT,")
            createStatement = createStatement.replace(
                "`SelectionText` TEXT NOT NULL", "`SelectionText` TEXT")
            createStatement = createStatement.replace(
                "`Filename` INTEGER NOT NULL,", "`Filename` TEXT NOT NULL,")
            createStatement = createStatement.replace(
                "`SessionPartUUID` TEXT NOT NULL UNIQUE,",
                "`SessionPartUUID` VARCHAR(100) NOT NULL UNIQUE,")
            createStatement = createStatement.replace(
                "`Name` TEXT PRIMARY KEY NOT NULL",
                "`Name` VARCHAR(100) PRIMARY KEY NOT NULL")

            # GRAB THE DATA TO ADD TO THE MYSQL DATABASE TABLES
            cur.execute("SELECT * from '%(table)s';" % locals())
            rows = cur.fetchall()

            allRows = []
            for row in rows:
                allRows.append(dict(row))

            # RECURSIVELY CREATE MISSING DIRECTORIES
            if not os.path.exists("/tmp/headjack/"):
                os.makedirs("/tmp/headjack/")

            # DUMP THE DATA INTO A MYSQL DATABASE
            dataSet = list_of_dictionaries(log=self.log,
                                           listOfDictionaries=allRows)
            originalList = dataSet.list
            now = datetime.now()
            now = now.strftime("%Y%m%dt%H%M%S%f.sql")
            mysqlData = dataSet.mysql(tableName=self.tablePrefix + table,
                                      filepath="/tmp/headjack/" + now,
                                      createStatement=createStatement)

            directory_script_runner(
                log=self.log,
                pathToScriptDirectory="/tmp/headjack/",
                databaseName=self.settings["database settings"]["db"],
                loginPath=self.settings["database settings"]["loginPath"],
                successRule="delete",
                failureRule="failed")

        con.close()

        self.log.debug('completed the ``convert_sqlite_to_mysql`` method')
        return None
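
    # A hedged usage sketch for the conversion method above; the class name and
    # constructor arguments below are inferred from the attributes it uses
    # (``self.pathToSqlite``, ``self.tablePrefix``, ``self.settings``) and are
    # assumptions, not taken from the class docstring it refers to:
    #
    #     from fundamentals.mysql import sqlite2mysql
    #     converter = sqlite2mysql(
    #         log=log,
    #         settings=settings,
    #         pathToSqlite="/path/to/db.sqlite",
    #         tablePrefix="headjack"
    #     )
    #     converter.convert_sqlite_to_mysql()
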
    def update_gravity_event_annotations(
            self):
        """*update gravity event annotations*

        **Key Arguments:**
            # -

        **Return:**
            - None

        **Usage:**
            ..  todo::

                - add usage info
                - create a sublime snippet for usage
                - write a command-line tool for this method
                - update package tutorial with command-line tool info if needed

            .. code-block:: python

                usage code

        """
        self.log.debug(
            'starting the ``update_gravity_event_annotations`` method')

        from breaker.transients import annotator

        # CREATE THE ANNOTATION HELPER TABLES IF THEY DON"T EXIST
        moduleDirectory = os.path.dirname(__file__)
        mysql_scripts = moduleDirectory + "/resources/mysql"
        for db in ["ps1gw", "ps13pi", "atlas"]:
            directory_script_runner(
                log=self.log,
                pathToScriptDirectory=mysql_scripts,
                databaseName=self.settings["database settings"][db]["db"],
                loginPath=self.settings["database settings"][db]["loginPath"],
                waitForResult=True,
                successRule=False,
                failureRule=False
            )
        for db in ["ligo_virgo_waves"]:
            directory_script_runner(
                log=self.log,
                pathToScriptDirectory=mysql_scripts + "/ps1_skycell_help_tables",
                databaseName=self.settings["database settings"][db]["db"],
                loginPath=self.settings["database settings"][db]["loginPath"],
                waitForResult=True,
                successRule=False,
                failureRule=False
            )

        # UPDATE THE TABLE WITH THE METADATA OF EACH GRAVITY EVENT
        sqlQuery = ""
        for g in self.settings["gravitational waves"]:
            h = self.settings["gravitational waves"][g]["human-name"]
            m = self.settings["gravitational waves"][g]["mjd"]
            cmd = """insert ignore into tcs_gravity_events (`gracedb_id`, `gravity_event_id`, `mjd`) VALUES ("%(g)s", "%(h)s", %(m)s) on duplicate key update mjd=%(m)s;\n""" % locals(
            )
            sqlQuery += cmd
        for db in [self.atlasDbConn, self.ps1gwDbConn, self.ps13piDbConn]:
            writequery(
                log=self.log,
                sqlQuery=sqlQuery,
                dbConn=db
            )
        sqlQuery = sqlQuery.replace("tcs_gravity_events", "gravity_events")
        writequery(
            log=self.log,
            sqlQuery=sqlQuery,
            dbConn=self.ligo_virgo_wavesDbConn,
        )
        for db in ["ps1gw", "ps13pi", "atlas"]:
            directory_script_runner(
                log=self.log,
                pathToScriptDirectory=mysql_scripts,
                databaseName=self.settings["database settings"][db]["db"],
                loginPath=self.settings["database settings"][db]["loginPath"],
                waitForResult=True,
                successRule=False,
                failureRule=False
            )
        for db in ["ligo_virgo_waves"]:
            directory_script_runner(
                log=self.log,
                pathToScriptDirectory=mysql_scripts + "/ps1_skycell_help_tables",
                databaseName=self.settings["database settings"][db]["db"],
                loginPath=self.settings["database settings"][db]["loginPath"],
                waitForResult=True,
                successRule=False,
                failureRule=False
            )

        dbDict = {
            "ps1gw": self.ps1gwDbConn,
            "atlas": self.atlasDbConn,
            "ps13pi": self.ps13piDbConn,
            "ligo_virgo_waves": self.ligo_virgo_wavesDbConn
        }

        for db in dbDict.keys():

            for g in self.settings["gravitational waves"]:
                h = self.settings["gravitational waves"][g]["human-name"]
                print "Annotating new transients associated with gravity event %(h)s" % locals()
                m = self.settings["gravitational waves"][g]["mjd"]
                mapPath = self.settings["gravitational waves"][g]["mapPath"]
                mapName = os.path.basename(mapPath)

                thisDbConn = dbDict[db]

                if thisDbConn in [self.ps1gwDbConn, self.ps13piDbConn]:

                    sqlQuery = u"""
                        SELECT
                            a.transient_object_id, a.gracedb_id, t.ra_psf, t.dec_psf
                        FROM
                            tcs_transient_objects t,
                            tcs_gravity_event_annotations a
                        WHERE
                            a.transient_object_id = t.id
                                AND t.detection_list_id != 0
                                AND (a.map_name !=
                                     "%(mapName)s"  or a.map_name is null)
                                AND a.gracedb_id="%(g)s";
                    """ % locals()

                    rows = readquery(
                        log=self.log,
                        sqlQuery=sqlQuery,
                        dbConn=thisDbConn,
                        quiet=False
                    )

                    transients = {}
                    for r in rows:
                        transients[r["transient_object_id"]] = (
                            r["ra_psf"], r["dec_psf"])

                    an = annotator(
                        log=self.log,
                        settings=self.settings,
                        gwid=g
                    )
                    transientNames, probs = an.annotate(transients)

                if thisDbConn in [self.atlasDbConn]:
                    sqlQuery = u"""
                        SELECT
                            a.transient_object_id, a.gracedb_id, t.ra, t.dec
                        FROM
                            atlas_diff_objects t,
                            tcs_gravity_event_annotations a
                        WHERE
                            a.transient_object_id = t.id
                                AND t.detection_list_id != 0
                                AND (a.map_name !=
                                     "%(mapName)s"  or a.map_name is null)
                                AND a.gracedb_id="%(g)s";
                    """ % locals()
                    rows = readquery(
                        log=self.log,
                        sqlQuery=sqlQuery,
                        dbConn=thisDbConn,
                        quiet=False
                    )

                    transients = {}
                    for r in rows:
                        transients[r["transient_object_id"]] = (
                            r["ra"], r["dec"])

                    an = annotator(
                        log=self.log,
                        settings=self.settings,
                        gwid=g
                    )
                    transientNames, probs = an.annotate(transients)

                if thisDbConn in [self.ligo_virgo_wavesDbConn]:

                    # PANSTARRS SKYCELLS
                    sqlQuery = u"""
                        SELECT 
                                a.skycell_id, a.gracedb_id, t.raDeg, t.decDeg
                            FROM
                                ps1_skycell_map t,
                                ps1_skycell_gravity_event_annotations a
                            WHERE
                                a.skycell_id = t.skycell_id
                                AND (a.map_name != "%(mapName)s"  or a.map_name is null)
                                AND a.gracedb_id="%(g)s"; 
                    """ % locals()
                    rows = readquery(
                        log=self.log,
                        sqlQuery=sqlQuery,
                        dbConn=thisDbConn,
                        quiet=False
                    )

                    exposures = {}
                    for r in rows:
                        exposures[r["skycell_id"]] = (
                            r["raDeg"], r["decDeg"])

                    stats = survey_footprint(
                        log=self.log,
                        settings=self.settings,
                        gwid=g
                    )
                    exposureIDs, probs = stats.annotate_exposures(
                        exposures=exposures,
                        pointingSide=0.4
                    )

                    dataList = []
                    for p, t in zip(probs, exposureIDs):
                        dataList.append({
                            "skycell_id": t,
                            "prob_coverage": p,
                            "gracedb_id": g,
                            "map_name": mapName
                        })
                    tableName = "ps1_skycell_gravity_event_annotations"

                    dataSet = list_of_dictionaries(
                        log=self.log,
                        listOfDictionaries=dataList,
                        reDatetime=re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T')
                    )
                    # RECURSIVELY CREATE MISSING DIRECTORIES
                    if not os.path.exists("/tmp/mysqlinsert/%(db)s" % locals()):
                        os.makedirs("/tmp/mysqlinsert/%(db)s" % locals())
                    now = datetime.now()
                    now = now.strftime("%Y%m%dt%H%M%S%f")
                    mysqlData = dataSet.mysql(
                        tableName=tableName, filepath="/tmp/mysqlinsert/%(db)s/%(now)s.sql" % locals(), createStatement=False)
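                    # NOTE: dataSet.mysql() WRITES THE INSERT STATEMENTS TO A SCRIPT UNDER
                    # /tmp/mysqlinsert/<db>/ RATHER THAN EXECUTING THEM DIRECTLY; THE SCRIPTS
                    # ARE RUN IN A SINGLE PASS BY directory_script_runner AT THE END OF THIS METHOD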

                    # ATLAS EXPOSURES
                    sqlQuery = u"""
                        SELECT 
                                atlas_object_id, gracedb_id, raDeg, decDeg
                            FROM
                                atlas_exposure_gravity_event_annotations
                            WHERE
                                (map_name != "%(mapName)s"  or map_name is null)
                                AND gracedb_id="%(g)s"; 
                    """ % locals()
                    rows = readquery(
                        log=self.log,
                        sqlQuery=sqlQuery,
                        dbConn=thisDbConn,
                        quiet=False
                    )

                    exposures = {}
                    for r in rows:
                        exposures[r["atlas_object_id"]] = (
                            r["raDeg"], r["decDeg"])

                    stats = survey_footprint(
                        log=self.log,
                        settings=self.settings,
                        gwid=g
                    )
                    exposureIDs, probs = stats.annotate_exposures(
                        exposures=exposures,
                        pointingSide=5.46
                    )

                    dataList = []
                    for p, t in zip(probs, exposureIDs):
                        dataList.append({
                            "atlas_object_id": t,
                            "prob_coverage": p,
                            "gracedb_id": g,
                            "map_name": mapName
                        })
                    tableName = "atlas_exposure_gravity_event_annotations"

                    dataSet = list_of_dictionaries(
                        log=self.log,
                        listOfDictionaries=dataList,
                        reDatetime=re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T')
                    )
                    # RECURSIVELY CREATE MISSING DIRECTORIES
                    if not os.path.exists("/tmp/mysqlinsert/%(db)s" % locals()):
                        os.makedirs("/tmp/mysqlinsert/%(db)s" % locals())
                    now = datetime.now()
                    now = now.strftime("%Y%m%dt%H%M%S%f")
                    mysqlData = dataSet.mysql(
                        tableName=tableName, filepath="/tmp/mysqlinsert/%(db)s/%(now)s.sql" % locals(), createStatement=False)

                if thisDbConn not in [self.ligo_virgo_wavesDbConn]:
                    dataList = []
                    for p, t in zip(probs, transientNames):
                        dataList.append({
                            "transient_object_id": t,
                            "enclosing_contour": p,
                            "gracedb_id": g,
                            "map_name": mapName
                        })
                    tableName = "tcs_gravity_event_annotations"

                    dataSet = list_of_dictionaries(
                        log=self.log,
                        listOfDictionaries=dataList,
                        reDatetime=re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T')
                    )
                    # RECURSIVELY CREATE MISSING DIRECTORIES
                    if not os.path.exists("/tmp/mysqlinsert/%(db)s" % locals()):
                        os.makedirs("/tmp/mysqlinsert/%(db)s" % locals())
                    now = datetime.now()
                    now = now.strftime("%Y%m%dt%H%M%S%f")
                    mysqlData = dataSet.mysql(
                        tableName=tableName, filepath="/tmp/mysqlinsert/%(db)s/%(now)s.sql" % locals(), createStatement=False)

        for db in dbDict.keys():
            directory_script_runner(
                log=self.log,
                pathToScriptDirectory="/tmp/mysqlinsert/%(db)s" % locals(),
                databaseName=self.settings["database settings"][db]["db"],
                loginPath=self.settings["database settings"][db]["loginPath"],
                waitForResult=True,
                successRule=False,
                failureRule=False
            )

        self.log.debug(
            'completed the ``update_gravity_event_annotations`` method')
        return None


def main(arguments=None):
    """
    *The main function used when ``find_atlas_exposure_containing_ssobject.py`` is run as a single script from the cl*
    """

    # SETUP VARIABLES
    # MAKE SURE THE HEALPIX PIXELS ARE SMALL ENOUGH TO MATCH FOOTPRINTS CORRECTLY
    nside = 1024
    pi = (4 * math.atan(1.0))
    DEG_TO_RAD_FACTOR = pi / 180.0
    RAD_TO_DEG_FACTOR = 180.0 / pi
    tileSide = 5.46

    i = 0
    outputList = []
    rsyncContent = []
    obscodes = {"02": "T05", "01": "T08"}

    # SETUP THE COMMAND-LINE UTIL SETTINGS
    su = tools(arguments=arguments,
               docString=__doc__,
               logLevel="WARNING",
               options_first=False,
               projectName=False)
    arguments, settings, log, dbConn = su.setup()

    # UNPACK REMAINING CL ARGUMENTS USING `EXEC` TO SETUP THE VARIABLE NAMES
    # AUTOMATICALLY
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val, ))
        else:
            exec(varname + " = %s" % (val, ))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (
            varname,
            val,
        ))
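    # e.g. THE POSITIONAL `<ssobject>` ARGUMENT BECOMES THE LOCAL VARIABLE `ssobject`,
    # AND AN OPTION LIKE `--fooBar` (HYPOTHETICAL) WOULD BECOME `fooBarFlag`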

    dbSettings = {
        'host': '127.0.0.1',
        'user': '******',
        'tunnel': {
            'remote ip': 'starbase.mp.qub.ac.uk',
            'remote datbase host': 'dormammu',
            'remote user': '******',
            'port': 5003
        },
        'password': '******',
        'db': 'atlas_moving_objects'
    }

    # SETUP DATABASE CONNECTIONS
    dbConn = database(log=log, dbSettings=dbSettings).connect()

    # GRAB THE EXPOSURE LISTING
    for expPrefix, obscode in obscodes.iteritems():
        exposureList = []
        mjds = []
        sqlQuery = "select * from atlas_exposures where expname like '%(expPrefix)s%%'" % locals(
        )
        connected = 0
        while connected == 0:
            try:
                rows = readquery(log=log,
                                 sqlQuery=sqlQuery,
                                 dbConn=dbConn,
                                 quiet=False)
                connected = 1
            except Exception:
                # SETUP DATABASE CONNECTIONS
                dbConn = database(log=log, dbSettings=dbSettings).connect()
                print "Can't connect to DB - try again"
                time.sleep(2)

        t = len(rows)

        print "There are %(t)s '%(expPrefix)s' exposures to check - hang tight" % locals(
        )

        for row in rows:
            row["mjd"] = row["mjd"] + row["exp_time"] / (2. * 60 * 60 * 24)
            exposureList.append(row)
            mjds.append(row["mjd"])

        results = []

        batchSize = 500
        total = len(mjds[1:])
        batches = int(total / batchSize)

        start = 0
        end = 0
        theseBatches = []
        for i in range(batches + 1):
            end = end + batchSize
            start = i * batchSize
            thisBatch = mjds[start:end]
            theseBatches.append(thisBatch)

        i = 0
        totalLen = len(theseBatches)
        index = 0
        for batch in theseBatches:
            i += 1

            if index > 1:
                # Cursor up one line and clear line
                sys.stdout.write("\x1b[1A\x1b[2K")
            print "Requesting batch %(i)04d/%(totalLen)s from JPL" % locals()
            index += 1
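            # REQUEST THE EPHEMERIS OF THE OBJECT AT THIS BATCH OF EXPOSURE MJDs IN A
            # SINGLE CALL TO JPL HORIZONS (BATCHES OF 500, PRESUMABLY TO KEEP EACH
            # REQUEST TO A MANAGEABLE SIZE)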

            eph = jpl_horizons_ephemeris(log=log,
                                         objectId=[ssobject],
                                         mjd=batch,
                                         obscode=obscode,
                                         verbose=False)

            for b in batch:
                match = 0
                # print b
                for row in eph:
                    if math.floor(row["mjd"] * 10000 +
                                  0.01) == math.floor(b * 10000 + 0.01):
                        match = 1
                        results.append(row)
                if match == 0:
                    for row in eph:
                        if math.floor(row["mjd"] * 10000) == math.floor(b *
                                                                        10000):
                            match = 1
                            results.append(row)
                if match == 0:
                    results.append(None)
                    this = math.floor(b * 10000 + 0.01)
                    print "MJD %(b)s (%(this)s) is missing" % locals()
                    for row in eph:
                        print math.floor(row["mjd"] * 10000 + 0.00001)
                    print ""

        print "Finding the exposures containing the SS object"

        for e, r in zip(exposureList, results):
            # CALCULATE SEPARATION IN ARCSEC
            if not r:
                continue

            calculator = separations(
                log=log,
                ra1=r["ra_deg"],
                dec1=r["dec_deg"],
                ra2=e["raDeg"],
                dec2=e["decDeg"],
            )
            angularSeparation, north, east = calculator.get()
            sep = float(angularSeparation) / 3600.
            if sep < 5.:

                # THE SKY-LOCATION AS A HEALPIXEL ID
                pinpoint = hp.ang2pix(nside,
                                      theta=r["ra_deg"],
                                      phi=r["dec_deg"],
                                      lonlat=True)

                decCorners = (e["decDeg"] - tileSide / 2,
                              e["decDeg"] + tileSide / 2)
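                # BUILD THE FOUR CORNERS OF THE EXPOSURE FOOTPRINT AS UNIT VECTORS; THE
                # RA HALF-WIDTH IS STRETCHED BY 1/cos(dec) SO THE TILE KEEPS ITS TRUE
                # ANGULAR SIZE AWAY FROM THE CELESTIAL EQUATOR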
                corners = []
                for d in decCorners:
                    if d > 90.:
                        d = 180. - d
                    elif d < -90.:
                        d = -180 - d
                    raCorners = (
                        e["raDeg"] -
                        (tileSide / 2) / np.cos(d * DEG_TO_RAD_FACTOR),
                        e["raDeg"] +
                        (tileSide / 2) / np.cos(d * DEG_TO_RAD_FACTOR))
                    for rc in raCorners:
                        if rc > 360.:
                            rc = 720. - rc
                        elif rc < 0.:
                            rc = 360. + rc
                        corners.append(hp.ang2vec(rc, d, lonlat=True))

                # NEAR THE POLES THE SQUARE FOOTPRINT COLLAPSES TOWARDS A TRIANGLE - ALMOST DEGENERATE
                pole = False
                for d in decCorners:
                    if d > 87.0 or d < -87.0:
                        pole = True

                if pole:
                    corners = corners[1:]
                else:
                    # FLIP CORNERS 3 & 4 SO HEALPY UNDERSTANDS POLYGON SHAPE
                    corners = [corners[0], corners[1], corners[3], corners[2]]

                # RETURN HEALPIXELS IN EXPOSURE AREA
                expPixels = hp.query_polygon(nside, np.array(corners))
                if pinpoint in expPixels:
                    outputList.append({
                        "obs": e["expname"],
                        "mjd": e["mjd"],
                        "raDeg": r["ra_deg"],
                        "decDeg": r["dec_deg"],
                        "mag": r["apparent_mag"],
                        "sep": sep
                    })
                    thisMjd = int(math.floor(e["mjd"]))
                    expname = e["expname"]
                    ssobject_ = ssobject.replace(" ", "_")
                    raStr = r["ra_deg"]
                    decStr = r["dec_deg"]
                    rsyncContent.append(
                        "rsync -av [email protected]:/atlas/red/%(expPrefix)sa/%(thisMjd)s/%(expname)s.fits.fz %(ssobject_)s_atlas_exposures/"
                        % locals())
                    rsyncContent.append(
                        "touch %(ssobject_)s_atlas_exposures/%(expname)s.location"
                        % locals())
                    rsyncContent.append(
                        'echo "_RAJ2000,_DEJ2000,OBJECT\n%(raStr)s,%(decStr)s,%(ssobject)s" > %(ssobject_)s_atlas_exposures/%(expname)s.location'
                        % locals())

    dataSet = list_of_dictionaries(
        log=log,
        listOfDictionaries=outputList,
        # use re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T') for mysql
        reDatetime=False)

    ssobject = ssobject.replace(" ", "_")
    csvData = dataSet.csv(
        filepath="./%(ssobject)s_atlas_exposure_matches.csv" % locals())
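    # THE CSV CONTAINS ONE ROW PER EXPOSURE FOUND TO CONTAIN THE OBJECT, e.g.
    # (HYPOTHETICAL VALUES, COLUMN ORDER MAY DIFFER):
    #   obs,mjd,raDeg,decDeg,mag,sep
    #   02a57000o0100o,57000.5123,150.1,-20.4,18.2,0.0003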

    rsyncContent = ("\n").join(rsyncContent)
    pathToWriteFile = "./%(ssobject)s_atlas_exposure_rsync.sh" % locals()
    try:
        log.debug("attempting to open the file %s" % (pathToWriteFile, ))
        writeFile = codecs.open(pathToWriteFile, encoding='utf-8', mode='w')
    except IOError, e:
        message = 'could not open the file %s' % (pathToWriteFile, )
        log.critical(message)
        raise IOError(message)
Example #20
def main(arguments=None):
    """
    *The main function used when `cl_utils.py` is run as a single script from the cl, or when installed as a cl command*
    """
    # setup the command-line util settings
    su = tools(arguments=arguments,
               docString=__doc__,
               logLevel="WARNING",
               options_first=False,
               distributionName="qub-sherlock",
               projectName="sherlock",
               defaultSettingsFile=True)
    arguments, settings, log, dbConn = su.setup()

    # tab completion for user input
    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(tab_complete)

    # UNPACK REMAINING CL ARGUMENTS INTO A DICTIONARY SO THEY CAN BE ACCESSED BY
    # VARIABLE NAME
    a = {}
    for arg, val in list(arguments.items()):
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        a[varname] = val
        if arg == "--dbConn":
            dbConn = val
            a["dbConn"] = val
        log.debug('%s = %s' % (
            varname,
            val,
        ))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime, ))

    # set options interactively if user requests
    if "interactiveFlag" in a and a["interactiveFlag"]:

        # load previous settings
        moduleDirectory = os.path.dirname(__file__) + "/resources"
        pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals()
        try:
            with open(pathToPickleFile):
                pass
            previousSettingsExist = True
        except:
            previousSettingsExist = False
        previousSettings = {}
        if previousSettingsExist:
            previousSettings = pickle.load(open(pathToPickleFile, "rb"))

        # x-raw-input
        # x-boolean-raw-input
        # x-raw-input-with-default-value-from-previous-settings

        # save the most recently used requests
        pickleMeObjects = []
        pickleMe = {}
        theseLocals = locals()
        for k in pickleMeObjects:
            pickleMe[k] = theseLocals[k]
        pickle.dump(pickleMe, open(pathToPickleFile, "wb"))

    if a["init"]:
        from os.path import expanduser
        home = expanduser("~")
        filepath = home + "/.config/sherlock/sherlock.yaml"
        try:
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        try:
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        return

    init = a["init"]
    match = a["match"]
    dbmatch = a["dbmatch"]
    clean = a["clean"]
    wiki = a["wiki"]
    iimport = a["import"]
    ned = a["ned"]
    cat = a["cat"]
    stream = a["stream"]
    info = a["info"]
    ra = a["ra"]
    dec = a["dec"]
    radiusArcsec = a["radiusArcsec"]
    cat_name = a["cat_name"]
    stream_name = a["stream_name"]
    skipNedUpdateFlag = a["skipNedUpdateFlag"]
    skipMagUpdateFlag = a["skipMagUpdateFlag"]
    settingsFlag = a["settingsFlag"]
    verboseFlag = a["verboseFlag"]
    updateFlag = a["updateFlag"]

    # CALL FUNCTIONS/OBJECTS
    if match or dbmatch:
        if verboseFlag:
            verbose = 2
        else:
            verbose = 1

        if skipNedUpdateFlag:
            updateNed = False
        else:
            updateNed = True

        if skipMagUpdateFlag:
            updatePeakMags = False
        else:
            updatePeakMags = True

        classifier = transient_classifier.transient_classifier(
            log=log,
            settings=settings,
            ra=ra,
            dec=dec,
            name=False,
            verbose=verbose,
            update=updateFlag,
            updateNed=updateNed,
            updatePeakMags=updatePeakMags)
        classifier.classify()

    if clean:
        cleaner = database_cleaner(log=log, settings=settings)
        cleaner.clean()
    if wiki:
        updateWiki = update_wiki_pages(log=log, settings=settings)
        updateWiki.update()

    if iimport and ned:
        ned = nedStreamImporter(log=log,
                                settings=settings,
                                coordinateList=["%(ra)s %(dec)s" % locals()],
                                radiusArcsec=radiusArcsec)
        ned.ingest()
    if iimport and cat:
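        # note: `pathToDataFile` and `cat_version` are assumed to be supplied via the
        # command-line arguments parsed above; they are not unpacked explicitly in this
        # snippet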

        if cat_name == "veron":
            catalogue = veronImporter(log=log,
                                      settings=settings,
                                      pathToDataFile=pathToDataFile,
                                      version=cat_version,
                                      catalogueName=cat_name)
            catalogue.ingest()

        if "ned_d" in cat_name:
            catalogue = nedImporter(log=log,
                                    settings=settings,
                                    pathToDataFile=pathToDataFile,
                                    version=cat_version,
                                    catalogueName=cat_name)
            catalogue.ingest()
    if iimport and stream:
        if "ifs" in stream_name:
            stream = ifsImporter(log=log, settings=settings)
            stream.ingest()
    if not init and not match and not clean and not wiki and not iimport and ra:

        classifier = transient_classifier.transient_classifier(
            log=log,
            settings=settings,
            ra=ra,
            dec=dec,
            name=False,
            verbose=verboseFlag)
        classifier.classify()

    if info:
        print("sherlock-catalogues")
        wiki = update_wiki_pages(log=log, settings=settings)
        table = list(wiki._get_table_infos(trimmed=True))

        dataSet = list_of_dictionaries(log=log, listOfDictionaries=table)
        tableData = dataSet.reST(filepath=None)
        print(tableData)
        print()

        print("Crossmatch Streams")
        table = list(wiki._get_stream_view_infos(trimmed=True))
        dataSet = list_of_dictionaries(log=log, listOfDictionaries=table)
        tableData = dataSet.reST(filepath=None)
        print(tableData)
        print()

        print("Views on Catalogues and Streams")

        table = list(wiki._get_view_infos(trimmed=True))
        dataSet = list_of_dictionaries(log=log, listOfDictionaries=table)
        tableData = dataSet.reST(filepath=None)
        print(tableData)

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (
            endTime,
            runningTime,
        ))

    return