Example #1
    def _add_htmids_to_database_table(self):
        """*Add HTMIDs to database table once all the data has been imported (HTM Levels 10,13,16)*

        **Usage:**

            .. code-block:: python 

                self._add_htmids_to_database_table()
        """
        self.log.debug('starting the ``_add_htmids_to_database_table`` method')

        tableName = self.dbTableName

        self.log.info("Adding HTMIds to %(tableName)s" % locals())

        add_htm_ids_to_mysql_database_table(
            raColName=self.raColName,
            declColName=self.declColName,
            tableName=self.dbTableName,
            dbConn=self.cataloguesDbConn,
            log=self.log,
            primaryIdColumnName=self.primaryIdColumnName)

        self.log.debug('completed the ``_add_htmids_to_database_table`` method')
        return None
Example #2
    def test_add_htm_ids_to_mysql_database_table_function(self):

        from HMpTy.mysql import add_htm_ids_to_mysql_database_table
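        # NOTE: `dbConn` and `log` are assumed to be created in the test
        # module's setup code; they are not defined within this snippet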
        add_htm_ids_to_mysql_database_table(raColName="raDeg",
                                            declColName="decDeg",
                                            tableName="tcs_cat_ned_d_v10_2_0",
                                            dbConn=dbConn,
                                            log=log,
                                            primaryIdColumnName="primaryId")
Example #3
    def test_add_htm_ids_to_mysql_database_table_function(self):

        from HMpTy.mysql import add_htm_ids_to_mysql_database_table
        add_htm_ids_to_mysql_database_table(
            raColName="raDeg",
            declColName="decDeg",
            tableName="tcs_cat_ned_d_v10_2_0",
            dbConn=dbConn,
            log=log,
            primaryIdColumnName="primaryId"
        )
Example #4
    def _update_htm_columns(self):
        """*update the htm columns in the transientSummaries table so we can crossmatch if needed*
        """
        self.log.debug('starting the ``_update_htm_columns`` method')

        add_htm_ids_to_mysql_database_table(
            raColName="raDeg",
            declColName="decDeg",
            tableName="transientBucketSummaries",
            dbConn=self.dbConn,
            log=self.log,
            primaryIdColumnName="transientBucketId",
            dbSettings=self.settings["database settings"])

        self.log.debug('completed the ``_update_htm_columns`` method')
        return None
Example #5
def main(arguments=None):
    """
    *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    # setup the command-line util settings
    su = tools(arguments=arguments,
               docString=__doc__,
               logLevel="DEBUG",
               options_first=False,
               projectName="HMpTy")
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
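    # (caveat: in Python 3, exec() cannot bind new local variables inside a
    # function; the dict-based unpacking shown in Example #9 avoids this)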
    for arg, val in arguments.items():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str):
            exec(varname + " = '%s'" % (val, ))
        else:
            exec(varname + " = %s" % (val, ))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (
            varname,
            val,
        ))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime, ))

    # CALL FUNCTIONS/OBJECTS
    if index:
        add_htm_ids_to_mysql_database_table(raColName=raCol,
                                            declColName=decCol,
                                            tableName=tableName,
                                            dbConn=dbConn,
                                            log=log,
                                            primaryIdColumnName=primaryIdCol,
                                            reindex=forceFlag)

    if search:
        cs = conesearch(log=log,
                        dbConn=dbConn,
                        tableName=tableName,
                        columns=False,
                        ra=ra,
                        dec=dec,
                        radiusArcsec=float(radius),
                        separations=True,
                        distinct=False,
                        sqlWhere=False)
        matchIndies, matches = cs.search()
        if not renderFlag:
            print(matches.table())
        elif renderFlag == "json":
            print(matches.json())
        elif renderFlag == "csv":
            print(matches.csv())
        elif renderFlag == "yaml":
            print(matches.yaml())
        elif renderFlag == "md":
            print(matches.markdown())
        elif renderFlag == "table":
            print(matches.markdown())
        elif renderFlag == "mysql":
            print(matches.mysql(tableName=resultsTable))

    if level:
        from HMpTy import HTM
        mesh = HTM(depth=int(level), log=log)

        htmids = mesh.lookup_id(ra, dec)
        print(htmids[0])

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (
            endTime,
            runningTime,
        ))

    return
Example #6
    def _update_ned_query_history(self):
        """*Update the database helper table to give details of the ned cone searches performed*

        **Usage:**

            .. code-block:: python

                stream._update_ned_query_history()
        """
        self.log.debug('starting the ``_update_ned_query_history`` method')

        myPid = self.myPid

        # ASTROCALC UNIT CONVERTER OBJECT
        converter = unit_conversion(log=self.log)

        # UPDATE THE DATABASE HELPER TABLE TO GIVE DETAILS OF THE NED CONE
        # SEARCHES PERFORMED
        dataList = []
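        # COORDINATES MAY ARRIVE AS "ra dec" STRINGS OR AS (ra, dec)
        # TUPLES/LISTS; NORMALISE BOTH FORMS TO SEPARATE ra AND dec VALUES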
        for i, coord in enumerate(self.coordinateList):
            if isinstance(coord, str):
                ra = coord.split(" ")[0]
                dec = coord.split(" ")[1]
            elif isinstance(coord, (tuple, list)):
                ra = coord[0]
                dec = coord[1]

            dataList.append({
                "raDeg": ra,
                "decDeg": dec,
                "arcsecRadius": self.radiusArcsec
            })

        if len(dataList) == 0:
            return None

        # CREATE TABLE IF NOT EXIST
        createStatement = """CREATE TABLE IF NOT EXISTS `tcs_helper_ned_query_history` (
  `primaryId` bigint(20) NOT NULL AUTO_INCREMENT,
  `raDeg` double DEFAULT NULL,
  `decDeg` double DEFAULT NULL,
  `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
  `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
  `updated` varchar(45) DEFAULT '0',
  `arcsecRadius` int(11) DEFAULT NULL,
  `dateQueried` datetime DEFAULT CURRENT_TIMESTAMP,
  `htm16ID` bigint(20) DEFAULT NULL,
  `htm13ID` int(11) DEFAULT NULL,
  `htm10ID` int(11) DEFAULT NULL,
  PRIMARY KEY (`primaryId`),
  KEY `idx_htm16ID` (`htm16ID`),
  KEY `dateQueried` (`dateQueried`),
  KEY `dateHtm16` (`dateQueried`,`htm16ID`),
  KEY `idx_htm10ID` (`htm10ID`),
  KEY `idx_htm13ID` (`htm13ID`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
        """
        writequery(log=self.log,
                   sqlQuery=createStatement,
                   dbConn=self.cataloguesDbConn)

        # USE dbSettings TO ACTIVATE MULTIPROCESSING
        insert_list_of_dictionaries_into_database_tables(
            dbConn=self.cataloguesDbConn,
            log=self.log,
            dictList=dataList,
            dbTableName="tcs_helper_ned_query_history",
            uniqueKeyList=[],
            dateModified=True,
            batchSize=10000,
            replace=True,
            dbSettings=self.settings["database settings"]["static catalogues"])

        # INDEX THE TABLE FOR LATER SEARCHES
        add_htm_ids_to_mysql_database_table(
            raColName="raDeg",
            declColName="decDeg",
            tableName="tcs_helper_ned_query_history",
            dbConn=self.cataloguesDbConn,
            log=self.log,
            primaryIdColumnName="primaryId")

        self.log.debug('completed the ``_update_ned_query_history`` method')
        return None
Example #7
    def populate_ps1_subdisk_table(self):
        """
        *Calculate 49 subdisks for each of the PS1 pointings (used to query NED in manageable sized batches) and add them to the ``ps1_pointings_subdisks`` table of the database*

        .. image:: http://i.imgur.com/y3G0aax.png
            :width: 600 px

        **Return:**
            - None

        **Usage:**

            .. code-block:: python

                # SPLIT PS1 POINTINGS INTO SUB-DISKS AND ADD TO LV DATABASE
                from breaker import update_ps1_atlas_footprint_tables
                dbUpdater = update_ps1_atlas_footprint_tables(
                    log=log,
                    settings=settings
                )
                dbUpdater.populate_ps1_subdisk_table()
        """
        self.log.debug(
            'starting the ``populate_ps1_subdisk_table`` method')

        # SELECT THE PS1 POINTINGS NEEDING SUBDISKS CALCULATED
        sqlQuery = u"""
            select ps1_exp_id, raDeg, decDeg from ps1_pointings where subdisks_calculated = 0 and raDeg is not null
        """

        rows = readquery(
            log=self.log,
            sqlQuery=sqlQuery,
            dbConn=self.ligo_virgo_wavesDbConn,
            quiet=False
        )
        ps1PointNum = len(rows)

        # CALCULATE ALL OF THE SUBDISKS
        inserts = []
        expIds = []
        for row in rows:
            subDiskCoordinates = self._get_subdisk_parameters(
                row["raDeg"], row["decDeg"], 1.5)
            ps1_exp_id = row["ps1_exp_id"]
            expIds.append(ps1_exp_id)
            for i, c in enumerate(subDiskCoordinates):
                insert = {
                    "raDeg": c[0],
                    "decDeg": c[1],
                    "ps1_exp_id": ps1_exp_id,
                    "circleId": i + 1
                }
                inserts.append(insert)

        # ADD SUBDISKS TO DATABASE
        if len(inserts):

            insert_list_of_dictionaries_into_database_tables(
                dbConn=self.ligo_virgo_wavesDbConn,
                log=self.log,
                dictList=inserts,
                dbTableName="ps1_pointings_subdisks",
                uniqueKeyList=["ps1_exp_id", "circleId"],
                dateModified=False,
                batchSize=2500,
                replace=True
            )

            # UPDATE POINTINGS TABLE TO INDICATE SUBDISKS HAVE BEEN CALCULATED
            theseIds = ",".join(expIds)
            sqlQuery = u"""
                update ps1_pointings set subdisks_calculated = 1 where ps1_exp_id in (%(theseIds)s)
            """ % locals()
            writequery(
                log=self.log,
                sqlQuery=sqlQuery,
                dbConn=self.ligo_virgo_wavesDbConn,
            )

        if ps1PointNum == 0:
            print("All PS1 pointings have been split into their 49 sub-disks")
        else:
            print("%(ps1PointNum)s new PS1 pointings have been split into 49 sub-disks - parameters added to the `ps1_pointings_subdisks` database table" % locals())

        # APPEND HTMIDs TO THE ps1_pointings_subdisks TABLE
        add_htm_ids_to_mysql_database_table(
            raColName="raDeg",
            declColName="decDeg",
            tableName="ps1_pointings_subdisks",
            dbConn=self.ligo_virgo_wavesDbConn,
            log=self.log,
            primaryIdColumnName="primaryId"
        )

        self.log.debug(
            'completed the ``populate_ps1_subdisk_table`` method')
        return None
Example #8
    def import_new_atlas_pointings(self, recent=False):
        """
        *Import any new ATLAS GW pointings from the atlas3 database into the ``atlas_pointings`` table of the Ligo-Virgo Waves database*

        **Key Arguments:**
            - ``recent`` -- only sync the most recent 2 months of data (speeds things up)

        **Return:**
            - None

        **Usage:**

            .. code-block:: python

                # IMPORT NEW ATLAS POINTINGS FROM ATLAS DATABASE INTO
                # LIGO-VIRGO WAVES DATABASE
                from breaker import update_ps1_atlas_footprint_tables
                dbUpdater = update_ps1_atlas_footprint_tables(
                    log=log,
                    settings=settings
                )
                dbUpdater.import_new_atlas_pointings()
        """
        self.log.debug('starting the ``import_new_atlas_pointings`` method')

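        # CONVERT THE `recent` FLAG INTO AN SQL WHERE FRAGMENT: A 2-MONTH
        # MJD WINDOW IF REQUESTED, OTHERWISE "1=1" (MATCH EVERYTHING)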
        if recent:
            mjd = mjdnow(
                log=self.log
            ).get_mjd()
            recent = mjd - 62
            recent = " mjd_obs > %(recent)s " % locals()
        else:
            recent = "1=1"

        # SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ps1gw DATABASE
        sqlQuery = u"""
            SELECT
                `dec` as `decDeg`,
                `exptime` as `exp_time`,
                `filter`,
                `mjd_obs` as `mjd`,
                `ra` as `raDeg`,
                if(mjd_obs<57855.0,mag5sig-0.75,mag5sig) as `limiting_magnitude`,
                `object` as `atlas_object_id` from atlas_metadata where %(recent)s and object like "TA%%" order by mjd_obs desc;
        """ % locals()
        rows = readquery(
            log=self.log,
            sqlQuery=sqlQuery,
            dbConn=self.atlasDbConn,
            quiet=False
        )

        # TIDY RESULTS BEFORE IMPORT
        entries = list(rows)

        # ADD THE NEW RESULTS TO THE ps1_pointings TABLE
        insert_list_of_dictionaries_into_database_tables(
            dbConn=self.ligo_virgo_wavesDbConn,
            log=self.log,
            dictList=entries,
            dbTableName="atlas_pointings",
            uniqueKeyList=["raDeg", "decDeg", "mjd"],
            dateModified=False,
            batchSize=2500,
            replace=True
        )

        # APPEND HTMIDs TO THE ps1_pointings TABLE
        add_htm_ids_to_mysql_database_table(
            raColName="raDeg",
            declColName="decDeg",
            tableName="atlas_pointings",
            dbConn=self.ligo_virgo_wavesDbConn,
            log=self.log,
            primaryIdColumnName="primaryId"
        )

        print("ATLAS pointings synced between the `atlas_metadata` and `atlas_pointings` database tables")

        self.log.debug('completed the ``import_new_atlas_pointings`` method')
        return None
Example #9
def main(arguments=None):
    """
    *The main function used when `cl_utils.py` is run as a single script from the cl, or when installed as a cl command*
    """
    # setup the command-line util settings
    su = tools(arguments=arguments,
               docString=__doc__,
               logLevel="WARNING",
               options_first=False,
               projectName="HMpTy",
               defaultSettingsFile=True)
    arguments, settings, log, dbConn = su.setup()

    # tab completion for raw_input
    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(tab_complete)

    # UNPACK REMAINING CL ARGUMENTS USING `EXEC` TO SETUP THE VARIABLE NAMES
    # AUTOMATICALLY
    a = {}
    for arg, val in list(arguments.items()):
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        a[varname] = val
        if arg == "--dbConn":
            dbConn = val
            a["dbConn"] = val
        log.debug('%s = %s' % (
            varname,
            val,
        ))

    hostFlag = a["hostFlag"]
    userFlag = a["userFlag"]
    passwdFlag = a["passwdFlag"]
    dbNameFlag = a["dbNameFlag"]
    tableName = a["tableName"]
    index = a["index"]
    htmid = a["htmid"]
    primaryIdCol = a["primaryIdCol"]
    raCol = a["raCol"]
    decCol = a["decCol"]
    ra = a["ra"]
    dec = a["dec"]
    radius = a["radius"]
    level = a["level"]
    forceFlag = a["forceFlag"]
    renderFlag = a["renderFlag"]
    search = a["search"]

    if "database settings" in settings:
        dbSettings = settings["database settings"]
    else:
        dbSettings = False

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime, ))

    # set options interactively if user requests
    if "interactiveFlag" in a and a["interactiveFlag"]:

        # load previous settings
        moduleDirectory = os.path.dirname(__file__) + "/resources"
        pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals()
        previousSettingsExist = os.path.exists(pathToPickleFile)
        previousSettings = {}
        if previousSettingsExist:
            previousSettings = pickle.load(open(pathToPickleFile, "rb"))

        # x-raw-input
        # x-boolean-raw-input
        # x-raw-input-with-default-value-from-previous-settings

        # save the most recently used requests
        pickleMeObjects = []
        pickleMe = {}
        theseLocals = locals()
        for k in pickleMeObjects:
            pickleMe[k] = theseLocals[k]
        pickle.dump(pickleMe, open(pathToPickleFile, "wb"))

    if a["init"]:
        from os.path import expanduser
        home = expanduser("~")
        filepath = home + "/.config/HMpTy/HMpTy.yaml"
        try:
            # `open` launches the default handler on macOS
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except Exception:
            pass
        try:
            # `start` launches the default handler on Windows
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except Exception:
            pass
        return

    # CALL FUNCTIONS/OBJECTS
    if index:
        add_htm_ids_to_mysql_database_table(raColName=raCol,
                                            declColName=decCol,
                                            tableName=tableName,
                                            dbConn=dbConn,
                                            log=log,
                                            primaryIdColumnName=primaryIdCol,
                                            reindex=forceFlag,
                                            dbSettings=dbSettings)

    if search:
        cs = conesearch(log=log,
                        dbConn=dbConn,
                        tableName=tableName,
                        columns=False,
                        ra=ra,
                        dec=dec,
                        radiusArcsec=float(radius),
                        separations=True,
                        distinct=False,
                        sqlWhere=False)
        matchIndies, matches = cs.search()
        if not renderFlag:
            print(matches.table())
        elif renderFlag == "json":
            print(matches.json())
        elif renderFlag == "csv":
            print(matches.csv())
        elif renderFlag == "yaml":
            print(matches.yaml())
        elif renderFlag == "md":
            print(matches.markdown())
        elif renderFlag == "table":
            print(matches.markdown())
        elif renderFlag == "mysql":
            print(matches.mysql(tableName=resultsTable))

    if level:
        from HMpTy import HTM
        mesh = HTM(depth=int(level), log=log)

        htmids = mesh.lookup_id(ra, dec)
        print(htmids[0])

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info(
        '-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (
            endTime,
            runningTime,
        ))

    return
Example #10
def main(arguments=None):
    """
    *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
    """
    # setup the command-line util settings
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="DEBUG",
        options_first=False,
        projectName="HMpTy"
    )
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically
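    # (same Python 3 exec() caveat as in Example #5; Example #9 shows the
    # dict-based alternative)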
    for arg, val in arguments.items():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info(
        '--- STARTING TO RUN THE cl_utils.py AT %s' %
        (startTime,))

    # CALL FUNCTIONS/OBJECTS
    if index:
        add_htm_ids_to_mysql_database_table(
            raColName=raCol,
            declColName=decCol,
            tableName=tableName,
            dbConn=dbConn,
            log=log,
            primaryIdColumnName=primaryIdCol,
            reindex=forceFlag
        )

    if search:
        cs = conesearch(
            log=log,
            dbConn=dbConn,
            tableName=tableName,
            columns=False,
            ra=ra,
            dec=dec,
            radiusArcsec=float(radius),
            separations=True,
            distinct=False,
            sqlWhere=False
        )
        matchIndies, matches = cs.search()
        if not renderFlag:
            print(matches.table())
        elif renderFlag == "json":
            print(matches.json())
        elif renderFlag == "csv":
            print(matches.csv())
        elif renderFlag == "yaml":
            print(matches.yaml())
        elif renderFlag == "md":
            print(matches.markdown())
        elif renderFlag == "table":
            print(matches.markdown())
        elif renderFlag == "mysql":
            print(matches.mysql(tableName=resultsTable))

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()
    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
             (endTime, runningTime, ))

    return
Example #11
    def import_new_atlas_pointings(self, recent=False):
        """
        *Import any new ATLAS pointings from the atlas3/atlas4 databases into the ``atlas_exposures`` table of the Atlas Movers database*

        **Key Arguments:**
            - ``recent`` -- only sync the most recent 2 weeks of data (speeds things up)

        **Return:**
            - None

        **Usage:**

            .. code-block:: python

                from rockAtlas.bookkeeping import bookkeeper
                bk = bookkeeper(
                    log=log,
                    settings=settings
                )
                bk.import_new_atlas_pointings()
        """
        self.log.info('starting the ``import_new_atlas_pointings`` method')

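        # CONVERT THE `recent` FLAG INTO AN SQL WHERE FRAGMENT: A 2-WEEK
        # MJD WINDOW IF REQUESTED, OTHERWISE "1=1" (MATCH EVERYTHING)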
        if recent:
            mjd = mjdnow(log=self.log).get_mjd()
            recent = mjd - 14
            recent = " mjd_obs > %(recent)s " % locals()
        else:
            recent = "1=1"

        # SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS3 DATABASE
        sqlQuery = u"""
            SELECT
                `expname`,
                `dec` as `decDeg`,
                `exptime` as `exp_time`,
                `filter`,
                `mjd_obs` as `mjd`,
                `ra` as `raDeg`,
                if(mjd_obs<57855.0,mag5sig-0.75,mag5sig) as `limiting_magnitude`,
                `object` as `atlas_object_id` from atlas_metadata where %(recent)s and object like "TA%%" order by mjd_obs desc;
        """ % locals()
        rows = readquery(log=self.log,
                         sqlQuery=sqlQuery,
                         dbConn=self.atlas3DbConn,
                         quiet=False)

        dbSettings = self.settings["database settings"]["atlasMovers"]

        # TIDY RESULTS BEFORE IMPORT
        entries = list(rows)

        if len(rows) > 0:
            # ADD THE NEW RESULTS TO THE atlas_exposures TABLE
            insert_list_of_dictionaries_into_database_tables(
                dbConn=self.atlasMoversDBConn,
                log=self.log,
                dictList=entries,
                dbTableName="atlas_exposures",
                uniqueKeyList=["expname"],
                dateModified=False,
                batchSize=10000,
                replace=True,
                dbSettings=dbSettings)

        # THE atlas4 SCHEMA NAMES THE MJD COLUMN `mjd`, NOT `mjd_obs`
        recent = recent.replace("mjd_obs", "mjd")

        # SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS4 DATABASE
        sqlQuery = u"""
            SELECT
                `obs` as `expname`,
                `dec` as `decDeg`,
                `texp` as `exp_time`,
                `filt` as `filter`,
                `mjd`,
                `ra` as `raDeg`,
                `mag5sig` as `limiting_magnitude`,
                `obj` as `atlas_object_id` from atlas_metadataddc where %(recent)s and obj like "TA%%" order by mjd desc;
        """ % locals()
        rows = readquery(log=self.log,
                         sqlQuery=sqlQuery,
                         dbConn=self.atlas4DbConn,
                         quiet=False)

        # TIDY RESULTS BEFORE IMPORT
        entries = list(rows)

        if len(rows) > 0:
            # ADD THE NEW RESULTS TO THE atlas_exposures TABLE
            insert_list_of_dictionaries_into_database_tables(
                dbConn=self.atlasMoversDBConn,
                log=self.log,
                dictList=entries,
                dbTableName="atlas_exposures",
                uniqueKeyList=["expname"],
                dateModified=False,
                batchSize=10000,
                replace=True,
                dbSettings=dbSettings)

        # APPEND HTMIDs TO THE atlas_exposures TABLE
        add_htm_ids_to_mysql_database_table(raColName="raDeg",
                                            declColName="decDeg",
                                            tableName="atlas_exposures",
                                            dbConn=self.atlasMoversDBConn,
                                            log=self.log,
                                            primaryIdColumnName="primaryId")

        print("ATLAS pointings synced between the ATLAS3/ATLAS4 databases and the ATLAS Movers `atlas_exposures` database table")

        self.log.info('completed the ``import_new_atlas_pointings`` method')
        return None
Example #12
    def insert_into_transientBucket(self,
                                    importUnmatched=True,
                                    updateTransientSummaries=True):
        """*insert objects/detections from the feeder survey table into the transientbucket*

        **Key Arguments**

        - ``importUnmatched`` -- import unmatched (new) transients into the marshall (not wanted in some circumstances)
        - ``updateTransientSummaries`` -- update the transient summaries and lightcurves? Can be True or False, or alternatively a specific transientBucketId


        This method aims to reduce crossmatching and load on the database by:

        1. automatically assign the transientbucket id to feeder survey detections where the object name is found in the transientbucket (no spatial crossmatch required). Copy matched feeder survey rows to the transientbucket.
        2. crossmatch remaining unique, unmatched sources in feeder survey with sources in the transientbucket. Add associated transientBucketIds to matched feeder survey sources. Copy matched feeder survey rows to the transientbucket.
        3. assign a new transientbucketid to any feeder survey source not matched in steps 1 & 2. Copy these unmatched feeder survey rows to the transientbucket as new transient detections.

        **Return**

        - None


        **Usage**

        ```python
        ingester.insert_into_transientBucket()
        ```
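
        To regenerate the summary for a single transient, pass a
        ``transientBucketId`` directly (the ID below is illustrative):

        ```python
        ingester.insert_into_transientBucket(updateTransientSummaries=12345)
        ```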

        """
        self.log.debug(
            'starting the ``insert_into_transientBucket`` method')

        fsTableName = self.fsTableName

        # 1. automatically assign the transientbucket id to feeder survey
        # detections where the object name is found in the transientbucket (no
        # spatial crossmatch required). Copy matched feeder survey rows to the
        # transientbucket.
        self._feeder_survey_transientbucket_name_match_and_import()

        # 2. crossmatch remaining unique, unmatched sources in feeder survey
        # with sources in the transientbucket. Add associated
        # transientBucketIds to matched feeder survey sources. Copy matched
        # feeder survey rows to the transientbucket.
        from HMpTy.mysql import add_htm_ids_to_mysql_database_table
        add_htm_ids_to_mysql_database_table(
            raColName="raDeg",
            declColName="decDeg",
            tableName="transientBucket",
            dbConn=self.dbConn,
            log=self.log,
            primaryIdColumnName="primaryKeyId",
            dbSettings=self.settings["database settings"])
        unmatched = self._feeder_survey_transientbucket_crossmatch()

        # 3. assign a new transientbucketid to any feeder survey source not
        # matched in steps 1 & 2. Copy these unmatched feeder survey rows to
        # the transientbucket as new transient detections.
        if importUnmatched:
            self._import_unmatched_feeder_survey_sources_to_transientbucket(
                unmatched)

        # UPDATE OBSERVATION DATES FROM MJDs
        sqlQuery = "call update_transientbucket_observation_dates()"
        writequery(log=self.log, sqlQuery=sqlQuery, dbConn=self.dbConn)

        # UPDATE THE TRANSIENT BUCKET SUMMARY TABLE IN THE MARSHALL DATABASE
        if updateTransientSummaries:
            if isinstance(updateTransientSummaries, int) and not isinstance(
                    updateTransientSummaries, bool):
                transientBucketId = updateTransientSummaries
            else:
                transientBucketId = False
            from marshallEngine.housekeeping import update_transient_summaries
            updater = update_transient_summaries(
                log=self.log,
                settings=self.settings,
                dbConn=self.dbConn,
                transientBucketId=transientBucketId)
            updater.update()

        self.log.debug(
            'completed the ``insert_into_transientBucket`` method')
        return None