Code Example #1
  def query(self, arg):
    """
    Execute an SQL query on the database and return the result as LIGO_LW XML

    @param arg: a sequence whose first element is the text of the SQL query to be executed

    @return: None
    """
    global logger
    global xmlparser, dbobj

    # get the query string and log it
    querystr = arg[0]
    logger.debug("Method query called with %s" % querystr)

    # assume failure
    code = 1

    try:
      # create a ligo metadata object
      lwtparser = ldbd.LIGOLwParser()
      ligomd = ldbd.LIGOMetadata(xmlparser,lwtparser,dbobj)

      # execute the query
      rowcount = ligomd.select(querystr)

      # convert the result to xml
      result = ligomd.xml()

      logger.debug("Method query: %d rows returned" % rowcount)
      code = 0
    except Exception as e:
      result = ("Error querying metadata database: %s" % e)
      logger.error(result)
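
A minimal sketch of the select-then-serialize pattern used above, with the database connection passed in explicitly instead of read from module globals. It assumes the glue package's ldbd module behaves as in the snippet; dbobj stands for an already-open DB-API connection.

# Sketch only (Python 2): mirrors the query pattern in Code Example #1.
import pyRXP
from glue import ldbd

def query_to_xml(dbobj, querystr):
    xmlparser = pyRXP.Parser()
    lwtparser = ldbd.LIGOLwParser()
    ligomd = ldbd.LIGOMetadata(xmlparser, lwtparser, dbobj)
    rowcount = ligomd.select(querystr)  # execute the SQL query
    return rowcount, ligomd.xml()       # rows serialized as LIGO_LW XML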
Code Example #2
	def __init__(self, client):
		def dtd_uri_callback(uri):
			if uri == 'http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt':
				return 'file://localhost' + os.path.join( os.environ["GLUE_PREFIX"], 'etc/ligolw_dtd.txt' )
			else:
				return uri

		self.client         = client
		self.xmlparser      = pyRXP.Parser()
		self.xmlparser.eoCB = dtd_uri_callback
		self.lwtparser      = ldbd.LIGOLwParser()
		self.ligomd         = ldbd.LIGOMetadata(self.xmlparser, self.lwtparser, None)
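
The eoCB hook set above is pyRXP's entity-open callback: before the parser fetches an external entity such as the LIGO_LW DTD, the callback may rewrite its URI, here redirecting the ldas-sw.ligo.caltech.edu DTD to a local copy under GLUE_PREFIX. An illustrative sketch of the same remapping in isolation; the document string is a placeholder and GLUE_PREFIX is assumed to be set.

# Sketch only (Python 2): pyRXP eoCB URI remapping on its own.
import os
import pyRXP

def dtd_uri_callback(uri):
    if uri == 'http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt':
        return 'file://localhost' + os.path.join(os.environ["GLUE_PREFIX"], 'etc/ligolw_dtd.txt')
    return uri

xmlparser = pyRXP.Parser()
xmlparser.eoCB = dtd_uri_callback
tuple_tree = xmlparser('<ligo_lw/>')  # pyRXP parsers are callable on a string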
Code Example #3
            logger.info("Method QUERY is being called from internal network")

        # pick off the format input type
        format = environ['wsgiorg.routing_args'][1]['format']

        # for a POST the format must be JSON
        if format != 'json':
            start_response("400 Bad Request", [('Content-type', 'text/plain')])
            msg = "400 Bad Request\n\nformat must be 'json' for POST operation"
            return [msg]

        logger.debug("Method query called with '%s'" % querystr)

        try:
            # create a ligo metadata object
            lwtparser = ldbd.LIGOLwParser()
            ligomd = ldbd.LIGOMetadata(xmlparser, lwtparser, dbobj)

            # execute the query
            rowcount = ligomd.select(querystr)

            # convert the result to xml
            result = ligomd.xml()

            logger.debug("Method query: %d rows returned" % rowcount)
        except Exception as e:
            start_response("500 Internal Server Error",
                           [('Content-type', 'text/plain')])
            msg = "500 Internal Server Error\n\n%s" % e
            logger.error(msg)
            return [msg]
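
The handlers in these examples all follow the same WSGI convention: report errors by calling start_response with a plain-text status and returning the message body as a single-element list. A toy handler showing just that convention; every name in it is illustrative, not from the original project.

# Sketch only (Python 2): the WSGI status/body convention used throughout.
def toy_handler(environ, start_response):
    fmt = environ.get('toy.format', 'json')  # stand-in for wsgiorg.routing_args
    if fmt != 'json':
        start_response("400 Bad Request", [('Content-type', 'text/plain')])
        return ["400 Bad Request\n\nformat must be 'json' for POST operation"]
    start_response("200 OK", [('Content-type', 'application/json')])
    return ['{"ok": true}']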
Code Example #4
File: apicalls.py Project: tjma12/dqsegdb
def InsertMultipleDQXMLFileThreaded(filenames,
                                    logger,
                                    server='http://slwebtest.virgo.infn.it',
                                    hackDec11=True,
                                    debug=True,
                                    threads=1,
                                    testing_options={}):
    """ 
    Inserts multiple dqxml files of data into the DQSEGDB.
    - filenames is a list of string filenames for  DQXML files.
    - hackDec11 is used to turn off good features that the server doesn't
    yet support.
    returns True if it completes sucessfully
    - options is a dictionary including (optionally):offset(int),synchronize(time in 'HH:MM' format (string))
    """
    logger.info(
        "Beginning call to InsertMultipleDQXMLFileThreaded.  This message last updated April 14 2015, Ciao da Italia!"
    )
    from threading import Thread
    from Queue import Queue
    import sys

    # Make a call to server+'/dq':
    protocol = server.split(':')[0]
    serverfqdn = server.split('/')[-1]
    apiResult = queryAPIVersion(protocol, serverfqdn, False)
    # If the API change results in a backwards incompatibility, handle it here with a flag that affects behavior below
    if apiResult >= "2.1.0":
        # S6 style comments are needed
        new_comments = True
    else:
        # Older server, so don't want to supply extra comments...
        new_comments = False
    if apiResult >= "2.1.15":
        # Alteration to insertion_metadata from uri to comment to accommodate s6 data conversion
        use_new_insertion_metadata = True
    else:
        use_new_insertion_metadata = False
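    # Note: the version checks above are lexicographic string comparisons,
    # which happens to work for these thresholds but is fragile in general
    # ("2.10.0" < "2.9.0" as strings). A safer sketch, assuming apiResult is
    # a dotted version string:
    #     api_tuple = tuple(int(x) for x in apiResult.split('.'))
    #     new_comments = api_tuple >= (2, 1, 0)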

    if 'offset' in testing_options:
        offset = int(testing_options['offset'])
    else:
        offset = 0
    if 'synchronize' in testing_options:
        synchronize = testing_options['synchronize']

    xmlparser = pyRXP.Parser()
    lwtparser = ldbd.LIGOLwParser()

    flag_versions = {}

    # flag_versions, filename, server, hackDec11, debug are current variables

    # This next bunch of code is specific to a given file:
    if len(filenames) < 1:
        print "Empty file list sent to InsertMultipleDQXMLFileThreaded"
        raise ValueError("Empty file list sent to InsertMultipleDQXMLFileThreaded")
    for filename in filenames:

        segment_md = setupSegment_md(filename, xmlparser, lwtparser, debug)

        # segment_md, flag_versions, filename, server, hackDec11, debug are current variables

        flag_versions_numbered = {}

        for j in range(len(segment_md.table['segment_definer']['stream'])):
            flag_versions_numbered[j] = {}
            for i, entry in enumerate(
                    segment_md.table['segment_definer']['orderedcol']):
                #print j,entry,segment_md.table['segment_definer']['stream'][j][i]
                flag_versions_numbered[j][entry] = segment_md.table[
                    'segment_definer']['stream'][j][i]

        # parse process table and make a dict that corresponds with each
        # process, where the keys for the dict are like "process:process_id:1"
        # so that we can match
        # these to the flag_versions from the segment definer in the next
        # section

        # Note: wherever temp_ precedes a name, it is generally an identifier
        # field from the dqxml that is only valid for the single dqxml file
        # being parsed

        process_dict = {}
        # Going to assign process table streams to process_dict with a key
        # matching process_id (process:process_id:0 for example)
        for j in range(len(segment_md.table['process']['stream'])):
            process_id_index = segment_md.table['process']['orderedcol'].index(
                'process_id')
            temp_process_id = segment_md.table['process']['stream'][j][
                process_id_index]
            # Now we're going to assign elements to process_dict[process_id]
            process_dict[temp_process_id] = {}
            for i, entry in enumerate(
                    segment_md.table['process']['orderedcol']):
                #print j,entry,segment_md.table['process']['stream'][j][i]
                process_dict[temp_process_id][entry] = segment_md.table[
                    'process']['stream'][j][i]
                # Note that the segment_md.table['process']['stream'][0] looks like this:
                #0 program SegGener
                #0 version 6831
                #0 cvs_repository https://redoubt.ligo-wa.caltech.edu/
                #0                svn/gds/trunk/Monitors/SegGener/SegGener.cc
                #0 cvs_entry_time 1055611021
                #0 comment Segment generation from an OSC condition
                #0 node l1gds2
                #0 username [email protected]
                #0 unix_procid 24286
                #0 start_time 1065916603
                #0 end_time 1070395521
                #0 process_id process:process_id:0
                #0 ifos L0L1
                # So now I have all of that info stored by the process_id keys
                # Eventually I have to map these elements to the process_metadata
                # style.. maybe I can do that now:
            process_dict[temp_process_id]['process_metadata'] = {}
            if hackDec11:
                process_dict[temp_process_id]['process_metadata'][
                    'process_start_time'] = process_dict[temp_process_id][
                        'start_time']
            else:  # This is for the newer server APIs (we checked April 24 2015; it probably changed before ER6)
                process_dict[temp_process_id]['process_metadata'][
                    'process_start_timestamp'] = process_dict[temp_process_id][
                        'start_time']
            if new_comments:
                process_dict[temp_process_id][
                    'process_comment'] = process_dict[temp_process_id][
                        'comment']
            process_dict[temp_process_id]['process_metadata'][
                'uid'] = process_dict[temp_process_id]['username']
            process_dict[temp_process_id]['process_metadata'][
                'args'] = []  ### Fix!!! dqxml has no args???
            process_dict[temp_process_id]['process_metadata'][
                'pid'] = process_dict[temp_process_id]['unix_procid']
            process_dict[temp_process_id]['process_metadata'][
                'name'] = process_dict[temp_process_id]['program']
            process_dict[temp_process_id]['process_metadata'][
                'fqdn'] = process_dict[temp_process_id][
                    'node']  ### Fix!!! Improvement: not really fqdn, just the node name

        # So now I have process_dict[temp_process_id]['process_metadata'] for each
        # process_id, and can add it to a flag version when it uses it;  really I
        # should group it with the segment summary info because that has the
        # insertion_metadata start and stop time
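        # Illustrative shape of what has been built so far, using the sample
        # stream values shown above (keys abridged):
        #   process_dict['process:process_id:0'] = {
        #       'program': 'SegGener', 'node': 'l1gds2', ...,
        #       'process_metadata': {
        #           'name': 'SegGener', 'pid': 24286,
        #           'uid': '[email protected]', 'fqdn': 'l1gds2',
        #           'args': [], 'process_start_time': 1065916603,
        #       },
        #   }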

        ### Fix!!! Get the args from the *other* process table... yikes
        ### Double check what is done below works!
        # First pass:
        #if debug:
        #    import pdb
        #    pdb.set_trace()

        temp_process_params_process_id = None
        try:
            len(segment_md.table['process_params']['stream'])
        except KeyError:
            logger.info("No process_params table for file: %s" % filename)
        else:
            for j in range(len(segment_md.table['process_params']['stream'])):
                process_id_index = segment_md.table['process_params'][
                    'orderedcol'].index('process_id')
                temp_process_params_process_id = segment_md.table[
                    'process_params']['stream'][j][process_id_index]
                #  This next bit looks a bit strange, but the goal is to pull
                #  off only the param and value from each row of the
                #  process_params table and put them into the process_metadata.
                #  Loop through the columns in each row, keep only the param
                #  and value entries, and append them to the args list after
                #  the inner for loop.
                for i, entry in enumerate(
                        segment_md.table['process_params']['orderedcol']):
                    if entry == "param":
                        temp_param = str(
                            segment_md.table['process_params']['stream'][j][i])
                    if entry == "value":
                        temp_value = str(
                            segment_md.table['process_params']['stream'][j][i])
                process_dict[temp_process_params_process_id][
                    'process_metadata']['args'].append(str(temp_param))
                process_dict[temp_process_params_process_id][
                    'process_metadata']['args'].append(str(temp_value))

        #if debug:
        #    import pdb
        #    pdb.set_trace()

        temp_id_to_flag_version = {}

        for i in flag_versions_numbered.keys():
            ifo = flag_versions_numbered[i]['ifos']
            name = flag_versions_numbered[i]['name']
            version = flag_versions_numbered[i]['version']
            if (ifo, name, version) not in flag_versions.keys():
                if new_comments:
                    flag_versions[(ifo, name, version)] = InsertFlagVersion(
                        ifo, name, version)
                else:
                    flag_versions[(ifo, name, version)] = InsertFlagVersionOld(
                        ifo, name, version)
                if new_comments:
                    flag_versions[(ifo, name, version)].flag_description = str(
                        flag_versions_numbered[i]['comment']
                    )  # old segment_definer comment = new flag_description
                    # OUTDATED PLACEHOLDER: flag_versions[(ifo,name,version)].version_comment=str(flag_versions_numbered[i]['comment'])
                else:
                    flag_versions[(ifo, name, version)].flag_comment = str(
                        flag_versions_numbered[i]['comment'])
                    flag_versions[(ifo, name, version)].version_comment = str(
                        flag_versions_numbered[i]['comment'])
            flag_versions[(
                ifo, name,
                version)].temporary_definer_id = flag_versions_numbered[i][
                    'segment_def_id']
            flag_versions[(
                ifo, name, version
            )].temporary_process_id = flag_versions_numbered[i]['process_id']
            # Populate reverse lookup dictionary:
            temp_id_to_flag_version[flag_versions[(
                ifo, name, version)].temporary_definer_id] = (ifo, name,
                                                              version)

        # ways to solve the metadata problem:
        # Associate each insertion_metadata block with a process, then group
        # them and take the min insert_data_start and max insert_data_stop

        # parse segment_summary table and associate known segments with
        # flag_versions above:
        ## Note this next line is needed for looping over multiple files
        for i in flag_versions.keys():
            flag_versions[i].temp_process_ids = {}
        for j in range(len(segment_md.table['segment_summary']['stream'])):
            #flag_versions_numbered[j] = {}
            seg_def_index = segment_md.table['segment_summary'][
                'orderedcol'].index('segment_def_id')
            #print "associated seg_def_id is: "+ segment_md.table['segment_summary']['stream'][j][seg_def_index]
            (ifo, name, version
             ) = temp_id_to_flag_version[segment_md.table['segment_summary']
                                         ['stream'][j][seg_def_index]]
            seg_sum_index = segment_md.table['segment_summary'][
                'orderedcol'].index('segment_sum_id')
            # Unneeded:
            #flag_versions[(ifo,name,version)].temporary_segment_sum_id = segment_md.table['segment_summary']['stream'][j][seg_sum_index]
            start_time_index = segment_md.table['segment_summary'][
                'orderedcol'].index('start_time')
            end_time_index = segment_md.table['segment_summary'][
                'orderedcol'].index('end_time')
            start_time = segment_md.table['segment_summary']['stream'][j][
                start_time_index] + offset
            end_time = segment_md.table['segment_summary']['stream'][j][
                end_time_index] + offset
            comment_index = segment_md.table['segment_summary'][
                'orderedcol'].index('comment')
            seg_sum_comment = segment_md.table['segment_summary']['stream'][j][
                comment_index]
            new_seg_summary = segments.segmentlist(
                [segments.segment(start_time, end_time)])
            flag_versions[(ifo, name, version)].appendKnown(new_seg_summary)
            # Now I need to build up the insertion_metadata dictionary for this
            # summary:
            # Now I need to associate the right process with the known
            # segments here, and put the start and end time into the
            # insertion_metadata part of the
            #  insert_history dict
            # Plan for processes and affected data:
            # Loop through segment summaries
            # If we haven't seen the associated process before, create it:
            # First, append the temp_process_id to temp_process_ids
            # Then, each temp_process_ids entry is a dictionary, where the one
            # element is start_affected time, and the other is end_affected
            # time, and later we will combine this with the correct
            # process_metadata dictionary
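            # Illustrative shape of the per-flag bookkeeping built below:
            #   flag_versions[(ifo, name, version)].temp_process_ids = {
            #       'process:process_id:0': {
            #           'insert_data_start': <earliest known-segment start>,
            #           'insert_data_stop':  <latest known-segment stop>,
            #       },
            #   }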
            process_id_index = segment_md.table['segment_summary'][
                'orderedcol'].index('process_id')
            temp_process_id = segment_md.table['segment_summary']['stream'][j][
                process_id_index]
            if temp_process_id in flag_versions[(
                    ifo, name, version)].temp_process_ids.keys():
                # We don't need to append this process metadata, as it already
                # exists We do need to extend the affected data start and stop
                # to match
                if start_time < flag_versions[(
                        ifo, name, version
                )].temp_process_ids[temp_process_id]['insert_data_start']:
                    flag_versions[(ifo, name, version)].temp_process_ids[
                        temp_process_id]['insert_data_start'] = start_time
                if end_time > flag_versions[(
                        ifo, name, version
                )].temp_process_ids[temp_process_id]['insert_data_stop']:
                    flag_versions[(ifo, name, version)].temp_process_ids[
                        temp_process_id]['insert_data_stop'] = end_time
            else:
                # Need to make the dictionary entry for this process_id
                if seg_sum_comment is not None:
                    flag_versions[(ifo, name,
                                   version)].provenance_url = seg_sum_comment
                else:
                    flag_versions[(ifo, name, version)].provenance_url = ''
                flag_versions[(
                    ifo, name,
                    version)].temp_process_ids[temp_process_id] = {}
                flag_versions[(ifo, name, version)].temp_process_ids[
                    temp_process_id]['insert_data_start'] = start_time
                flag_versions[(ifo, name, version)].temp_process_ids[
                    temp_process_id]['insert_data_stop'] = end_time

        # Now, I need to append an insert_history element to the flag_versions
        # for this ifo,name, version, as I have the correct insertion_metadata
        # and the correct
        # process_metadata (from the process_dict earlier).
        if debug:
            t1 = time.time()
        for i in flag_versions.keys():
            for pid in flag_versions[i].temp_process_ids.keys():
                start = flag_versions[i].temp_process_ids[pid][
                    'insert_data_start']
                stop = flag_versions[i].temp_process_ids[pid][
                    'insert_data_stop']
                if new_comments:
                    flag_versions[i].flag_version_comment = process_dict[pid][
                        'process_comment']
                insert_history_dict = {}
                try:
                    insert_history_dict['process_metadata'] = process_dict[
                        pid]['process_metadata']
                except:
                    raise
                #    import pdb
                #    pdb.set_trace()
                insert_history_dict['insertion_metadata'] = {}
                insert_history_dict['insertion_metadata'][
                    'insert_data_stop'] = stop
                insert_history_dict['insertion_metadata'][
                    'insert_data_start'] = start
                ifo = flag_versions[i].ifo
                version = flag_versions[i].version
                name = flag_versions[i].name
                if use_new_insertion_metadata:
                    insert_history_dict['insertion_metadata'][
                        'comment'] = '/dq/' + '/'.join(
                            [str(ifo), str(name),
                             str(version)]
                        )  # FIX make dq a constant string in case we ever change it
                else:
                    insert_history_dict['insertion_metadata'][
                        'uri'] = '/dq/' + '/'.join(
                            [str(ifo), str(name),
                             str(version)]
                        )  # FIX make dq a constant string in case we ever change it
                #print ifo,name,version
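                # _UTCToGPS converts the current UTC time (a struct_time
                # from time.gmtime()) into GPS seconds, the timestamp
                # convention used by the DQSEGDB.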
                insert_history_dict['insertion_metadata'][
                    'timestamp'] = _UTCToGPS(time.gmtime())
                insert_history_dict['insertion_metadata'][
                    'auth_user'] = process.get_username()
                #if hackDec11:
                #    # note that this only uses one insert_history...despite
                #    all that hard work to get the list right...
                #    # so this might break something...
                #    flag_versions[i].insert_history=insert_history_dict
                #else:
                #    flag_versions[i].insert_history.append(insert_history_dict)
                flag_versions[i].insert_history.append(insert_history_dict)

        # parse segment table and associate known segments with flag_versions
        # above:
        try:
            for j in range(len(segment_md.table['segment']['stream'])):
                #flag_versions_numbered[j] = {}
                seg_def_index = segment_md.table['segment'][
                    'orderedcol'].index('segment_def_id')
                #print "associated seg_def_id is: "+
                #    segment_md.table['segment']['stream'][j][seg_def_index]
                (ifo, name, version) = temp_id_to_flag_version[
                    segment_md.table['segment']['stream'][j][seg_def_index]]
                #seg_sum_index = segment_md.table['segment']['orderedcol'].index('segment_sum_id')
                start_time_index = segment_md.table['segment'][
                    'orderedcol'].index('start_time')
                end_time_index = segment_md.table['segment'][
                    'orderedcol'].index('end_time')
                start_time = segment_md.table['segment']['stream'][j][
                    start_time_index] + offset
                end_time = segment_md.table['segment']['stream'][j][
                    end_time_index] + offset
                new_seg = segments.segmentlist(
                    [segments.segment(start_time, end_time)])
                flag_versions[(ifo, name, version)].appendActive(new_seg)
        except KeyError:
            logger.info("No segment table for this file: %s" % filename)
            if debug:
                print "No segment table for this file: %s" % filename
        except:
            print "Unexpected error:", sys.exc_info()[0]
            raise

    for i in flag_versions.keys():
        flag_versions[i].coalesceInsertHistory()

    if threads > 1:
        # Call this after the loop over files, and we should be good to go
        concurrent = min(threads, len(flag_versions))
        q = Queue(concurrent * 2)  # Fix!!! Improvement: remove hardcoded concurrency
        for i in range(concurrent):
            t = Thread(target=threadedPatchWithFailCases,
                       args=[q, server, debug, logger])
            t.daemon = True
            t.start()
        for i in flag_versions.values():
            i.buildFlagDictFromInsertVersion()
            #i.flagDict
            url = i.buildURL(server)
            if debug:
                print url
                logger.debug("json.dumps(i.flagDict):")
                logger.debug("%s" % json.dumps(i.flagDict))
            #if hackDec11:
            #    if len(i.active)==0:
            #        print "No segments for this url"
            #        continue
            q.put(i)
        q.join()
    else:
        for i in flag_versions.values():
            i.buildFlagDictFromInsertVersion()
            #i.flagDict
            url = i.buildURL(server)
            if debug:
                logger.debug("Url for the following data: %s" % url)
                #print url
                logger.debug("json.dumps(i.flagDict):")
                logger.debug("%s" % json.dumps(i.flagDict))
            #if hackDec11:
            #    if len(i.active)==0:
            #        print "No segments for this url"
            #        continue
            patchWithFailCases(i, url, debug, logger, testing_options)

    if debug:
        logger.debug(
            "If we made it this far, no errors were encountered in the inserts."
        )
        #print "If we made it this far, no errors were encountered in the inserts."
    ### Fix!!! Improvement: Should be more careful about error handling here.
    if debug:
        t2 = time.time()
        logger.debug("Time elapsed for file %s = %d." % (filename, t2 - t1))
        #print "Time elapsed for file %s = %d." % (filename,t2-t1)
    return True
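
A hedged usage sketch for the function above; the file name, server URL, and logger setup are placeholders rather than values from the original project.

# Sketch only (Python 2): invoking InsertMultipleDQXMLFileThreaded.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('dqxml_insert')

ok = InsertMultipleDQXMLFileThreaded(
    ['H1-DQ_SEGMENTS-966383956-16.xml'],    # placeholder DQXML file name
    logger,
    server='https://segments.example.org',  # placeholder server URL
    hackDec11=False,
    debug=True,
    threads=4,
    testing_options={'offset': 0},
)
assert ok  # returns True when all inserts complete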
Code Example #5
  def insertdmt(self, environ, start_response):
    """
    """
    global xmlparser, dbobj
    global dmt_proc_dict, dmt_seg_def_dict, creator_db
    proc_key = {}
    known_proc = {}
    seg_def_key = {}

    logger.debug( "Method dmtinsert called." )
    logger.debug( "Known processes %s, " % str(dmt_proc_dict) )
    logger.debug( "Known segment_definers %s" % str(dmt_seg_def_dict) )

    # use specific grid-mapfile for insertdmt operation
    mapfile = self.configuration['gridmap_insertdmt']

    # check authorization
    authorized, subject = self.checkAuthorizationGridMap(environ, mapfile)
    if not authorized:
        start_response("401 Unauthorized", [('Content-type', 'text/plain')])
        msg = "401 Unauthorized\n\nSubject %s is not authorized for method insertdmt" % subject
        logger.info("Subject %s is not authorized for method insertdmt" % subject)
        return [ msg ]
    else:
        logger.info("Subject %s is authorized for method insertdmt" % subject)

    # pick off the format input type
    format = environ['wsgiorg.routing_args'][1]['format']

    # for a POST the format must be JSON
    if format != 'json':
        start_response("400 Bad Request", [('Content-type', 'text/plain')])
        msg = "400 Bad Request\n\nformat must be 'json' for POST operation"
        return [ msg ]

    # read the incoming payload
    try:
        inputString = cjson.decode(environ['wsgi.input'].read())
    except Exception as e:
        start_response("400 Bad Request", [('Content-type', 'text/plain')])
        msg = "400 Bad Request"
        logger.debug("Error decoding input: %s" % e)
        return [ msg ]

    logger.debug("Method insertdmt called with '%s'" % inputString)

    try:
      # create a ligo metadata object
      lwtparser = ldbd.LIGOLwParser()
      ligomd = ldbd.LIGOMetadata(xmlparser,lwtparser,dbobj)

      # parse the input string into a metadata object
      logger.debug("parsing xml data")
      ligomd.parse(inputString)

      # store the users dn in the process table
      ligomd.set_dn(subject)

      # determine the local creator_db number
      if creator_db is None:
        sql = "SELECT DEFAULT FROM SYSCAT.COLUMNS WHERE "
        sql += "TABNAME = 'PROCESS' AND COLNAME = 'CREATOR_DB'"
        ligomd.curs.execute(sql)
        creator_db = ligomd.curs.fetchone()[0]
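        # SYSCAT.COLUMNS is DB2's system catalog of column definitions;
        # reading the DEFAULT expression of PROCESS.CREATOR_DB yields the
        # local database's creator_db number.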

      # determine the locations of columns we need in the process table
      process_cols = ligomd.table['process']['orderedcol']
      node_col = process_cols.index('node')
      prog_col = process_cols.index('program')
      upid_col = process_cols.index('unix_procid')
      start_col = process_cols.index('start_time')
      end_col = process_cols.index('end_time')
      pid_col = process_cols.index('process_id')

      # determine and remove known entries from the process table
      rmv_idx = []
      for row_idx,row in enumerate(ligomd.table['process']['stream']):
        uniq_proc = (row[node_col],row[prog_col],row[upid_col],row[start_col])
        try:
          proc_key[str(row[pid_col])] = dmt_proc_dict[uniq_proc]
          known_proc[str(dmt_proc_dict[uniq_proc])] = row[end_col]
          rmv_idx.append(row_idx)
        except KeyError:
          # we know nothing about this process, so query the database
          sql = "SELECT BLOB(process_id) FROM process WHERE "
          sql += "creator_db = " + str(creator_db) + " AND "
          sql += "node = '" + row[node_col] + "' AND "
          sql += "program = '" + row[prog_col] + "' AND "
          sql += "unix_procid = " + str(row[upid_col]) + " AND "
          sql += "start_time = " + str(row[start_col])
          ligomd.curs.execute(sql)
          db_proc_ids = ligomd.curs.fetchall()
          if len(db_proc_ids) == 0:
            # this is a new process with no existing entry
            dmt_proc_dict[uniq_proc] = row[pid_col]
          elif len(db_proc_ids) == 1:
            # the process_id exists in the database so use that instead
            dmt_proc_dict[uniq_proc] = db_proc_ids[0][0]
            proc_key[str(row[pid_col])] = dmt_proc_dict[uniq_proc]
            known_proc[str(dmt_proc_dict[uniq_proc])] = row[end_col]
            rmv_idx.append(row_idx)
          else:
            # multiple entries for this process, needs human assistance
            raise ServerHandlerException("multiple entries for dmt process")

      # delete the duplicate process rows and clear the table if necessary
      newstream = []
      for row_idx,row in enumerate(ligomd.table['process']['stream']):
        try:
          rmv_idx.index(row_idx)
        except ValueError:
          newstream.append(row)
      ligomd.table['process']['stream'] = newstream
      if len(ligomd.table['process']['stream']) == 0:
        del ligomd.table['process']

      # delete the duplicate process_params rows and clear the table if necessary
      # (the DMT does not write a process_params table, so check for one first)
      if 'process_params' in ligomd.table:
        ppid_col = ligomd.table['process_params']['orderedcol'].index('process_id')
        newstream = []
        for row_idx,row in enumerate(ligomd.table['process_params']['stream']):
          # if the process_id in this row is known, delete (i.e. don't copy) it
          try:
            proc_key[str(row[ppid_col])]
          except KeyError:
            newstream.append(row)
        ligomd.table['process_params']['stream'] = newstream
        if len(ligomd.table['process_params']['stream']) == 0:
          del ligomd.table['process_params']

      # turn the known process_id binary for this insert into ascii
      for pid in known_proc.keys():
        pid_str = "x'"
        for ch in pid:
          pid_str += "%02x" % ord(ch)
        pid_str += "'"
        known_proc[pid] = (pid_str, known_proc[pid])
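      # Each raw binary process_id is now a DB2-style hex literal such as
      # x'00aa...', used verbatim in the SELECT/UPDATE statements below.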

      # determine the locations of columns we need in the segment_definer table
      seg_def_cols = ligomd.table['segment_definer']['orderedcol']
      ifos_col = seg_def_cols.index('ifos')
      name_col = seg_def_cols.index('name')
      vers_col = seg_def_cols.index('version')
      sdid_col = seg_def_cols.index('segment_def_id')

      # determine and remove known entries in the segment_definer table
      rmv_idx = []
      for row_idx,row in enumerate(ligomd.table['segment_definer']['stream']):
        uniq_def = (row[ifos_col],row[name_col],row[vers_col])
        try:
          seg_def_key[str(row[sdid_col])] = dmt_seg_def_dict[uniq_def]
          rmv_idx.append(row_idx)
        except KeyError:
          # we know nothing about this segment_definer, so query the database
          sql = "SELECT BLOB(segment_def_id) FROM segment_definer WHERE "
          sql += "creator_db = " + str(creator_db) + " AND "
          sql += "ifos = '" + row[ifos_col] + "' AND "
          sql += "name = '" + row[name_col] + "' AND "
          sql += "version = " + str(row[vers_col])
          ligomd.curs.execute(sql)
          db_seg_def_id = ligomd.curs.fetchall()
          if len(db_seg_def_id) == 0:
            # this is a new segment_definition with no existing entry
            dmt_seg_def_dict[uniq_def] = row[sdid_col]
          else:
            dmt_seg_def_dict[uniq_def] = db_seg_def_id[0][0]
            seg_def_key[str(row[sdid_col])] = dmt_seg_def_dict[uniq_def]
            rmv_idx.append(row_idx)

      # delete the necessary rows. if the table is empty, delete it
      newstream = []
      for row_idx,row in enumerate(ligomd.table['segment_definer']['stream']):
        try:
          rmv_idx.index(row_idx)
        except ValueError:
          newstream.append(row)
      ligomd.table['segment_definer']['stream'] = newstream
      if len(ligomd.table['segment_definer']['stream']) == 0:
        del ligomd.table['segment_definer']

      # now update the values in the xml with the values we know about
      for tabname in ligomd.table.keys():
        table = ligomd.table[tabname]
        if tabname == 'process':
          # we do nothing to the process table
          pass
        elif tabname == 'segment' or tabname == 'segment_summary':
          # we need to update the process_id and the segment_def_id columns
          pid_col = table['orderedcol'].index('process_id')
          sdid_col = table['orderedcol'].index('segment_def_id')
          row_idx = 0
          for row in table['stream']:
            try:
              repl_pid = proc_key[str(row[pid_col])]
            except KeyError:
              repl_pid = row[pid_col]
            try:
              repl_sdid = seg_def_key[str(row[sdid_col])]
            except KeyError:
              repl_sdid = row[sdid_col]
            row = list(row)
            row[pid_col] = repl_pid
            row[sdid_col] = repl_sdid
            table['stream'][row_idx] = tuple(row)
            row_idx += 1
        else:
          # we just need to update the process_id column
          pid_col = table['orderedcol'].index('process_id')
          row_idx = 0
          for row in table['stream']:
            try:
              repl_pid = proc_key[str(row[pid_col])]
              row = list(row)
              row[pid_col] = repl_pid
              table['stream'][row_idx] = tuple(row)
            except KeyError:
              pass
            row_idx += 1

      # insert the metadata into the database
      logger.debug("inserting xml data")
      result = str(ligomd.insert())
      logger.debug("insertion complete")

      # update the end time of known processes in the process table
      for pid in known_proc.keys():
        # first check to see if we are backfilling missing segments
        sql = "SELECT end_time,domain FROM process "
        sql += " WHERE process_id = " + known_proc[pid][0]
        ligomd.curs.execute(sql)
        last_end_time = ligomd.curs.fetchone()

        # check the dn in the row we are about to update matches the users dn
        dn = last_end_time[1].strip()
        if subject != dn:
          msg = "\"%s\" does not match dn in existing row entries: " % subject
          msg += "%s (process_id %s)" % (dn, known_proc[pid][0])
          logger.warn(msg)
        else:
          logger.debug('"%s" updating process_id %s' % (dn, known_proc[pid][0]))

        if int(known_proc[pid][1]) <= int(last_end_time[0]):
          logger.debug("Backfilling missing segments for process_id " +
            known_proc[pid][0] + " not updating end_time")
        else:
          # if we are not backfilling, update the end_time of the process
          sql = "UPDATE process SET end_time = " + str(known_proc[pid][1])
          sql += " WHERE process_id = " + known_proc[pid][0]
          sql += " AND end_time < " + str(known_proc[pid][1])
          ligomd.curs.execute(sql)
      ligomd.dbcon.commit()

      logger.info("Method insert: %s rows affected by insert" % result)

    except Exception as e:
      start_response("500 Internal Server Error", [('Content-type', 'text/plain')])
      msg = "500 Internal Server Error\n\n%s" % e
      logger.error(msg)
      return [ msg ]

    try:
      del ligomd
      del lwtparser
      del known_proc
      del seg_def_key
      del proc_key
    except Exception as e:
      logger.error("Error deleting metadata object in method insertdmt: %s" % e)

    # encode the result
    result = cjson.encode(result)
    
    # return the result
    header = [('Content-Type', 'application/json')]
    start_response("200 OK", header)

    return [ result ]
Code Example #6
  def insert(self, environ, start_response):
    """
    """
    global xmlparser, dbobj

    logger.debug("Method insert called")

    # use specific grid-mapfile for insert operation
    mapfile = self.configuration['gridmap_insert']

    # check authorization
    authorized, subject = self.checkAuthorizationGridMap(environ, mapfile)
    if not authorized:
        start_response("401 Unauthorized", [('Content-type', 'text/plain')])
        msg = "401 Unauthorized\n\nSubject %s is not authorized for method insert" % subject
        logger.info("Subject %s is not authorized for method insert" % subject)
        return [ msg ]
    else:
        logger.info("Subject %s is authorized for method insert" % subject)

    # pick off the format input type
    format = environ['wsgiorg.routing_args'][1]['format']

    # for a POST the format must be JSON
    if format != 'json':
        start_response("400 Bad Request", [('Content-type', 'text/plain')])
        msg = "400 Bad Request\n\nformat must be 'json' for POST operation"
        return [ msg ]

    # read the incoming payload
    try:
        #import simplejson  (moved to top)
        wsgiIn = environ['wsgi.input'].read()
        inputString = simplejson.loads(wsgiIn)
        #inputString = cjson.decode(environ['wsgi.input'].read())
    except Exception as e:
        start_response("400 Bad Request", [('Content-type', 'text/plain')])
        msg = "400 Bad Request"
        logger.debug("Error decoding input: %s" % e)
        return [ msg ]

    logger.debug("Method insert called with '%s'" % inputString)

    try:
      # create a ligo metadata object
      lwtparser = ldbd.LIGOLwParser()
      ligomd = ldbd.LIGOMetadata(xmlparser,lwtparser,dbobj)

      # parse the input string into a metadata object
      ligomd.parse(inputString)

      # add a gridcert table to this request containing the users dn
      ligomd.set_dn(subject)

      # insert the metadata into the database
      result = str(ligomd.insert())

      logger.info("Method insert: %s rows affected by insert" % result)
    except Exception as e:
      start_response("500 Internal Server Error", [('Content-type', 'text/plain')])
      msg = "500 Internal Server Error\n\n%s" % e
      logger.error(msg)
      return [ msg ]

    try:
      del ligomd
      del lwtparser
    except Exception as e:
      logger.error("Error deleting metadata object in method query: %s" % e)

    # encode the result
    result = cjson.encode(result)
    
    # return the result
    header = [('Content-Type', 'application/json')]
    start_response("200 OK", header)

    return [ result ]
Code Example #7
  def query(self, environ, start_response):
    """
    """
    global xmlparser, dbobj

    logger.debug("Method query called")

    # determine protocol
    try:
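        # The payload is a JSON-encoded string of the form
        # "<protocol>:<query>"; split on the first colon only, since the
        # SQL query text may itself contain colons.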
        protocol, querystr = (cjson.decode(environ['wsgi.input'].read())).split(":", 1)
    except Exception as e:
        start_response("400 Bad Request", [('Content-type', 'text/plain')])
        msg = "400 Bad Request"
        logger.debug("Error decoding input: %s" % e)
        return [ msg ]


    if protocol == "https":
      # use generic grid-mapfile for query operation
      mapfile = self.configuration['gridmap']

      # check authorization
      authorized, subject = self.checkAuthorizationGridMap(environ, mapfile)
      if not authorized:
        start_response("401 Unauthorized", [('Content-type', 'text/plain')])
        msg = "401 Unauthorized\n\nSubject %s is not authorized for method query" % subject
        logger.info("Subject %s is not authorized for method query" % subject)
        return [ msg ]
      else:
        logger.info("Subject %s is authorized for method query" % subject)
    else:
      logger.info("Method QUERY is being called from internal network")


    # pick off the format input type
    format = environ['wsgiorg.routing_args'][1]['format']

    # for a POST the format must be JSON
    if format != 'json':
        start_response("400 Bad Request", [('Content-type', 'text/plain')])
        msg = "400 Bad Request\n\nformat must be 'json' for POST operation"
        return [ msg ]

    logger.debug("Method query called with '%s'" % querystr)

    try:
      # create a ligo metadata object
      lwtparser = ldbd.LIGOLwParser()
      ligomd = ldbd.LIGOMetadata(xmlparser,lwtparser,dbobj)

      # execute the query
      rowcount = ligomd.select(querystr)

      # convert the result to xml
      result = ligomd.xml()

      logger.debug("Method query: %d rows returned" % rowcount)
    except Exception as e:
      start_response("500 Internal Server Error", [('Content-type', 'text/plain')])
      msg = "500 Internal Server Error\n\n%s" % e
      logger.error(msg)
      return [ msg ]

    try:
      del ligomd
      del lwtparser
    except Exception as e:
      logger.error("Error deleting metadata object in method query: %s" % e)

    # encode the result
    result = cjson.encode(result)
    
    # return the result
    header = [('Content-Type', 'application/json')]
    start_response("200 OK", header)

    return [ result ]