Example #1
def checkWellFormed(data):
    """Checks content is valid XML.
    >>> 2 + 2
    4
    >>> 2 + 3
    6
    >>> data = [["<p>blah blah >/p>"]]
    >>> checkWellFormed(data)
    0
    """
    import pyRXP
    parser = pyRXP.Parser(
        ExpandCharacterEntities=0,
        ExpandGeneralEntities=0,
        XMLPredefinedEntities=1,
        ErrorOnBadCharacterEntities=0,
        ErrorOnUndefinedEntities=0,
        CaseInsensitive=0,
        IgnoreEntities=1,
        ReturnList=1,
        AllowMultipleElements=1,
    )
    s = str(data)
    if '<' in s or '>' in s:
        try:
            parser('<fragment>%s</fragment>' % s)
        except pyRXP.error as e:
            print(e.args[0])  # first error message
            print(s + '\n')
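A hedged usage sketch for checkWellFormed (the sample strings are illustrative): the first call is silent because the wrapped fragment parses, while the second prints pyRXP's first error message followed by the data.

checkWellFormed([["<p>well formed</p>"]])   # silent: fragment parses
checkWellFormed([["<p>blah blah >/p>"]])    # prints the parser error and the data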
Example #2
def checkTags(data, tags=None):
    """Collects the element names used in data; returns None if the
    markup is not well-formed."""
    if tags is None:
        tags = []
    import pyRXP
    parser = pyRXP.Parser(
        ExpandCharacterEntities=0,
        ExpandGeneralEntities=0,
        XMLPredefinedEntities=1,
        ErrorOnBadCharacterEntities=0,
        ErrorOnUndefinedEntities=0,
        CaseInsensitive=0,
        IgnoreEntities=1,
        ReturnList=1,
        AllowMultipleElements=0,
    )
    s = str(data)
    if '<' in s or '>' in s:
        try:
            d = parser('<fragment>%s</fragment>' % s)
            newTags = findTags(d[0])
            for t in newTags:
                if t not in tags:
                    tags.append(t)
            tags.remove('fragment')
            return tags
        except pyRXP.error:
            pass  # malformed fragment: fall through and return None
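checkTags relies on a findTags helper that is not shown here; a minimal sketch, assuming nodes follow pyRXP's (tagName, attributes, children, spare) layout (this implementation is illustrative, not the original):

def findTags(node, found=None):
    # Collect element names from a pyRXP node tree, depth first.
    if found is None:
        found = []
    if not isinstance(node, (tuple, list)):
        return found  # text node: nothing to record
    if node[0] not in found:
        found.append(node[0])
    for child in node[2] or []:
        findTags(child, found)
    return found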
Example #3
def parse_string(s):
    '''Parses an XML document, returns a tree structure, removing comments
    and expanding entities and CDATA sections. If a parser error occurs,
    the error is printed and an exception is thrown.'''
    p = pyRXP.Parser()
    p.ReturnComments = 0
    p.ReturnCDATASectionsAsTuples = 0
    p.ReturnProcessingInstructions = 1
    p.ExpandGeneralEntities = 1
    p.ExpandCharacterEntities = 1
    return objectify(p(s))
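For reference, the raw tree that objectify() receives is pyRXP's nested node of (tagName, attributes, children, spare); a minimal illustration, assuming default parser flags:

import pyRXP

tree = pyRXP.Parser()('<a b="1">text</a>')
# tree == ('a', {'b': '1'}, ['text'], None)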
Example #4
	def __init__(self, client):
		def dtd_uri_callback(uri):
			if uri == 'http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt':
				return 'file://localhost' + os.path.join( os.environ["GLUE_PREFIX"], 'etc/ligolw_dtd.txt' )
			else:
				return uri

		self.client = client
		self.xmlparser = pyRXP.Parser()
		self.xmlparser.eoCB = dtd_uri_callback
		self.lwtparser = ldbd.LIGOLwParser()
		self.ligomd = ldbd.LIGOMetadata(self.xmlparser, self.lwtparser, None)
Example #5
def parse_string_for_rewrite(s):
    '''Parses an XML document, returns a tree structure from which the
    file can be reconstructed (i.e. entities, comments and CDATA sections
    are preserved). If a parser error occurs, the error is printed and
    an exception is thrown.'''
    p = pyRXP.Parser()
    p.ReturnComments = 1
    p.ReturnCDATASectionsAsTuples = 1
    p.ReturnProcessingInstructions = 1
    p.ExpandGeneralEntities = 0
    p.ExpandCharacterEntities = 0
    #p.ReturnList = 1
    return objectify(p(s))
Example #6
    def _get(self, startNumber, reqsize):
        import pyRXP
        from urllib.request import urlopen

        hippoXML = urlopen(
            "http://www.searchhippo.com/qxml.php?q=%s&c=%i&i=%i" % (
                self._searchTerms,
                self.requestSize,
                startNumber,
            )).read()
        if self.parser is None:
            self.parser = pyRXP.Parser()
        tree = self.parser.parse(hippoXML)
        self._descend(tree)
Example #7
def test(fn):
    cwd = os.getcwd()
    try:
        print('BEGIN')
        t0 = time.time()
        xml = open(fn, 'r').read()
        os.chdir(os.path.dirname(fn))
        t1 = time.time()
        D = pyRXP.Parser().parse(xml)
        t2 = time.time()
        print('END read took %7.2f" parse took %7.2f"' % (t1 - t0, t2 - t1))
        return D
    finally:
        os.chdir(cwd)
Example #8
def validate(rawdata):
    global _pyRXP_Parser
    if not _pyRXP_Parser:
        try:
            import pyRXP
        except ImportError:
            return
        from reportlab.lib.utils import open_and_read, rl_isfile
        dtd = 'pythonpoint.dtd'
        if not rl_isfile(dtd):
            dtd = os.path.join(toolsDir(),'pythonpoint','pythonpoint.dtd')
            if not rl_isfile(dtd): return
        def eocb(URI, dtdText=open_and_read(dtd), dtd=dtd):
            if os.path.basename(URI) == 'pythonpoint.dtd':
                return dtd, dtdText
            return URI
        _pyRXP_Parser = pyRXP.Parser(eoCB=eocb)
    return _pyRXP_Parser.parse(rawdata)
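A short usage sketch for validate(); the file name is hypothetical, and the function returns None when pyRXP or the DTD cannot be found:

with open('slides.xml') as f:   # hypothetical PythonPoint document
    tree = validate(f.read())
if tree is not None:
    print(tree[0])              # root tag name of the parsed tuple tree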
Example #9
def initialize(configuration,log):
  # define the global variables used by the server
  global logger, max_bytes, xmlparser, dbobj, rls
  global dmt_proc_dict, dmt_seg_def_dict, creator_db
  global ldbd_com, db2_com, port

  # initialize the logger
  logger = log
  logger.info("Initializing server module %s" % __name__ )
  
  # initialize the database hash table
  dbobj = ldbd.LIGOMetadataDatabase(configuration['dbname'])
  max_bytes = configuration['max_client_byte_string']

  # initialize the ldbd commands and db2 command restrictions
  port = configuration['port']
  ldbd_com = configuration['ldbd_com'].split(',')
  db2_com = configuration['db2_com'].split(',')

  # create the xml parser
  xmlparser = pyRXP.Parser()

  # use a local copy of the DTD, if one is available
  try:
    GLUE_PREFIX = os.environ["GLUE_PREFIX"]
    xmlparser.eoCB = dtd_uri_callback
    logger.info("Using local DTD in " + 
      'file://localhost' + os.path.join( GLUE_PREFIX, 'etc') )
  except KeyError:
    logger.warning('GLUE_PREFIX not set, unable to use local DTD') 

  # open a connection to the rls server
  rls_server = configuration['rls']
  cert = configuration['certfile']
  key = configuration['keyfile']
  try:
    rls = rlsClient.RlsClient(rls_server, cert, key)
  except Exception:
    rls = None

  # initialize dictionaries for the dmt processes and segments definers
  dmt_proc_dict = {}
  dmt_seg_def_dict = {}
  creator_db = None
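initialize() reads a fixed set of configuration keys; a hedged sketch of such a mapping (all values are illustrative):

configuration = {
    'dbname': 'segment_db',              # LIGOMetadataDatabase name
    'max_client_byte_string': 1048576,   # request size cap
    'port': 30020,
    'ldbd_com': 'PING,QUERY,INSERT',     # comma-separated command lists
    'db2_com': 'SELECT',
    'rls': 'rls://rls.example.org',      # RLS server contact string
    'certfile': '/tmp/ldbdcert.pem',
    'keyfile': '/tmp/ldbdkey.pem',
}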
Example #10
    def readXML(self, input, **kwargs):

        if isinstance(input, QPCore.STRING_TYPES):
            istream = QPUtils.getQPDataSource(input, **kwargs)
        else:
            istream = input

        # get whole content of stream at once
        lines = istream.read()

        # check if it is XML
        if not lines.startswith('<?xml'):
            raise IOError('QPGrid::readXML - input stream is not XML')

        tree = pyRXP.Parser().parse(lines)

        if tree[QPCore.POS_TAGNAME] != ROOT_ELEMENT_NAME:
            raise TypeError('input stream is not of QPGrid type')

        # NOTE: annotation element is not read from XML
        for child in tree[QPCore.POS_CHILDREN]:

            # possible child elements: grid
            if child[QPCore.POS_TAGNAME] == PACKAGE_ELEMENT_NAME:

                # QPGrid can only contain one single grid element
                # check if grid already existing
                if hasattr(self, PACKAGE_ELEMENT_NAME):
                    raise TypeError(
                        'only single occurrence of grid element allowed')

                self.grid = Grid(parentAxis=self.elementAxis,
                                 elementName=PACKAGE_ELEMENT_NAME)

                self.grid.fromXML(child, self.elements)

        if not hasattr(self, PACKAGE_ELEMENT_NAME):
            raise TypeError('no grid tag found')
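readXML() indexes nodes through QPCore position constants; assuming pyRXP's standard node layout of (tagName, attributes, children, spare), they would be defined along these lines (a sketch, not the actual QPCore source):

POS_TAGNAME = 0    # element name
POS_ATTRS = 1      # attribute dict (or None)
POS_CHILDREN = 2   # list of child nodes and text strings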
Example #11
def _create_record_rxp(marcxml,
                       verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                       correct=CFG_BIBRECORD_DEFAULT_CORRECT,
                       keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record object using the RXP parser.

    If verbose>3 then the parser will be strict and will stop in case of
    well-formedness errors or DTD errors.
    If verbose=0, the parser will not give warnings.
    If 0 < verbose <= 3, the parser will not give errors, but will warn
    the user about possible mistakes

    correct != 0 -> We will try to correct errors such as missing
    attributes
    correct = 0 -> there will not be any attempt to correct errors"""
    if correct:
        # Note that with pyRXP < 1.13 a memory leak has been found
        # involving DTD parsing. So enable correction only if you have
        # pyRXP 1.13 or greater.
        marcxml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                   '<collection>\n%s\n</collection>' % (marcxml))

    # Create the pyRXP parser.
    pyrxp_parser = pyRXP.Parser(ErrorOnValidityErrors=0,
                                ProcessDTD=1,
                                ErrorOnUnquotedAttributeValues=0,
                                srcName='string input')

    if verbose > 3:
        pyrxp_parser.ErrorOnValidityErrors = 1
        pyrxp_parser.ErrorOnUnquotedAttributeValues = 1

    try:
        root = pyrxp_parser.parse(marcxml)
    except pyRXP.error as ex1:
        raise InvenioBibRecordParserError(str(ex1))
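A usage sketch under stated assumptions: Invenio's CFG_* defaults are in scope and the MARCXML literal is illustrative (as excerpted, the function stops after the parse):

marcxml = ('<record>'
           '<controlfield tag="001">1</controlfield>'
           '</record>')
_create_record_rxp(marcxml, verbose=0, correct=0)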
Example #12
def InsertMultipleDQXMLFileThreaded(filenames,
                                    logger,
                                    server='http://slwebtest.virgo.infn.it',
                                    hackDec11=True,
                                    debug=True,
                                    threads=1,
                                    testing_options={}):
    """ 
    Inserts multiple dqxml files of data into the DQSEGDB.
    - filenames is a list of string filenames for  DQXML files.
    - hackDec11 is used to turn off good features that the server doesn't
    yet support.
    returns True if it completes sucessfully
    - options is a dictionary including (optionally):offset(int),synchronize(time in 'HH:MM' format (string))
    """
    logger.info(
        "Beginning call to InsertMultipleDQXMLFileThreaded.  This message last updated April 14 2015, Ciao da Italia!"
    )
    from threading import Thread
    from queue import Queue
    import sys

    # Make a call to server+'/dq':
    protocol = server.split(':')[0]
    serverfqdn = server.split('/')[-1]
    apiResult = queryAPIVersion(protocol, serverfqdn, False)
    # If the API change results in a backwards incompatibility, handle it here with a flag that affects behavior below
    if apiResult >= "2.1.0":
        # S6 style comments are needed
        new_comments = True
    else:
        # Older server, so don't want to supply extra comments...
        new_comments = False
    if apiResult >= "2.1.15":
        # Alteration to insertion_metadata from uri to comment to accommodate s6 data conversion
        use_new_insertion_metadata = True
    else:
        use_new_insertion_metadata = False

    if 'offset' in testing_options:
        offset = int(testing_options['offset'])
    else:
        offset = 0
    if 'synchronize' in testing_options:
        synchronize = testing_options['synchronize']

    xmlparser = pyRXP.Parser()
    lwtparser = ldbd.LIGOLwParser()

    flag_versions = {}

    # flag_versions, filename, server, hackDec11, debug are current variables

    # This next bunch of code is specific to a given file:
    if len(filenames) < 1:
        print "Empty file list sent to InsertMultipleDQXMLFileThreaded"
        raise ValueError
    for filename in filenames:

        segment_md = setupSegment_md(filename, xmlparser, lwtparser, debug)

        # segment_md, flag_versions, filename, server, hackDec11, debug are current variables

        flag_versions_numbered = {}

        for j in range(len(segment_md.table['segment_definer']['stream'])):
            flag_versions_numbered[j] = {}
            for i, entry in enumerate(
                    segment_md.table['segment_definer']['orderedcol']):
                #print j,entry,segment_md.table['segment_definer']['stream'][j][i]
                flag_versions_numbered[j][entry] = segment_md.table[
                    'segment_definer']['stream'][j][i]

        # parse process table and make a dict that corresponds with each
        # process, where the keys for the dict are like "process:process_id:1"
        # so that we can match
        # these to the flag_versions from the segment definer in the next
        # section

        # Note:  Wherever temp_ precedes a name, it is generally an identifier
        # field from the dqxml, that is only good for the single dqxml file
        # being parsed

        process_dict = {}
        # Going to assign process table streams to process_dict with a key
        # matching process_id (process:process_id:0 for example)
        for j in range(len(segment_md.table['process']['stream'])):
            process_id_index = segment_md.table['process']['orderedcol'].index(
                'process_id')
            temp_process_id = segment_md.table['process']['stream'][j][
                process_id_index]
            # Now we're going to assign elements to process_dict[process_id]
            process_dict[temp_process_id] = {}
            for i, entry in enumerate(
                    segment_md.table['process']['orderedcol']):
                #print j,entry,segment_md.table['process']['stream'][j][i]
                process_dict[temp_process_id][entry] = segment_md.table[
                    'process']['stream'][j][i]
                # Note that the segment_md.table['process']['stream'][0] looks like this:
                #0 program SegGener
                #0 version 6831
                #0 cvs_repository https://redoubt.ligo-wa.caltech.edu/
                #0                svn/gds/trunk/Monitors/SegGener/SegGener.cc
                #0 cvs_entry_time 1055611021
                #0 comment Segment generation from an OSC condition
                #0 node l1gds2
                #0 username [email protected]
                #0 unix_procid 24286
                #0 start_time 1065916603
                #0 end_time 1070395521
                #0 process_id process:process_id:0
                #0 ifos L0L1
                # So now I have all of that info stored by the process_id keys
                # Eventually I have to map these elements to the process_metadata
                # style.. maybe I can do that now:
            process_dict[temp_process_id]['process_metadata'] = {}
            if hackDec11:
                process_dict[temp_process_id]['process_metadata'][
                    'process_start_time'] = process_dict[temp_process_id][
                        'start_time']
            else:  # This is for the newer server APIs:  (April 24 2015 we checked it (it probably changed before ER6 finally))
                process_dict[temp_process_id]['process_metadata'][
                    'process_start_timestamp'] = process_dict[temp_process_id][
                        'start_time']
            if new_comments:
                process_dict[temp_process_id][
                    'process_comment'] = process_dict[temp_process_id][
                        'comment']
            process_dict[temp_process_id]['process_metadata'][
                'uid'] = process_dict[temp_process_id]['username']
            process_dict[temp_process_id]['process_metadata']['args'] = []  ### Fix!!! dqxml has no args???
            process_dict[temp_process_id]['process_metadata'][
                'pid'] = process_dict[temp_process_id]['unix_procid']
            process_dict[temp_process_id]['process_metadata'][
                'name'] = process_dict[temp_process_id]['program']
            process_dict[temp_process_id]['process_metadata'][
                'fqdn'] = process_dict[temp_process_id][
                    'node']  ### Fix!!! Improvement: not really fqdn, just the node name

        # So now I have process_dict[temp_process_id]['process_metadata'] for each
        # process_id, and can add it to a flag version when it uses it;  really I
        # should group it with the segment summary info because that has the
        # insertion_metadata start and stop time

        ### Fix!!! Get the args from the *other* process table... yikes
        ### Double check what is done below works!
        # First pass:
        #if debug:
        #    import pdb
        #    pdb.set_trace()

        temp_process_params_process_id = None
        try:
            len(segment_md.table['process_params']['stream'])
        except:
            logger.info("No process_params table for file: %s" % filename)
        else:
            for j in range(len(segment_md.table['process_params']['stream'])):
                process_id_index = segment_md.table['process_params'][
                    'orderedcol'].index('process_id')
                temp_process_params_process_id = segment_md.table[
                    'process_params']['stream'][j][process_id_index]
                #  This next bit looks a bit strange, but the goal is to pull off only the param and value from each row of the process_params table, and then put them into the process_metadata
                #  Thus we loop through the columns in each row and toss out everything but the param and value entries, and then outside the for loop, append them to the args list
                for i, entry in enumerate(
                        segment_md.table['process_params']['orderedcol']):
                    if entry == "param":
                        temp_param = str(
                            segment_md.table['process_params']['stream'][j][i])
                    if entry == "value":
                        temp_value = str(
                            segment_md.table['process_params']['stream'][j][i])
                process_dict[temp_process_params_process_id][
                    'process_metadata']['args'].append(str(temp_param))
                process_dict[temp_process_params_process_id][
                    'process_metadata']['args'].append(str(temp_value))

        #if debug:
        #    import pdb
        #    pdb.set_trace()

        temp_id_to_flag_version = {}

        for i in flag_versions_numbered.keys():
            ifo = flag_versions_numbered[i]['ifos']
            name = flag_versions_numbered[i]['name']
            version = flag_versions_numbered[i]['version']
            if (ifo, name, version) not in flag_versions.keys():
                if new_comments:
                    flag_versions[(ifo, name, version)] = InsertFlagVersion(
                        ifo, name, version)
                else:
                    flag_versions[(ifo, name, version)] = InsertFlagVersionOld(
                        ifo, name, version)
                if new_comments:
                    flag_versions[(ifo, name, version)].flag_description = str(
                        flag_versions_numbered[i]['comment']
                    )  # old segment_definer comment = new flag_description
                    # OUTDATED PLACEHOLDER: flag_versions[(ifo,name,version)].version_comment=str(flag_versions_numbered[i]['comment'])
                else:
                    flag_versions[(ifo, name, version)].flag_comment = str(
                        flag_versions_numbered[i]['comment'])
                    flag_versions[(ifo, name, version)].version_comment = str(
                        flag_versions_numbered[i]['comment'])
            flag_versions[(
                ifo, name,
                version)].temporary_definer_id = flag_versions_numbered[i][
                    'segment_def_id']
            flag_versions[(
                ifo, name, version
            )].temporary_process_id = flag_versions_numbered[i]['process_id']
            # Populate reverse lookup dictionary:
            temp_id_to_flag_version[flag_versions[(
                ifo, name, version)].temporary_definer_id] = (ifo, name,
                                                              version)

        # ways to solve the metadata problem:
        # Associate each insertion_metadata block with a process, then group
        # them and take the min insert_data_start and max insert_data_stop

        # parse segment_summary table and associate known segments with
        # flag_versions above:
        ## Note this next line is needed for looping over multiple files
        for i in flag_versions.keys():
            flag_versions[i].temp_process_ids = {}
        for j in range(len(segment_md.table['segment_summary']['stream'])):
            #flag_versions_numbered[j] = {}
            seg_def_index = segment_md.table['segment_summary'][
                'orderedcol'].index('segment_def_id')
            #print "associated seg_def_id is: "+ segment_md.table['segment_summary']['stream'][j][seg_def_index]
            (ifo, name, version
             ) = temp_id_to_flag_version[segment_md.table['segment_summary']
                                         ['stream'][j][seg_def_index]]
            seg_sum_index = segment_md.table['segment_summary'][
                'orderedcol'].index('segment_sum_id')
            # Unneeded:
            #flag_versions[(ifo,name,version)].temporary_segment_sum_id = segment_md.table['segment_summary']['stream'][j][seg_sum_index]
            start_time_index = segment_md.table['segment_summary'][
                'orderedcol'].index('start_time')
            end_time_index = segment_md.table['segment_summary'][
                'orderedcol'].index('end_time')
            start_time = segment_md.table['segment_summary']['stream'][j][
                start_time_index] + offset
            end_time = segment_md.table['segment_summary']['stream'][j][
                end_time_index] + offset
            comment_index = segment_md.table['segment_summary'][
                'orderedcol'].index('comment')
            seg_sum_comment = segment_md.table['segment_summary']['stream'][j][
                comment_index]
            new_seg_summary = segments.segmentlist(
                [segments.segment(start_time, end_time)])
            flag_versions[(ifo, name, version)].appendKnown(new_seg_summary)
            # Now I need to build up the insertion_metadata dictionary for this
            # summary:
            # Now I need to associate the right process with the known
            # segments here, and put the start and end time into the
            # insertion_metadata part of the
            #  insert_history dict
            # Plan for processes and affected data:
            # Loop through segment summaries
            # If we haven't seen the associated process before, create it:
            # First, append the temp_process_id to temp_process_ids
            # Then, each temp_process_ids entry is a dictionary, where the one
            # element is start_affected time, and the other is end_affected
            # time, and later we will combine this with the correct
            # process_metadata dictionary
            process_id_index = segment_md.table['segment_summary'][
                'orderedcol'].index('process_id')
            temp_process_id = segment_md.table['segment_summary']['stream'][j][
                process_id_index]
            if temp_process_id in flag_versions[(
                    ifo, name, version)].temp_process_ids.keys():
                # We don't need to append this process metadata, as it already
                # exists We do need to extend the affected data start and stop
                # to match
                if start_time < flag_versions[(
                        ifo, name, version
                )].temp_process_ids[temp_process_id]['insert_data_start']:
                    flag_versions[(ifo, name, version)].temp_process_ids[
                        temp_process_id]['insert_data_start'] = start_time
                if end_time > flag_versions[(
                        ifo, name, version
                )].temp_process_ids[temp_process_id]['insert_data_stop']:
                    flag_versions[(ifo, name, version)].temp_process_ids[
                        temp_process_id]['insert_data_stop'] = end_time
            else:
                # Need to make the dictionary entry for this process_id
                if seg_sum_comment is not None:
                    flag_versions[(ifo, name,
                                   version)].provenance_url = seg_sum_comment
                else:
                    flag_versions[(ifo, name, version)].provenance_url = ''
                flag_versions[(
                    ifo, name,
                    version)].temp_process_ids[temp_process_id] = {}
                flag_versions[(ifo, name, version)].temp_process_ids[
                    temp_process_id]['insert_data_start'] = start_time
                flag_versions[(ifo, name, version)].temp_process_ids[
                    temp_process_id]['insert_data_stop'] = end_time

        # Now, I need to append an insert_history element to the flag_versions
        # for this ifo,name, version, as I have the correct insertion_metadata
        # and the correct
        # process_metadata (from the process_dict earlier
        if debug:
            t1 = time.time()
        for i in flag_versions.keys():
            for pid in flag_versions[i].temp_process_ids.keys():
                start = flag_versions[i].temp_process_ids[pid][
                    'insert_data_start']
                stop = flag_versions[i].temp_process_ids[pid][
                    'insert_data_stop']
                if new_comments:
                    flag_versions[i].flag_version_comment = process_dict[pid][
                        'process_comment']
                insert_history_dict = {}
                try:
                    insert_history_dict['process_metadata'] = process_dict[
                        pid]['process_metadata']
                except:
                    raise
                #    import pdb
                #    pdb.set_trace()
                insert_history_dict['insertion_metadata'] = {}
                insert_history_dict['insertion_metadata'][
                    'insert_data_stop'] = stop
                insert_history_dict['insertion_metadata'][
                    'insert_data_start'] = start
                ifo = flag_versions[i].ifo
                version = flag_versions[i].version
                name = flag_versions[i].name
                if use_new_insertion_metadata:
                    insert_history_dict['insertion_metadata'][
                        'comment'] = '/dq/' + '/'.join(
                            [str(ifo), str(name),
                             str(version)]
                        )  # FIX make dq a constant string in case we ever change it
                else:
                    insert_history_dict['insertion_metadata'][
                        'uri'] = '/dq/' + '/'.join(
                            [str(ifo), str(name),
                             str(version)]
                        )  # FIX make dq a constant string in case we ever change it
                #print ifo,name,version
                insert_history_dict['insertion_metadata'][
                    'timestamp'] = _UTCToGPS(time.gmtime())
                insert_history_dict['insertion_metadata'][
                    'auth_user'] = process.get_username()
                #if hackDec11:
                #    # note that this only uses one insert_history...despite
                #    all that hard work to get the list right...
                #    # so this might break something...
                #    flag_versions[i].insert_history=insert_history_dict
                #else:
                #    flag_versions[i].insert_history.append(insert_history_dict)
                flag_versions[i].insert_history.append(insert_history_dict)

        # parse segment table and associate known segments with flag_versions
        # above:
        try:
            for j in range(len(segment_md.table['segment']['stream'])):
                #flag_versions_numbered[j] = {}
                seg_def_index = segment_md.table['segment'][
                    'orderedcol'].index('segment_def_id')
                #print "associated seg_def_id is: "+
                #    segment_md.table['segment']['stream'][j][seg_def_index]
                (ifo, name, version) = temp_id_to_flag_version[
                    segment_md.table['segment']['stream'][j][seg_def_index]]
                #seg_sum_index = segment_md.table['segment']['orderedcol'].index('segment_sum_id')
                start_time_index = segment_md.table['segment'][
                    'orderedcol'].index('start_time')
                end_time_index = segment_md.table['segment'][
                    'orderedcol'].index('end_time')
                start_time = segment_md.table['segment']['stream'][j][
                    start_time_index] + offset
                end_time = segment_md.table['segment']['stream'][j][
                    end_time_index] + offset
                new_seg = segments.segmentlist(
                    [segments.segment(start_time, end_time)])
                flag_versions[(ifo, name, version)].appendActive(new_seg)
        except KeyError:
            logger.info("No segment table for this file: %s" % filename)
            if debug:
                print "No segment table for this file: %s" % filename
        except:
            print "Unexpected error:", sys.exc_info()[0]
            raise

    for i in flag_versions.keys():
        flag_versions[i].coalesceInsertHistory()

    if threads > 1:
        # Call this after the loop over files, and we should be good to go
        concurrent = min(threads, len(flag_versions))
        q = Queue(concurrent * 2)  # Fix!!! Improvement: remove hardcoded concurrency
        for i in range(concurrent):
            t = Thread(target=threadedPatchWithFailCases,
                       args=[q, server, debug, logger])
            t.daemon = True
            t.start()
        for i in flag_versions.values():
            i.buildFlagDictFromInsertVersion()
            #i.flagDict
            url = i.buildURL(server)
            if debug:
                print url
                logger.debug("json.dumps(i.flagDict):")
                logger.debug("%s" % json.dumps(i.flagDict))
            #if hackDec11:
            #    if len(i.active)==0:
            #        print "No segments for this url"
            #        continue
            q.put(i)
        q.join()
    else:
        for i in flag_versions.values():
            i.buildFlagDictFromInsertVersion()
            #i.flagDict
            url = i.buildURL(server)
            if debug:
                logger.debug("Url for the following data: %s" % url)
                #print url
                logger.debug("json.dumps(i.flagDict):")
                logger.debug("%s" % json.dumps(i.flagDict))
            #if hackDec11:
            #    if len(i.active)==0:
            #        print "No segments for this url"
            #        continue
            patchWithFailCases(i, url, debug, logger, testing_options)

    if debug:
        logger.debug(
            "If we made it this far, no errors were encountered in the inserts."
        )
        #print "If we made it this far, no errors were encountered in the inserts."
    ### Fix!!! Improvement: Should be more careful about error handling here.
    if debug:
        t2 = time.time()
        logger.debug("Time elapsed for file %s = %d." % (filename, t2 - t1))
        #print "Time elapsed for file %s = %d." % (filename,t2-t1)
    return True
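A hedged invocation sketch for InsertMultipleDQXMLFileThreaded; the file name and logger setup are illustrative:

import logging

logger = logging.getLogger('dqxml_insert')
logging.basicConfig(level=logging.DEBUG)

# hypothetical DQXML file; server and options as accepted above
InsertMultipleDQXMLFileThreaded(['H1-DQ_SEGMENTS-900000000-16.xml'],
                                logger,
                                server='http://slwebtest.virgo.infn.it',
                                threads=1,
                                testing_options={'offset': 0})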
Example #13
def getNonValidatingPyRXPParser():
    import pyRXP
    p = pyRXP.Parser(Validate=0)
    return p
Example #14
def recursedelete(tree, filedir):
    # NOTE: this excerpt began mid-function; the def line and the guard
    # below are reconstructed, and the 'Stream' tag test is a guess at the
    # XSIL element whose text children name data files
    if tree[0] == 'Stream':
        for child in tree[2]:
            if isinstance(child, str):
                datafile = child.strip()

                if os.path.isfile(datafile):
                    os.system('rm %s' % datafile)
                elif os.path.isfile(filedir + datafile):
                    os.system('rm %s' % filedir + datafile)
    else:
        if tree[2] is not None:
            for child in tree[2]:
                if isinstance(child, tuple):
                    recursedelete(child, filedir)


p = pyRXP.Parser()

errors = 0

for filename in sys.argv[1:]:
    try:
        tree = p(open(filename).read())
    except Exception:
        print("!!! Cannot open file %s, or it is malformed XML/XSIL. "
              "I will skip it." % filename)
        errors += 1
        continue

    recursedelete(tree, os.path.dirname(os.path.abspath(filename)) + '/')
    os.system('rm %s' % filename)

sys.exit(errors)
Example #15
import pyRXP
import sys

parser = pyRXP.Parser()
res = parser.parse(open(sys.argv[1], 'r').read())
if res:
    print('SUCCESS')
Example #16
def xml2doctree(xml):
    pyRXP_parse = pyRXP.Parser(ErrorOnValidityErrors=1,
                               NoNoDTDWarning=1,
                               ExpandCharacterEntities=0,
                               ExpandGeneralEntities=0)
    return pyRXP_parse.parse(xml)
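A quick usage sketch (the XML literal is illustrative); NoNoDTDWarning=1 appears intended to let documents that carry no DTD parse quietly while real validity errors still raise:

doctree = xml2doctree('<note><to>Tove</to><body>Hi</body></note>')
print(doctree[0])   # 'note'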
Example #17
def getPyRXPParser():
    import pyRXP
    p = pyRXP.Parser()
    return p