Example #1
File: fasta.py Project: qqss88/tcga
def fasta_dic(fastafile):
    id_seq_dic ={}
    id_list = []

    line = fastafile.readline()
    seq_list =[]
    id =''
    while (line!=''):
        if line[0] =='>':
            if len(seq_list) == 0 and id =='':
                id = string.strip(line[1:])  #id is whole line
            else:
                seq = string.join(seq_list,'')
                seq = string.upper(string.strip(string.replace(seq,' ','')))
                seq = string.upper(string.strip(string.replace(seq,'\n','')))
                id_seq_dic[id] = seq
                id_list.append(id)
                id = string.strip(line[1:])  #id is whole line
                seq_list =[]
        else:
            seq_list.append(line)
        line = fastafile.readline()

    if len(seq_list)==0 and id =='':
        pass
    else:
        seq = string.join(seq_list,'')
        seq = string.upper(string.strip(string.replace(seq,' ','')))
        seq = string.upper(string.strip(string.replace(seq,'\n','')))
        id_seq_dic[id] = seq
        id_list.append(id)

    fastafile.close()
    return id_seq_dic, id_list
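
In Python 3 the function forms from the string module (string.strip, string.join, string.upper) are gone; everything above collapses onto str methods. A minimal Python 3 sketch of the same parser, assuming the same open file object and the same (dict, id list) return shape:

def fasta_dic(fastafile):
    # Map each '>' header line to its uppercased, whitespace-free sequence,
    # keeping ids in encounter order as the original does.
    id_seq_dic = {}
    id_list = []
    seq_id = None
    seq_parts = []
    for line in fastafile:
        if line.startswith('>'):
            if seq_id is not None:
                id_seq_dic[seq_id] = ''.join(seq_parts)
                id_list.append(seq_id)
            seq_id = line[1:].strip()  # id is the whole header line
            seq_parts = []
        else:
            seq_parts.append(line.replace(' ', '').strip().upper())
    if seq_id is not None:  # flush the final record
        id_seq_dic[seq_id] = ''.join(seq_parts)
        id_list.append(seq_id)
    fastafile.close()
    return id_seq_dic, id_list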
Example #2
 def hasPrefix(self,String,StartPosition,punctuationlist):
     First = String[StartPosition:StartPosition+1]
     fcd = self.FirstcharDict
     caseins = self.caseInsensitive
     if caseins:
         First = string.upper(First)
     if fcd.has_key(First):
         Keylist = fcd[First]
     else:
         return 0
     for (key,value) in Keylist:
         offset = len(key)
         EndPosition = StartPosition+offset
         match = String[StartPosition : EndPosition]
         if caseins:
             match = string.upper(match)
         if key == match:
             if len(key)==1 and key in punctuationlist:
                 # punctuations are recognized regardless of nextchar
                 return (value,offset)
             else:
                 # nonpuncts must have punct or whitespace following
                 #(uses punct as single char convention)
                 if EndPosition == len(String):
                     return (value, offset)
                 else:
                     nextchar = String[EndPosition]
                     if nextchar in string.whitespace\
                         or nextchar in punctuationlist:
                         return (value, offset)
     return 0 # if no exit inside for loop, fail
Example #3
    def create_columns(self, table_id):
        """
        Create the columns for the table in question.  We call fix_datatype to
        clean up anything PGSQL or MYSQL specific.  The function is to be
        overridden by the database-specific subclass in question.

        """
        col_list = self.tables[table_id].get_columns()
        table_name = self.tables[table_id].name
        num_cols = len(col_list)
        if verbose:
            print "Found %s columns" % num_cols
        for index in range(num_cols):
            colname = col_list[index]

            coltype = self.tables[table_id].get_col_type(colname)
            coltype = self.fix_datatype(coltype)

            colvalue = self.tables[table_id].get_col_value(colname)
            colcomm = self.tables[table_id].get_col_comm(colname)
            if colcomm:
                colcomm = colcomm.strip()
                self.sql_create = string.join((self.sql_create, "\n\t/* ", \
                         colcomm, " */"), "")
            col_statement = self.fix_value(table_name, colname, coltype, colvalue)
            if index < num_cols-1:
                self.sql_create = string.join((self.sql_create, "\n\t", \
                            string.upper(col_statement)+","), "")
            else:
                self.sql_create = string.join((self.sql_create, "\n\t", \
                            string.upper(col_statement)), "")
Example #4
  def getfingerprint(self,digest='md5',delimiter=':'):
    digest = string.upper(digest)
    if not digest in ['MD2','MD5','MDC2','RMD160','SHA','SHA1']:
      raise ValueError, 'Illegal parameter for digest: %s' % digest
    elif self.fingerprint.has_key(digest):
      result = self.fingerprint[digest]
    elif digest=='MD5':
      return certhelper.MD5Fingerprint(certhelper.pem2der(open(self.filename,'r').read()),delimiter)
    elif digest=='SHA1':
      return certhelper.SHA1Fingerprint(certhelper.pem2der(open(self.filename,'r').read()),delimiter)
    else:
      opensslcommand = '%s x509 -in %s -inform %s -outform DER | %s %s' % (
        openssl.bin_filename,
        self.filename,
        self.format,
        openssl.bin_filename,
        string.lower(digest)
      )
      f = os.popen(opensslcommand)
      rawdigest = string.strip(f.read())
      rc = f.close()
      if rc and rc!=256:
        raise IOError,"Error %s: %s" % (rc,opensslcommand)
      result = []
      for i in range(len(rawdigest)/2):
        result.append(rawdigest[2*i:2*(i+1)])
      self.fingerprint[digest] = result
    return string.upper(string.join(result,delimiter))
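
For the MD5/SHA1 branches, hashlib now covers all the common digests without shelling out to openssl, and splitting the hex string into byte pairs is a slice loop. A sketch over raw DER bytes (the PEM-to-DER conversion and the fingerprint cache of the original are left out):

import hashlib

def getfingerprint(der_bytes, digest='md5', delimiter=':'):
    # hashlib.new raises ValueError for unsupported digest names.
    raw = hashlib.new(digest.lower(), der_bytes).hexdigest()
    pairs = [raw[i:i + 2] for i in range(0, len(raw), 2)]
    return delimiter.join(pairs).upper()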
Example #5
    def match(self, pattern, that, topic):
        """Return the template which is the closest match to pattern. The
        'that' parameter contains the bot's previous response. The 'topic'
        parameter contains the current topic of conversation.

        Returns None if no template is found.
        
        """
        if len(pattern) == 0:
            return None
        pattern = self._lang_support(pattern)
        # Mutilate the input.  Remove all punctuation and convert the
        # text to all caps.
        input = string.upper(pattern)
        input = self._puncStripRE.sub("", input)
        input = self._upuncStripRE.sub(u"", input)
        #print input
        if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
        thatInput = string.upper(that)
        thatInput = re.sub(self._whitespaceRE, " ", thatInput)
        thatInput = re.sub(self._puncStripRE, "", thatInput)
        if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
        topicInput = string.upper(topic)
        topicInput = re.sub(self._puncStripRE, "", topicInput)
        
        # Pass the input off to the recursive call
        patMatch, template = self._match(input.split(), thatInput.split(), topicInput.split(), self._root)
        return template
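
Examples #5, #10, and #20 all share the same normalization step: uppercase, strip punctuation, collapse whitespace, and substitute a sentinel for empty 'that'/'topic' values. A standalone sketch of that step (the punctuation set here is an assumption; the originals keep theirs in precompiled _puncStripRE and _whitespaceRE attributes):

import re

_PUNC_RE = re.compile(r"[.,!?;:'\"]")  # assumed punctuation set
_WS_RE = re.compile(r"\s+")

def normalize(text, sentinel="ULTRABOGUSDUMMYTHAT"):
    # The matcher requires non-empty input, hence the sentinel.
    if not text.strip():
        text = sentinel
    text = _PUNC_RE.sub(" ", text.upper())
    return _WS_RE.sub(" ", text).strip()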
Example #6
    def persist(self,reportd,mail_message):
            boats = Boat.all()
            boats.filter("passcode = ", reportd['passcode'])
            boats.filter("active =", True)

            emaillog=EmailLog()
            emaillog.rawemail=db.Text(mail_message.original.as_string())

            #TODO: I don't like this way of figuring out if we got any rows
            count=0
            boat=None
            for boat in boats:
                if (boat.key().name()==string.upper(reportd['boat'])):
                    count=count+1
                    break # Return the first one
                else:
                    boat=None

            if (count <1 or boats == None or boat == None):
                logging.error("Unable to persist report could not find boat "+string.upper(reportd['boat'])+":"+reportd['passcode'])
            else:
                report=Report(parent=boat)
                report.lat=reportd['lat']
                report.lon=reportd['lon']
                payload={
                         'comment' : reportd['comment']
                        }
                report.setPayload(payload)
                report.put()
                emaillog.report_ref=report
                reportd['time']=strftime("%Y/%m/%d %H:%M", gmtime())
                self.outbound(boat,reportd)

            emaillog.put()
Example #7
def Dependencies(lTOC):
    """Expand LTOC to include all the closure of binary dependencies.

     LTOC is a logical table of contents, i.e., a seq of tuples (name, path).
     Return LTOC expanded by all the binary dependencies of the entries
     in LTOC, except those listed in the module global EXCLUDES"""
    for (nm, pth) in lTOC:
        fullnm = string.upper(os.path.basename(pth))
        if seen.get(string.upper(nm), 0):
            continue
        print "analyzing", nm
        seen[string.upper(nm)] = 1
        dlls = getImports(pth)
        for lib in dlls:
            print " found", lib
            if excludes.get(string.upper(lib), 0):
                continue
            if seen.get(string.upper(lib), 0):
                continue
            npth = getfullnameof(lib)
            if npth:
                lTOC.append((lib, npth))
            else:
                print " lib not found:", lib, "dependency of",
    return lTOC
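
Examples #7 and #15 are two generations of the same PyInstaller-style dependency walk; both key the seen and excludes tables on uppercased names because Windows filenames are case-insensitive. The idiom in isolation, as a sketch (get_imports and resolve stand in for the getImports and getfullnameof helpers):

def dependencies(toc, get_imports, resolve, excludes=frozenset()):
    # toc is a list of (name, path); it grows while we iterate, exactly
    # as in the original, so newly appended entries get analyzed too.
    seen = set()
    for name, path in toc:
        if name.upper() in seen:
            continue
        seen.add(name.upper())
        for lib in get_imports(path):
            if lib.upper() in excludes or lib.upper() in seen:
                continue
            full_path = resolve(lib)
            if full_path:
                toc.append((lib, full_path))
    return toc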
Example #8
File: ftp.py Project: fxia22/ASM_xf
 def lineReceived(self, line):
     "Process the input from the client"
     line = string.strip(line)
     if self.debug:
         log.msg(repr(line))
     command = string.split(line)
     if command == []:
         self.reply('unknown')
         return 0
     commandTmp, command = command[0], ''
     for c in commandTmp:
         if ord(c) < 128:
             command = command + c
     command = string.capitalize(command)
     if self.debug:
         log.msg("-"+command+"-")
     if command == '':
         return 0
     if string.count(line, ' ') > 0:
         params = line[string.find(line, ' ')+1:]
     else:
         params = ''
     # Does this work at all? Quit at ctrl-D
     if ( string.find(line, "\x1A") > -1):
         command = 'Quit'
     method = getattr(self, "ftp_%s" % command, None)
     if method is not None:
         n = method(params)
         if n == 1:
             self.reply('unknown', string.upper(command))
     else:
         self.reply('unknown', string.upper(command))
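
The verb parsing here (strip, keep ASCII only, capitalize, then dispatch via getattr(self, 'ftp_%s' % command)) is separable from the Twisted plumbing. A sketch of just that parse:

def parse_command(line):
    # Returns (verb, params); verb is '' when the line is blank.
    line = line.strip()
    parts = line.split()
    if not parts:
        return '', ''
    verb = ''.join(c for c in parts[0] if ord(c) < 128).capitalize()
    params = line.split(' ', 1)[1] if ' ' in line else ''
    return verb, params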
Example #9
	def execute(self, operation, parameters = None):
		"""Execute a query or command"""

		# TODO:
		# Support parameters. See the todo under callproc

		sqlCom = self.con.CreateCommand()
		sqlCom.Transaction = self.tran
		sqlCom.CommandText = operation

		self._disposedatareader() #( Needed even for ExecuteNonQuery().

		if string.upper(string.lstrip(operation))[:6] == 'SELECT':
			sqlCom.CommandType = System.Data.CommandType.Text #( Just in case
			self._executereader(sqlCom)

		elif string.upper(string.lstrip(operation))[:4] == 'EXEC':
			# TODO: Include this command:
			# sqlCom.CommandType = System.Data.CommandType.StoredProcedure

			# TODO:
			# This isn't working for some reason with this command from the
			# calling program:
			# cur2.execute('exec Test 1, "test2", 3, 4')
			# self._executereader(sqlCom)

			pass

		else:
			sqlCom.CommandType = System.Data.CommandType.Text
			self.datareader = sqlCom.ExecuteNonQuery()

		sqlCom.Dispose()
Example #10
        def match(self, pattern, that, topic):
                """Return the template which is the closest match to pattern. The
                'that' parameter contains the bot's previous response. The 'topic'
                parameter contains the current topic of conversation.

                Returns None if no template is found.

                """
                if len(pattern) == 0:
                        return None
                # Mutilate the input.  Remove all punctuation and convert the
                # text to all caps.
                input = string.upper(pattern)
                input = re.sub(self._puncStripRE, " ", input)
                if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
                thatInput = string.upper(that)
                thatInput = re.sub(self._puncStripRE, " ", thatInput)
                thatInput = re.sub(self._whitespaceRE, " ", thatInput)
                # if topic.strip() in ["object","people","default"]: 
                #     topic = u"ULTRABOGUSDUMMYTOPIC"+" "+topic # 'topic' must never be empty
                #topic will never be empty now
                topicInput = string.upper(topic)
                topicInput = re.sub(self._puncStripRE, " ", topicInput)

                # thatInput = u""#for debug purpose
                logging.info("Match Input:"+str(input.split()))
                logging.info("Match That:"+str(thatInput.split()))
                logging.info("Match Topic:"+str(topicInput.split()))
                # Pass the input off to the recursive call
                patMatch, template = self._match(input.split(), thatInput.split(), topicInput.split(), self._root)
                logging.info("Find Matched Pattern:"+str(patMatch))
                logging.info("Find Matched Template:"+str(template))
                return template, patMatch
Example #11
def cnToUser(cn):
    components = split_re.split(cn)
    max_score = 0
    best = None
    for component in components:
        score = 0
        if all_numbers.match(component):
            continue
        len_score = len(component.split())
        if len_score == 1:
            len_score = len(component.split('-'))
        score += len_score
        if score > max_score:
            best = component
            max_score = score
    if max_score == 0:
        best = component[-1]
    result = []
    for word in best.split():
        word_0 = string.upper(word[0])
        word_rest = string.lower(word[1:])
        word_rest2 = string.upper(word[1:])
        if word[1:] == word_rest2:
            word = word_0 + word_rest
        else:
            word = word_0 + word[1:]
        if not all_numbers.match(word):
            result.append(word)
    result = ' '.join(result)
    return result
Example #12
    def OnJobSubmitted(self, job):
        # List of environment keys you don't want passed.
        unwantedkeys = self.GetConfigEntryWithDefault("UnwantedEnvKeys", "")

        # Split on commas, getting rid of excess whitespace, and uppercase each key
        unwantedkeys = [string.upper(x.strip()) for x in unwantedkeys.split(',')]

        wantedkeys = []

        self.LogInfo("Unwanted Environment keys defined as:")
        self.LogInfo(str(unwantedkeys))

        self.LogInfo("On Job Submitted Event Plugin: Custom Environment Copy Started")

        # Go through the current system environment variables not copying the unwanted keys
        for key in os.environ:
            if string.upper(key) not in unwantedkeys:
                wantedkeys.append(key)

        # Set chosen variables to job
        for key in wantedkeys:
            self.LogInfo("Setting %s to %s" % (key, os.environ[key]))
            job.SetJobEnvironmentKeyValue(key, os.environ[key])

        RepositoryUtils.SaveJob(job)

        self.LogInfo("On Job Submitted Event Plugin: Custom Environment Copy Finished")
Example #13
def get_capi_name(cppname, isclassname, prefix = None):
    """ Convert a C++ CamelCaps name to a C API underscore name. """
    result = ''
    lastchr = ''
    for chr in cppname:
        # add an underscore if the current character is an upper case letter
        # and the last character was a lower case letter
        if len(result) > 0 and not chr.isdigit() \
            and string.upper(chr) == chr \
            and not string.upper(lastchr) == lastchr:
            result += '_'
        result += string.lower(chr)
        lastchr = chr
    
    if isclassname:
        result += '_t'
        
    if not prefix is None:
        if prefix[0:3] == 'cef':
            # if the prefix name is duplicated in the function name
            # remove that portion of the function name
            subprefix = prefix[3:]
            pos = result.find(subprefix)
            if pos >= 0:
                result = result[0:pos]+ result[pos+len(subprefix):]
        result = prefix+'_'+result
        
    return result
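
The character loop inserts an underscore at each lower-to-upper transition. A single regex substitution does roughly the same; a sketch (the cef prefix handling above is omitted):

import re

def get_capi_name(cppname, isclassname=False):
    # "CamelCapsName" -> "camel_caps_name"; digits never trigger a split.
    result = re.sub(r'(?<=[a-z])(?=[A-Z])', '_', cppname).lower()
    return result + '_t' if isclassname else result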
Example #14
 def __getitem__( self, key ):
     if upper( key ) == 'A0':
         return self._a0
     elif upper( key ) == 'D0':
         return self._d0
     else:
         return self._params[ key ]._getvalue()
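
Special-casing a couple of uppercased keys in __getitem__ generalizes to a small case-insensitive mapping. A minimal sketch:

class CaseInsensitiveDict(dict):
    """A dict that uppercases keys on the way in and on lookup."""
    def __setitem__(self, key, value):
        super().__setitem__(key.upper(), value)
    def __getitem__(self, key):
        return super().__getitem__(key.upper())

params = CaseInsensitiveDict()
params['a0'] = 1.5
assert params['A0'] == 1.5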
Example #15
def Dependencies(lTOC):
  """Expand LTOC to include all the closure of binary dependencies.
  
     LTOC is a logical table of contents, i.e., a seq of tuples (name, path).
     Return LTOC expanded by all the binary dependencies of the entries
     in LTOC, except those listed in the module global EXCLUDES"""
  for nm, pth, typ in lTOC:
    fullnm = string.upper(os.path.basename(pth))
    if seen.get(string.upper(nm),0):
      continue
    #print "I: analyzing", pth
    seen[string.upper(nm)] = 1
    dlls = getImports(pth)
    for lib in dlls:
        #print "I: found", lib
        if not iswin and not cygwin:
            npth = lib
            dir, lib = os.path.split(lib)
            if excludes.get(dir,0):
                continue
        if excludes.get(string.upper(lib),0):
            continue
        if seen.get(string.upper(lib),0):
            continue
        if iswin or cygwin:
            npth = getfullnameof(lib, os.path.dirname(pth))
        if npth:
            lTOC.append((lib, npth, 'BINARY'))
        else:
            print "E: lib not found:", lib, "dependency of", pth
  return lTOC
Example #16
def firstToken(transRefLine):
    import re
    import string
    
    transRefLine = string.upper(transRefLine)
    transRefLine = re.sub(r'(?ms)([A-Z][0-9]{4,4}).*',r'\1',transRefLine)
    return string.upper(transRefLine)
Example #17
def readMeAConfigPortion(fd,patterns):
    pos = fd.tell()
    data = fd.readline()
    while (string.strip(data) == '') or (data[0]=='#'):
        data = fd.readline()
        #print " [replaced by ",fd.tell(),"] ",data
        if data == '':
            return (None,"End-of-file")
    print "WORKING (at",pos,") ",data,
    endoflinepos = fd.tell()
    for p in patterns:
        if type(p) == type(""):
            print " --> Does <<",string.strip(data),">> match",p,"?"
            if string.upper(data[:len(p)])==string.upper(p):
                print "       yes, it does match ",p,"!"
                return (p,string.strip(data[len(p):]))
        elif callable(p):
            print " --> Does <<",string.strip(data),">> match",repr(p),"?"
            try:
                fd.seek(pos)
                x = p(fd)
                print "      yes it does match ",repr(p),"!"
                return (p,x)
            except notOneOfTheseThanks:
                fd.seek(endoflinepos)
                continue
    print " Sadly,  no,  <<",string.strip(data),">> does not match any of",repr(patterns)
    fd.seek(pos)
    return (None,None)
Example #18
File: utils.py Project: Fat-Zer/LDP
def unique_options(a_sequence):
    """Eliminates duplicate items in manage_options tuples,
       and canonicalizes their order."""

    # Turn the tuple of dictionaries into a single dictionary
    # indexed by the uppercased label.
    a_list = list(a_sequence)
    dict = {}
    for action in a_list:
        label = action['label']
        dict[string.upper(label)] = action

    # Move the dictionary items into the result set
    # in the same order as they appear in LABEL_ORDER.
    results = []
    for label in LABEL_ORDER:
        uc_label = string.upper(label)
        if dict.has_key(uc_label):
            results.append(dict[uc_label])
            del dict[uc_label]

    # Append any remaining items to the result set.
    for key in dict.keys():
        item = dict[key]
        results.append(item)

    # Turn it back into a tuple before returning.
    results = tuple(results)
    print 'results: ', results
    return results
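
With Python 3 dicts preserving insertion order (and dict.has_key gone), the same dedup-and-reorder shrinks considerably. A sketch, with LABEL_ORDER passed in rather than read from the module global:

def unique_options(a_sequence, label_order):
    # Later duplicates of a label win, exactly as in the original.
    by_label = {action['label'].upper(): action for action in a_sequence}
    results = [by_label.pop(label.upper())
               for label in label_order if label.upper() in by_label]
    results.extend(by_label.values())  # leftovers keep insertion order
    return tuple(results)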
Example #19
    def __init__(self, A, **kwargs):
        PysparseDirectSolver.__init__(self, A, **kwargs)

        if 'strategy' in kwargs.keys():
            strategy = upper(kwargs.get('strategy'))
            if strategy not in ['AUTO', 'UNSYMMETRIC', 'SYMMETRIC', '2BY2']:
                strategy = 'AUTO'
            kwargs['strategy'] = 'UMFPACK_STRATEGY_' + strategy

        if 'scale' in kwargs.keys():
            scale = upper(kwargs.get('scale'))
            if scale not in ['NONE', 'SUM', 'MAX']: scale = 'SUM'
            kwargs['scale'] = 'UMFPACK_SCALE_' + scale
        
        self.type = numpy.float
        self.nrow, self.ncol = A.getShape()
        t = cputime()
        self.LU = umfpack.factorize(A.matrix, **kwargs)
        self.factorizationTime = cputime() - t
        self.solutionTime = 0.0
        self.sol = None
        self.L = self.U = None
        self.P = self.Q = self.R = None
        self.do_recip = False
        self.lnz = self.unz = self.nz_udiag = None
        return
Example #20
    def _pattern(self, pattern, that, topic):
        """
        Modified version of aiml.PatternMgr._match(). Now returns the pattern that
        matches the input as well as the matching template.
        """

        if len(pattern) == 0:
            return None
        # Mutilate the input.  Remove all punctuation and convert the
        # text to all caps.
        input = string.upper(pattern)
        input = re.sub(self._brain._puncStripRE, " ", input)
        if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
        thatInput = string.upper(that)
        thatInput = re.sub(self._brain._puncStripRE, " ", thatInput)
        thatInput = re.sub(self._brain._whitespaceRE, " ", thatInput)
        if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
        topicInput = string.upper(topic)
        topicInput = re.sub(self._brain._puncStripRE, " ", topicInput)
        

        # Pass the input off to the recursive call
        patMatch, template = self._brain._match(input.split(), thatInput.split(), topicInput.split(), self._brain._root)

        return patMatch, template
Example #21
 def Save(self, file_name, format="ASCII", overwrite=False):
     """Save HiddenMarkovIndOutTree object into a file.
     
     Argument file_name is a string designing the file name and path.
     String argument format must be "ASCII" or "SpreadSheet".
     Boolean argument overwrite is False if the file should not
     be overwritten."""
     if not overwrite:
         try:
             f=file(file_name, 'r')
         except IOError:
             f=file(file_name, 'w+')
         else:
             msg="File "+file_name+" already exists"
             raise IOError, msg
         f.close()
     import string
     if not (string.upper(format)=="ASCII" 
             or string.upper(format)=="SPREADSHEET"):
         msg="unknown file format: "+str(format)
         raise ValueError, msg
     elif (string.upper(format)=="ASCII"):
         self.__chmt.FileAsciiWrite(file_name)
     else:
         self.__chmt.SpreadsheetWrite(file_name)
Example #22
def translate(DNA_sequence):
    """Translate the DNA sequence in three forward frames"""
    #
    # Check the sequence first
    #
    ok,DNA_sequence=check_DNA(DNA_sequence)
    if not ok:
        print
        print 'DNA sequence not ok'
        print DNA_sequence
        print
        return None
    #
    # Translate
    #
    import string
    translation_3=[]
    translation_1=[]
    for start in range(3):
        position=start
        thisAA=''
        thisframe3=[]
        thisframe1=''
        while position<len(DNA_sequence):
            thisAA=thisAA+DNA_sequence[position]
            position=position+1
            if len(thisAA)==3:
                thisframe1=thisframe1+three_to_one[string.upper(genetic_code[thisAA])]
                thisframe3.append(string.upper(genetic_code[thisAA]))
                thisAA=''
        translation_3.append(thisframe3)
        translation_1.append(thisframe1)
    return translation_3,translation_1
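
The codon walk can be done with slicing instead of accumulating three characters at a time. A sketch against hypothetical genetic_code and three_to_one tables like the ones the original assumes:

def translate_frames(dna, genetic_code, three_to_one):
    # One (three-letter list, one-letter string) pair per reading frame.
    dna = dna.upper()
    frames3, frames1 = [], []
    for start in range(3):
        codons = [dna[i:i + 3] for i in range(start, len(dna) - 2, 3)]
        aa3 = [genetic_code[c].upper() for c in codons]
        frames3.append(aa3)
        frames1.append(''.join(three_to_one[a] for a in aa3))
    return frames3, frames1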
Example #23
def check_variable_len_bcs_dups(header, mapping_data, errors):
    """ Checks variable length barcodes plus sections of primers for dups
    
    header:  list of header strings
    mapping_data:  list of lists of raw metadata mapping file data
    errors:  list of errors
    """

    header_field_to_check = "BarcodeSequence"

    # Skip if no field BarcodeSequence
    try:
        check_ix = header.index(header_field_to_check)
    except ValueError:
        return errors

    linker_primer_field = "LinkerPrimerSequence"

    try:
        linker_primer_ix = header.index(linker_primer_field)
        no_primers = False
    except ValueError:
        no_primers = True

    barcodes = []
    bc_lens = []

    correction = 1

    for curr_data in mapping_data:
        barcodes.append(upper(curr_data[check_ix]))
        bc_lens.append(len(curr_data[check_ix]))

    # Get max length of barcodes to determine how many primer bases to slice
    barcode_max_len = max(bc_lens)

    # Have to do second pass to append correct number of nucleotides to
    # check for duplicates between barcodes and primer sequences

    bcs_added_nts = []
    for curr_data in mapping_data:
        if no_primers:
            bcs_added_nts.append(upper(curr_data[check_ix]))
        else:
            adjusted_len = barcode_max_len - len(curr_data[check_ix])
            bcs_added_nts.append(upper(curr_data[check_ix] + curr_data[linker_primer_ix][0:adjusted_len]))

    dups = duplicates_indices(bcs_added_nts)

    for curr_dup in dups:
        for curr_loc in dups[curr_dup]:
            if no_primers:
                errors.append("Duplicate barcode %s found.\t%d,%d" % (curr_dup, curr_loc + correction, check_ix))
            else:
                errors.append(
                    "Duplicate barcode and primer fragment sequence "
                    + "%s found.\t%d,%d" % (curr_dup, curr_loc + correction, check_ix)
                )

    return errors
Example #24
File: query.py Project: houcy/joy
   def initialize(self):
      self.d = {}
      with open("saltUI/ciphersuites.txt") as f:
         for line in f:
            (key, val, sec) = line.split()
            self.d[key] = val + " (" + sec + ")"
      
      self.pr = {
         6: "TCP",
         17: "UDP"
         }
      with open("data/ip.txt") as f:
         for line in f:
            (key, val) = line.split()
            self.pr[key] = val

      self.ports = {}
      with open("data/ports.txt") as f:
         for line in f:
            try:
               (val, key) = line.split()
               if '-' in str(key):
                  start, stop = str(key).split('-')
                  for a in range(int(start), int(stop)):
                     self.ports[a] = string.upper(val)
               else:
                  key = int(key)
                  self.ports[key] = string.upper(val)
            except:
               pass
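
Note the 'start-stop' expansion excludes the stop port (range is half-open), and the bare except silently swallows malformed lines. A tighter sketch of the same port-table parse:

def load_ports(path):
    ports = {}
    with open(path) as f:
        for line in f:
            try:
                val, key = line.split()
            except ValueError:
                continue  # skip malformed lines explicitly
            if '-' in key:
                start, stop = key.split('-')
                for port in range(int(start), int(stop)):  # stop excluded, as above
                    ports[port] = val.upper()
            else:
                ports[int(key)] = val.upper()
    return ports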
Example #25
def walk_directory(prefix=""):
    views = []
    sections = []

    for dirname, dirnames, filenames in os.walk("."):

        for filename in filenames:

            filepart, fileExtension = os.path.splitext(filename)
            pos = filepart.find("ViewController")
            if string.lower(fileExtension) == ".xib" and pos > 0:
                # read file contents
                f = open(dirname + "/" + filename, "r")
                contents = f.read()
                f.close()

                # identify identifier part
                vc_name = prefix_remover(filepart[0:pos], prefix)
                vc_name = special_names(vc_name)

                if (
                    string.find(contents, "MCSectionViewController") != -1
                    or string.find(contents, "RFSectionViewController") != -1
                    or string.find(contents, "SectionViewController") != -1
                ):
                    sections.append(
                        {"type": "section", "variable_name": "SECTION_" + string.upper(vc_name), "mapped_to": filepart}
                    )
                else:
                    views.append(
                        {"type": "view", "variable_name": "VIEW_" + string.upper(vc_name), "mapped_to": filepart}
                    )

    return sections, views
Example #26
    def do_GET(self):
        self.send_response(200)  # Return a response of 200, OK to the client
        self.end_headers()
        parsed_path = urlparse.urlparse(self.path)

        if upper(parsed_path.path) == "/KILL_TASK":
            try:
                obsnum = str(parsed_path.query)
                pid_of_obs_to_kill = int(self.server.dbi.get_obs_pid(obsnum))
                logger.debug("We received a kill request for obsnum: %s, shutting down pid: %s" % (obsnum, pid_of_obs_to_kill))
                self.server.kill(pid_of_obs_to_kill)
                self.send_response(200)  # Return a response of 200, OK to the client
                self.end_headers()
                logger.debug("Task killed for obsid: %s" % obsnum)
            except:
                logger.exception("Could not kill observation, url path called : %s" % self.path)
                self.send_response(400)  # Return a response of 400, Bad Request to the client
                self.end_headers()
        elif upper(parsed_path.path) == "/INFO_TASKS":
            task_info_dict = []
            for mytask in self.server.active_tasks:  # Jon : !!CHANGE THIS TO USE A PICKLED DICT!!
                try:
                    child_proc = mytask.process.children()[0]
                    if psutil.pid_exists(child_proc.pid):
                        task_info_dict.append({'obsnum': mytask.obs, 'task': mytask.task, 'pid': child_proc.pid,
                                               'cpu_percent': child_proc.cpu_percent(interval=1.0), 'mem_used': child_proc.memory_info_ex()[0],
                                               'cpu_time': child_proc.cpu_times()[0], 'start_time': child_proc.create_time(), 'proc_status': child_proc.status()})
                except:
                    logger.exception("do_GET : Trying to send response to INFO request")
            pickled_task_info_dict = pickle.dumps(task_info_dict)
            self.wfile.write(pickled_task_info_dict)

        return
Example #27
def game(word2):
    i=0
    while (i+1)<len(word2): # This loop finds the double letters (i.e. 'll' or 'tt') in the entered word.
        if word2[i]==word2[i+1]:
            print "Yes, Grandma does like %s." %(string.upper(word2))
            time.sleep(.5)
            word3=raw_input ('What else does she like? ')
            time.sleep(.5)
            i=len(word2)-2
            if word3!="DONE":
               game(word3)
            else:
                print "Thanks for playing!"
            return
        if word2[i]!=word2[i+1]:
            i=i+1
    b=random.randrange(0,16)
    likes=["sinners", "winners", "Mommy", "little", "tall", "Harry","kittens", "all", "books", "trees", "bees", "yellow", "jello", "good", "kisses", "wrapping paper", "the moon", "yellow"]
    dislikes=["saints", "losers", "Mother","big", "short", "Ron", "cats", "none", "magazines", "flowers", "ladybugs", "white", "fudge", "bad", "hugs","gift bags", "the sun", "white"]
    c=likes[b]
    d=dislikes[b]
    print "No, Grandma doesn't like %s." %(string.upper(word2)) # This prints a random set of examples.
    time.sleep(.5)
    print 'She likes %s but not %s.'  %(string.upper(c),string.upper(d))
    time.sleep(.5)
    word3=raw_input ("What else does she like? ")
    time.sleep(.5)
    if word3!="DONE":
        game(word3)
    else:
        print "Thanks for playing!"
Example #28
 def read_DNA_sequence(self,filename):
     #
     # Read the sequence
     #
     fd=open(filename)
     lines=fd.readlines()
     fd.close()
     #for line in lines:
     #    print line,
     #
     # Figure out what kind of file it is - a bit limited at the moment
     #
     import string
     if lines[0][0]=='>' and string.upper(lines[0][1:3])!='DL':
         # FASTA
         #print 'Reading FASTA'
         return self.readfasta(filename)
     elif string.upper(lines[0][:3])=='>DL':
         # PIR
         #print 'READING pir'
         return self.readpir(filename)
     elif string.upper(lines[0][:5])=='LOCUS':
         # GenBank
         #print 'READING Genbank'
         return self.readGenBank(filename)
     elif lines[0][2:5]=='xml':
         for line in lines:
             if string.find(line,'DOCTYPE Bsml')!=-1:
                 return self.readBSML(filename)
         return None
     else:
         # FLAT file
         #print 'READING FLAT'
         return self.readflat(filename)
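
The prefix tests read more directly with str.startswith on an uppercased first line; order matters, since '>DL' must be checked before plain '>'. A sketch that returns a format tag instead of dispatching to the project-specific reader methods (the extra 'DOCTYPE Bsml' scan is omitted):

def sniff_sequence_format(first_line):
    head = first_line.upper()
    if head.startswith('>DL'):
        return 'pir'
    if head.startswith('>'):
        return 'fasta'
    if head.startswith('LOCUS'):
        return 'genbank'
    if first_line[2:5] == 'xml':
        return 'bsml'
    return 'flat'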
Example #29
 def __init__(self, r, s):
     """where r is the rank, s is suit"""
     if type(r) == str:
         r = string.upper(r)
     self.r = r
     s = string.upper(s)
     self.s = s
Example #30
def rank_matched_clubs(query,num_winners):
	min_prev = 0.1
	max_prev = 65.0
	query = string.upper(query)
	q_rel,q_rel_init = [],related_terms(query, min_prev, max_prev)
	for e in q_rel_init:
		if not e in q_rel:
				q_rel.append(e)
	#q_rel = [x for x in q_rel if (prevalence(x,all_words_from_clubs)>=min_prev and prevalence(x,all_words_from_clubs)<=max_prev)]
	''' for e in query.split(' '):
		if e not in q_rel:
			q_rel = [e]+q_rel '''
	print '(At most %d) Terms related to \"%s\" between min_prev %.1f and max_prev %.1f: %s.'%(num_winners,query,min_prev,max_prev,[[x,prevalence(x,all_words_from_clubs)] for x in q_rel])# if prevalence(x,all_words_from_clubs)>=min_prev and prevalence(x,all_words_from_clubs)<=max_prev])
	relevant = {}
	for related in q_rel:
		for e in clubs:
			ct = (string.upper(e[0]) + string.upper(e[1])).count(string.upper(related))
			#if string.upper(related) in string.upper(e[0]) + string.upper(e[1]):
			if ct > 0:
				if clubs.index(e) not in relevant:
					relevant[clubs.index(e)] = ct
		#relevant.append(relevant_clubs(related))
	#print '\nClubs containing these terms (%d):'%len(relevant)
	#for c in relevant:
		#print c
	return [clubs[x] for x in sorted(relevant, key=relevant.get, reverse=True)[:num_winners]]
Example #31
def OpcodeName(funcName):
    """Return the C token for the opcode for the given function."""
    return "CR_" + string.upper(funcName) + "_OPCODE"
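
In Python 3 this becomes a one-line f-string (the same applies to ExtendedOpcodeName in Example #37):

def opcode_name(func_name):
    """Return the C token for the opcode for the given function."""
    return f"CR_{func_name.upper()}_OPCODE"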
Example #32
def itomic_Nof1(Nof1_item, original_labels, geneMappping, comparison_list,
                outputfile):
    itomic_samples = xena.xenaAPI.dataset_samples(Nof1_item["hub"],
                                                  Nof1_item["dataset"])
    Nof1_sample = Nof1_item["samples"][0]

    #file header output
    fout = open(outputfile, 'w')
    filer_header(comparison_list, Nof1_sample, fout)

    pDic = {}  # pvalue collection
    for original_label in original_labels:
        if original_label in geneMappping:
            gene = geneMappping[original_label]
        else:
            gene = original_label
        itomic_Data = get_itomic_Data(gene, Nof1_item["hub"],
                                      Nof1_item["dataset"], itomic_samples)

        #screen output
        Nof1_output(Nof1_sample, original_label, gene, itomic_Data,
                    Nof1_item["log2Theta"])

        Nof1_value = itomic_Data[Nof1_sample]

        outputList = [original_label, gene]

        for item in comparison_list:
            hub = item["hub"]
            dataset = item["dataset"]
            samples = item["samples"]
            name = item["name"]
            mode = item["mode"]
            gene = string.upper(gene)

            if mode == "gene":
                values = xena.xenaAPI.Gene_values(hub, dataset, samples, gene)
            elif mode == "probe":
                values = xena.xenaAPI.Probe_values(hub, dataset, samples, gene)
            h_l_values = clean(values)  # comparison larger cohort values

            rank, percentage = rank_and_percentage(Nof1_value, h_l_values)
            outputList.append('{:.2f}%'.format(percentage))

            r_and_p_values = map(lambda x: rank_and_percentage(x, h_l_values),
                                 itomic_Data.values())
            outputList.append(
                string.join(
                    map(
                        lambda x: '100'
                        if (x[1] > 99.5) else '{:.2g}'.format(x[1]),
                        r_and_p_values), ', '))

            # log2TPM statistics
            # ttest p value
            try:
                tStat, p = scipy.stats.ttest_ind(itomic_Data.values(),
                                                 h_l_values,
                                                 equal_var=False)
                mean1 = numpy.mean(itomic_Data.values())
                mean2 = numpy.mean(h_l_values)
                outputList.append(str(p))  # ttest p value
                outputList.append(str(tStat))  # ttest t
                outputList.append(str(mean1))
                outputList.append(str(mean2))
                pDic[gene] = p
            except:
                outputList.append('')
                outputList.append('')
                outputList.append('')
                outputList.append('')

            # rank SD
            r_list = map(lambda x: x[1], r_and_p_values)
            try:
                mean1 = numpy.mean(r_list)
                SD = numpy.std(r_list)
                outputList.append('{:.2g}'.format(mean1))
                outputList.append('{:.2g}'.format(SD))
            except:
                outputList.append('')
                outputList.append('')

            print
            print name + " ( n=", len(h_l_values), "):"
            print "rank:", rank
            #print map(lambda x: x[0], r_and_p_values)

            print "Rank %:", '{:.2f}%'.format(percentage)

            #for list in zip(itomic_Data.keys(), r_and_p_values):
            #    print list[0], list[1][0], '{:.2f}%'.format(list[1][1])

        outputList.append('{:.2f}'.format(Nof1_value))
        outputList.append('{:.2f}'.format(
            revert_Log2_theta(Nof1_value, Nof1_item["log2Theta"])))
        fout.write(string.join(outputList, '\t') + '\n')
    fout.write("\n")
    fout.write(
        "Rank % : percentile of samples with lower expression than sample of interest.\n"
    )
    fout.write("Higher Rank %  means higher expression.\n")
    fout.close()
Example #33
 def delEntity(self, portno):
     getattr(self.app, 'dontListen'+string.upper(self.ptype))(int(portno))
Example #34
 def reallyPutEntity(self, portno, factory):
     getattr(self.app, 'listen'+string.upper(self.ptype))(int(portno), factory)
Example #35
 def put(self, key, *args, **kwargs):
     key = string.upper(key)
     # call our parent classes put method with an upper case key
     return apply(dbobj.DB.put, (self, key) + args, kwargs)
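
apply() is gone in Python 3 along with string.upper; the equivalent override uses argument unpacking. A sketch, with dbobj.DB assumed to be the same bsddb-style base class as in the original:

def put(self, key, *args, **kwargs):
    key = key.upper()
    # call our parent class's put method with an upper case key
    return dbobj.DB.put(self, key, *args, **kwargs)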
Example #36
def main():
  usage = """%s [options] <organism1> <organism2> <organism3> ...
  
  Here <organism1> is the five character SwissProt identifier 
  for the organism, such as CHICK for chicken or DROME for fruitfly.""" \
  % sys.argv[0]
  opt_parser = OptionParser(usage=usage)
  opt_parser.add_option("-e", "--max_e_value", dest="evalue_cutoff",
                        default="1e-3", 
                        help="Maximum e-value for which to include hits")
  (options, args) = opt_parser.parse_args()
  if len(args) == 0:
    opt_parser.error("No organisms were specified")
  try:
    evalue_cutoff = float(options.evalue_cutoff)
  except ValueError:
    opt_parser.error("--max_e_value must be a number")

  organisms = []
  for arg in args:
    if len(arg) == 5 and arg.isalpha():
      organisms.append(string.upper(arg))
    else:
      opt_parser.error("%s is not a 5-character alphabetical organism id" % arg)

  all_hits = {}
  hits_by_organism = {}
  for organism in organisms:
    hits_by_organism[organism] = {}

  patt = '|'.join(['_%s' % organism for organism in organisms])
  # The following would retrieve the identifiers of all the PSI-BLAST hits
  # cmd = 'grep -E "%s" pb | cut -f 2 | cut -d "|" -f 3' % patt
  cmd = 'grep -E "%s" pb' % patt
  fp_grep_cmd = 'grep -E "%s" final.a2m' % patt
  f = open("clusters", "rU")
  lines = f.readlines()
  f.close()
  cluster_num = 0
  found_seed = False
  for line in lines:
    line = line.rstrip()
    if line[0:9] == 'Cluster #':
      cluster_num = int(line[9:-1])
      found_seed = False
    elif not found_seed:
      if line.find('|') >= 0:
        seed_id = line.split('|')[1]
      else:
        seed_id = line
      # Replace / in the header by %2f
      seed_id = seed_id.replace('/','%2f')
      # Replace ( in the header by %28
      seed_id = seed_id.replace('(','%28')
      # Replace ) in the header by %29
      seed_id = seed_id.replace(')','%29')
      # Replace - in the header by %2d
      seed_id = seed_id.replace('-','%2d')
      # Replace * in the header by %2a
      seed_id = seed_id.replace('*','%2a')
      os.chdir(seed_id)
      f = os.popen(cmd)
      hit_lines = f.readlines()
      f.close()
      f = os.popen(fp_grep_cmd)
      fp_hit_lines = f.readlines()
      f.close()
      fp_hit_set = set()
      for fp_hit_line in fp_hit_lines:
        fp_hit_id = fp_hit_line.rstrip().strip('>').split()[0]
        if fp_hit_id.find('|') >= 0:
          if fp_hit_id[0:3] == 'sp|' or fp_hit_id[0:4] == 'lcl|':
            fp_hit_id = fp_hit_id.split('|')[2]
          else:
            fp_hit_id = fp_hit_id.split('|')[1]
        fp_hit_set.add(fp_hit_id)
      for hit_line in hit_lines:
        hit_line = hit_line.rstrip()
        fields = hit_line.split()
        evalue = float(fields[10])
        if evalue > evalue_cutoff:
          continue
        hit_id = fields[1].split('|')[2]
        is_fp_hit = hit_id in fp_hit_set
        organism = hit_id.split('_')[1]
        if hit_id not in hits_by_organism[organism]:
          hits_by_organism[organism][hit_id] = (evalue, seed_id, is_fp_hit)
          all_hits[hit_id] = (evalue, seed_id, is_fp_hit)
        elif hits_by_organism[organism][hit_id][0] > evalue:
          hits_by_organism[organism][hit_id] = (evalue, seed_id, is_fp_hit)
          all_hits[hit_id] = (evalue, seed_id, is_fp_hit)
      os.chdir('..')
      found_seed = True
  print "Organisms:", organisms
  hits_with_evalues = [(all_hits[hit], hit) for hit in all_hits.keys()]
  hits_with_evalues.sort()
  for (evalue, seed_id, is_fp_hit), hit_id in hits_with_evalues:
    if is_fp_hit:
      print "%s\t%g\t%s\t*" % (hit_id, evalue, seed_id)
    else:
      print "%s\t%g\t%s" % (hit_id, evalue, seed_id)
Example #37
def ExtendedOpcodeName(funcName):
    """Return the C token for the extended opcode for the given function."""
    return "CR_" + string.upper(funcName) + "_EXTEND_OPCODE"
Example #38
def run_sql(sql, param=None, n=0, with_desc=False, with_dict=False,
            run_on_slave=False, connection=None):
    """Run SQL on the server with PARAM and return result.

    :param param: tuple of string params to insert in the query (see
    notes below)
    :param n: number of tuples in result (0 for unbounded)
    :param with_desc: if True, will return a DB API 7-tuple describing
    columns in query.
    :param with_dict: if True, will return a list of dictionaries
    composed of column-value pairs
    :param connection: if provided, uses the given connection.
    :return: If SELECT, SHOW, DESCRIBE statements, return tuples of data,
    followed by description if parameter with_desc is
    provided.
    If SELECT and with_dict=True, return a list of dictionaries
    composed of column-value pairs, followed by description
    if parameter with_desc is provided.
    If INSERT, return last row id.
    Otherwise return SQL result as provided by database.

    @note: When the site is closed for maintenance (as governed by the
    config variable CFG_ACCESS_CONTROL_LEVEL_SITE), do not attempt
    to run any SQL queries but return empty list immediately.
    Useful to be able to have the website up while MySQL database
    is down for maintenance, hot copies, table repairs, etc.
    @note: In case of problems, exceptions are returned according to
    the Python DB API 2.0.  The client code can import them from
    this file and catch them.
    """
    if cfg['CFG_ACCESS_CONTROL_LEVEL_SITE'] == 3:
        # do not connect to the database as the site is closed for maintenance:
        return []
    elif cfg['CFG_ACCESS_CONTROL_LEVEL_SITE'] > 0:
        # Read only website
        if not sql.upper().startswith("SELECT") and \
                not sql.upper().startswith("SHOW"):
            return

    if param:
        param = tuple(param)

    # FIXME port database slave support
    dbhost = cfg['CFG_DATABASE_HOST']
    if run_on_slave and cfg['CFG_DATABASE_SLAVE']:
        dbhost = cfg['CFG_DATABASE_SLAVE']

    if 'sql-logger' in cfg.get('CFG_DEVEL_TOOLS', []):
        log_sql_query(dbhost, sql, param)

    gc.disable()
    engine = db.engine.execution_options(use_unicode=0)
    sql = sql.replace('`', '"')
    current_app.logger.info(sql)
    if param is None:
        cur = engine.execute(sql.replace('%', '%%'))
    else:
        cur = engine.execute(sql, (param, ))
    gc.enable()

    if string.upper(string.split(sql)[0]) in \
            ("SELECT", "SHOW", "DESC", "DESCRIBE"):
        if n:
            recset = cur.fetchmany(n)
        else:
            recset = cur.fetchall()

        from invenio.base.helpers import utf8ifier
        recset = map(dict if with_dict else tuple, recset)
        recset = utf8ifier(recset)

        if with_desc:
            return recset, cur.description
        else:
            return recset
    else:
        if string.upper(string.split(sql)[0]) == "INSERT":
            return cur.lastrowid
        return cur
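
string.upper(string.split(sql)[0]) is just sql.split()[0].upper() in Python 3, but both raise IndexError on an empty statement. A defensive sketch of the verb check:

def sql_verb(sql):
    # First whitespace-delimited token, uppercased; '' for blank input.
    parts = sql.split(None, 1)
    return parts[0].upper() if parts else ''

assert sql_verb('select * from t') == 'SELECT'
assert sql_verb('  ') == ''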
Example #39
    print
    print '\t    Use device no. 2 as the READER and device no. 3 as the EMULATOR:'
    print
    print '\t      ' + sys.argv[0] + ' -r 2 3'
    print
    print '\t    Use device no. 2 as the EMULATOR and remote system on 192.168.1.3 port 5000 as the READER:'
    print
    print '\t      ' + sys.argv[0] + ' -r 2 reader:192.168.1.3:5000'
    print
    os._exit(True)

logging = False
if len(args) > 1:
    try:
        logfile = open(args[1], 'r')
        x = string.upper(
            raw_input('  *** Warning! File already exists! Overwrite (y/n)? '))
        if not x == 'Y':
            os._exit(True)
        logfile.close()
    except:
        pass
    try:
        logfile = open(args[1], 'w')
        logging = True
    except:
        print "  Couldn't create logfile:", args[1]
        os._exit(True)

try:
    if args[2] == 'QUIET':
        quiet = True
Example #40
    def _makeProduct(self, fcst, segmentAreas, argDict):
        """
        Modified to allow headlines.  Segments will be automatically
        generated based on hazards grid, if not already broken up."""
        editArea = segmentAreas[0]
        areaLabel = editArea

        # Get combinations to be used for the segment

        combinations = argDict["combinations"]
        if combinations is not None:
            areaList = self.getCurrentAreaNames(argDict, areaLabel)
            print "using combinations, areaList=", areaList
            usingCombo = 1
        else:
            for editArea, label in defaultEditAreas:
                if label == areaLabel:
                    areaList = [editArea]
            print "not using combinations, areaList=", areaList

        # Generate the standard headline for the segment
        self._hazards = argDict['hazards']
        self._combinations = argDict["combinations"]
        self._hazardTR = self.createTimeRange(0, 240)

        headlines = self.generateProduct("Hazards",
                                         argDict,
                                         area=editArea,
                                         areaLabel=areaLabel,
                                         timeRange=self._hazardTR)

        self._agencyNames = ""

        # If no valid AQA hazard grid, just return a placeholder
        if headlines == "":
            return fcst + "|* Statement text *|"

        # If valid hazard grid, insert headline, agency attribution, and any default text
        else:

            # Make sure main headline is in upper case.
            upperhead = string.upper(headlines)
            fcst = fcst + upperhead
            #strip out the line feed within headlines
            headlines = string.split(headlines, '\n')
            headlines = string.join(headlines)

            #create an attribution phrase containing headline info
            HeadIssue1 = ""
            HeadIssue2 = ""
            HeadIssue = ""

            # Determine the list of agencies associated with the segmentAreas
            agencies = []
            for areaLabel in segmentAreas:
                # Find the agency for this areaLabel
                for agency in self._agencyDict.keys():
                    if agency not in agencies:
                        zones = self._agencyDict[agency]['zones']
                        if areaLabel in zones:
                            agencies.append(agency)
                            name = self._agencyDict[agency]['name']
                            self._agencyNames = self._agencyNames + "\n" + name

            # Make the headline using the first agency only
            if agencies == []:
                print "\n\nCheck set up of agencyDict!! -- no agencyDict entry for " + ` segmentAreas ` + "\n\n"
            agency = agencies[0]
            HeadIssue1 = self._agencyDict[agency]['declaration']
            HeadIssue2 = headlines
            if "remains" in headlines:  # This is an update
                HeadIssue2 = HeadIssue2[29:len(HeadIssue2) - 4]
            else:  # This is the first issuance
                HeadIssue2 = HeadIssue2[21:len(HeadIssue2) - 4]
            HeadIssue = HeadIssue1 + HeadIssue2 + "\n\n" + self._agencyDict[
                agency]['text']
            fcst = fcst + HeadIssue + "\n\n"

            return fcst
Example #41
 #    raise 'Could not determine if we have a chainid or not',pdbid
 #
 # Write the PDB file
 #
 #print 'chainid',chainid
 #print 'keepchainid',keep_chainid
 fd = open(pkapdb, 'w')
 for line in files[filenms[0]]:
     split = string.split(line)
     newline = line
     if split[0] == 'ATOM' and keep_chainid:
         if chainid:
             thisid = split[4]
             thisid = line[21]
             #print string.upper(thisid),string.upper(keep_chainid)
             if string.upper(thisid) != string.upper(keep_chainid):
                 newline = ''
                 #print 'Excluding line'
             #else:
             #    print 'Keeping',line
     elif split[0] == 'HETATM':
         newline = ''
     fd.write(newline)
 fd.close()
 #
 # Remove the chain ID
 #
 import remove_chain_identifier
 remove_chain_identifier.remove_chainid(pkapdb)
 #
 # Done
Example #42
    def __init__(self,
                 filename,
                 when='h',
                 interval=1,
                 backupCount=0,
                 encoding=None,
                 delay=0,
                 utc=0):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = string.upper(when)
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        currentTime = int(time.time())
        if self.when == 'S':
            self.interval = 1  # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60  # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60  # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24  # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7  # one week
            if len(self.when) != 2:
                raise ValueError(
                    "You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s"
                    % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError(
                    "Invalid day specified for weekly rollover: %s" %
                    self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" %
                             self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval  # multiply by units requested
        self.rolloverAt = currentTime + self.interval

        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - (
                (currentHour * 60 + currentMinute) * 60 + currentSecond)
            self.rolloverAt = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if when.startswith('W'):
                day = t[6]  # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = self.rolloverAt + (daysToWait *
                                                       (60 * 60 * 24))
                    if not utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:  # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    self.rolloverAt = newRolloverAt
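
This class tracks the standard library's logging.handlers.TimedRotatingFileHandler nearly line for line (upstream the first statement is self.when = when.upper()). Unless the copy exists to patch a specific behavior, the stdlib handler is the safer choice:

import logging
import logging.handlers

handler = logging.handlers.TimedRotatingFileHandler(
    'app.log', when='midnight', backupCount=7)  # keep a week of logs
logging.getLogger().addHandler(handler)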
Example #43
def tableauth(pioclass, pobjstmt, mysqlc):
	
	autherr = ''
	table = ''
	authsort = ['N', 'R', ' ', '', 'F']
	tblauth = [' ', ' ', ' ', ' ']
	
	if pobjstmt[0] == "'" or pobjstmt[0] == '"':
		autherr = 'pobjstmt begins with quotes...triple quote individual column values to escape correctly<br>'
	
	pobjsplit = re.split(' ', pobjstmt.lstrip())
	if pobjsplit[0].lower() == 'insert'  or pobjsplit[0].lower() == 'replace':		#handle replaces as inserts
		n = 1
		while n < 4 and table == '':
			if pobjsplit[n].lower() == 'into' or pobjsplit[n].lower() == 'ignore':
				n = n + 1
			else:
				table = pobjsplit[n].lower()
		if n >= 4:
			autherr = 'insert cannot find table - failed to authorize table'
			
#		print 'table=' + table		#used to test the auth values
		
	elif pobjsplit[0].lower() == 'update':
		n = 1
		while n < 4 and table == '':
			if pobjsplit[n].lower() == 'low_priority' or pobjsplit[n].lower() == 'ignore':
				n = n + 1
			else:
				table = pobjsplit[n].lower()
		if n >= 4:
			autherr = 'update cannot find table - failed to authorize table'
		
	elif pobjsplit[0].lower() == 'delete':
		try:
			table = pobjsplit[pobjsplit.index('from') + 1]
		except ValueError:
			try:
				table = pobjsplit[pobjsplit.index('FROM') + 1]
			except ValueError:
				autherr = 'delete missing "from" cannot find table - failed to authorize table'
		
	elif pobjsplit[0].lower() == 'select':
		f1 = string.find(pobjstmt.upper(), ' FROM ')
		if f1 <> -1:
			w1 = string.find(pobjstmt.upper(), ' WHERE ')
			if w1 <> -1:
				whereval = pobjstmt[w1+1:w1+6]
			else:
				whereval = ''
			
			f2 = pobjsplit.index(pobjstmt[f1+1:f1+5])			#FROM index#
			while f2 - 1 > -1:												#eliminate everything before the FROM
				pobjsplit.remove(pobjsplit[f2-1])
				f2 = pobjsplit.index(pobjstmt[f1+1:f1+5])
			table = pobjsplit[f2+1]
			if table[len(table)-1:] == ',':
				table = table[0:len(table) -1]
			as1 = 0
			while as1 <> -1:												#eliminate "AS" and the following value
				as1 = string.find(str(pobjsplit).upper(), ' AS ')
				if as1 <> -1:
					as2 = pobjsplit.index(str(pobjsplit)[as1:as1+4])
					pobjsplit.remove(pobjsplit[as2])
					pobjsplit.remove(pobjsplit[as2])			#because index shifts over, as2 is now value after "AS".
				
			jn1 = string.find(str(pobjsplit).upper(), ' JOIN ')
			if jn1 <> -1:
				while jn1 > -1:
					jn2 = pobjsplit.index(str(pobjsplit)[jn1:jn1+6])
					table = table + ',' + pobjsplit[jn2+1]
					pobjsplit.remove(pobjsplit[jn2])
					jn1 = string.find(str(pobjsplit).upper(), ' JOIN ', jn1 + 1)
			else:
				if whereval <> '':
					fwdiff = pobjsplit.index(whereval) - f2
					if fwdiff > 2:
						fwdiff = fwdiff -1
						while fwdiff > 1:
							if table[len(table)-1:] == ',':
								table = table + pobjsplit[fwdiff]
							else:
								table = table + ',' + pobjsplit[fwdiff]
							fwdiff = fwdiff -1
			if table[len(table)-1:] == ',':
				table = table[0:len(table) -1]
	#	else:
	#		autherr = 'Select has no "FROM" - cannot find table - failed to authorize table'
	#selects can do more than get stuff from tables, so a select with no "from" may be allowable.
		
	if table <> '':
		tablelist = re.split(',', table)
		n = 0
##		db = MySQLdb.connect(db=sqldb, user=sqluser, host= sqlhost)
##		mysqlc. = db.cursor()
		for i in tablelist:
			sqlstmt = 'select pioclass, dbtable, dbinsert, dbupdate, dbdelete, dbselect from pioclasstable'
			sqlstmt = sqlstmt + ' where pioclass = "%s" and dbtable = "%s"' % (pioclass, tablelist[n])
			sqlstmt = sqlstmt + ' or pioclass = "" and dbtable = "%s"' % (tablelist[n])
			sqlstmt = sqlstmt + ' order by pioclass DESC, dbtable DESC'
			mysqlc.execute(sqlstmt)
			
			if mysqlc.rowcount == 0:
				#insert, update, delete, select (Full, Record, None, '')
				tmpauth = [' ', ' ', ' ', ' ']
			else:
				tmpauth = [string.upper(mysqlc._rows[0][2]), string.upper(mysqlc._rows[0][3]), string.upper(mysqlc._rows[0][4]),string.upper(mysqlc._rows[0][5])]
				if n == 0:
					tblauth = tmpauth
				else:
					if authsort.index(tmpauth[0]) < authsort.index(tblauth[0]):
						tblauth[0] = tmpauth[0]
					if authsort.index(tmpauth[1]) < authsort.index(tblauth[1]):
						tblauth[1] = tmpauth[1]
					if authsort.index(tmpauth[2]) < authsort.index(tblauth[2]):
						tblauth[2] = tmpauth[2]
					if authsort.index(tmpauth[3]) < authsort.index(tblauth[3]):
						tblauth[3] = tmpauth[3]
			
			n = n + 1
	return tblauth, autherr, table
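The merging loop above keeps, for each of insert/update/delete/select, whichever grant sorts earliest in authsort, i.e. the most restrictive one. A minimal sketch of that rule with stub data (the helper name is mine):

# 'N' (none) sorts before 'R' (record), the blanks, and 'F' (full), so the
# stricter grant wins column by column, exactly as in the loop above.
authsort = ['N', 'R', ' ', '', 'F']

def merge_auth(tmpauth, tblauth):
    return [a if authsort.index(a) < authsort.index(b) else b
            for a, b in zip(tmpauth, tblauth)]

print merge_auth(['R', 'F', 'N', 'F'], ['F', 'F', 'F', 'F'])
# -> ['R', 'F', 'N', 'F']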
Example #44
0
    def caverphone(self):
        "returns the language key using the caverphone algorithm 2.0"

        # Developed at the University of Otago, New Zealand.
        # Project: Caversham Project (http://caversham.otago.ac.nz)
        # Developer: David Hood, University of Otago, New Zealand
        # Contact: [email protected]
        # Project Technical Paper: http://caversham.otago.ac.nz/files/working/ctp150804.pdf
        # Version 2.0 (2004-08-15)

        code = ""

        i = 0
        term = self.term
        termLength = len(term)

        if (termLength == 0):
            # empty string ?
            return code

        # convert to lowercase
        code = string.lower(term)

        # remove anything not in the standard alphabet (a-z)
        code = re.sub(r'[^a-z]', '', code)

        # remove final e
        if code.endswith("e"):
            code = code[:-1]

        # if the name starts with cough, rough, tough, enough or trough -> cou2f (rou2f, tou2f, enou2f, trou2f)
        code = re.sub(r'^([crt]|(en)|(tr))ough', r'\1ou2f', code)

        # if the name starts with gn -> 2n
        code = re.sub(r'^gn', r'2n', code)

        # if the name ends with mb -> m2
        code = re.sub(r'mb$', r'm2', code)

        # replace cq -> 2q
        code = re.sub(r'cq', r'2q', code)

        # replace c[i,e,y] -> s[i,e,y]
        code = re.sub(r'c([iey])', r's\1', code)

        # replace tch -> 2ch
        code = re.sub(r'tch', r'2ch', code)

        # replace c,q,x -> k
        code = re.sub(r'[cqx]', r'k', code)

        # replace v -> f
        code = re.sub(r'v', r'f', code)

        # replace dg -> 2g
        code = re.sub(r'dg', r'2g', code)

        # replace ti[o,a] -> si[o,a]
        code = re.sub(r'ti([oa])', r'si\1', code)

        # replace d -> t
        code = re.sub(r'd', r't', code)

        # replace ph -> fh
        code = re.sub(r'ph', r'fh', code)

        # replace b -> p
        code = re.sub(r'b', r'p', code)

        # replace sh -> s2
        code = re.sub(r'sh', r's2', code)

        # replace z -> s
        code = re.sub(r'z', r's', code)

        # replace initial vowel [aeiou] -> A
        code = re.sub(r'^[aeiou]', r'A', code)

        # replace all other vowels [aeiou] -> 3
        code = re.sub(r'[aeiou]', r'3', code)

        # replace j -> y
        code = re.sub(r'j', r'y', code)

        # replace an initial y3 -> Y3
        code = re.sub(r'^y3', r'Y3', code)

        # replace an initial y -> A
        code = re.sub(r'^y', r'A', code)

        # replace y -> 3
        code = re.sub(r'y', r'3', code)

        # replace 3gh3 -> 3kh3
        code = re.sub(r'3gh3', r'3kh3', code)

        # replace gh -> 22
        code = re.sub(r'gh', r'22', code)

        # replace g -> k
        code = re.sub(r'g', r'k', code)

        # replace groups of s,t,p,k,f,m,n by its single, upper-case equivalent
        for singleLetter in ["s", "t", "p", "k", "f", "m", "n"]:
            otherParts = re.split(singleLetter + "+", code)
            code = string.join(otherParts, string.upper(singleLetter))

        # replace w[3,h3] by W[3,h3]
        code = re.sub(r'w(h?3)', r'W\1', code)

        # replace final w with 3
        code = re.sub(r'w$', r'3', code)

        # replace w -> 2
        code = re.sub(r'w', r'2', code)

        # replace h at the beginning with an A
        code = re.sub(r'^h', r'A', code)

        # replace all other occurrences of h with a 2
        code = re.sub(r'h', r'2', code)

        # replace r3 with R3
        code = re.sub(r'r3', r'R3', code)

        # replace final r -> 3
        code = re.sub(r'r$', r'3', code)

        # replace r with 2
        code = re.sub(r'r', r'2', code)

        # replace l3 with L3
        code = re.sub(r'l3', r'L3', code)

        # replace final l -> 3
        code = re.sub(r'l$', r'3', code)

        # replace l with 2
        code = re.sub(r'l', r'2', code)

        # remove all 2's
        code = re.sub(r'2', r'', code)

        # replace the final 3 -> A
        code = re.sub(r'3$', r'A', code)

        # remove all 3's
        code = re.sub(r'3', r'', code)

        # extend the code by 10 '1' (one)
        code += '1' * 10

        # return the first 10 characters
        return code[:10]
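As a sanity check, the test values published for Caverphone 2.0 can be run through the method; a minimal Python 2 harness (the Term wrapper is mine, and it assumes the caverphone function above plus import re, string are in scope):

import re, string

class Term(object):
    def __init__(self, term):
        self.term = term

Term.caverphone = caverphone           # bind the method defined above

print Term("Stevenson").caverphone()   # STFNSN1111
print Term("Peter").caverphone()       # PTA1111111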
Example #45
0
            nextframe(infile, maxlevel, None)
            framecount = framecount + 1
        if frames:
            frames = frames[1:]
        arrayvalues = nextframe(infile, maxlevel, dimensions)
        if arrayvalues == {}:
            break
        outfilename = outfileprefix + "_%04d.gif" % (framecount)
        filelist.append(outfilename)
        if conserve == 0 or not os.path.exists(outfilename):
            # Call constructor from dictionary (see definition of arraydict)
            array = eval(arraydict[geometry])
            array.tile()
            del array.draw
            print "Writing to output file:", outfilename, "..."
            array.im.save(outfilename)
            del array.im
        framecount = framecount + 1
        if frames == []:
            break
    infile.close()

    # Generate animation file if requested (using external program gifsicle)
    if makeanim:
        animfilename = string.upper(outfileprefix) + "_ANIM.gif"
        anim_cmd = "gifsicle -O2 --no-loopcount -d %d " % (delay) \
          + string.join(filelist, ' ') + " > " + animfilename
        print "Running gifsicle with delay = %d" % (delay)
        print anim_cmd
        os.system(anim_cmd)
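The os.system call above relies on shell redirection; a sketch of the same invocation without a shell, assuming gifsicle's -o/--output option in place of "> file" (the frame names here are hypothetical):

import subprocess

filelist = ["frame_0000.gif", "frame_0001.gif"]   # hypothetical frames
delay, animfilename = 10, "FRAME_ANIM.gif"

cmd = (["gifsicle", "-O2", "--no-loopcount", "-d", str(delay)]
       + filelist + ["-o", animfilename])
subprocess.call(cmd)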
Example #46
0
# coding: utf-8

import string 

a = 'deCvaldfj DFJ dkf d234'
a = ''.join([x for x in a if not x.isdigit()])
print a
print sorted(a,key=string.upper)

a = [string.upper(x) for x in a]
print sorted(a)
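string.upper was removed in Python 3; the same case-insensitive sort reads as follows with str.upper as the key (a Python 3 rendering of the snippet above):

a = 'deCvaldfj DFJ dkf d234'
a = ''.join(x for x in a if not x.isdigit())
print(sorted(a, key=str.upper))       # case-insensitive character sort
print(sorted(x.upper() for x in a))   # uppercase first, then sort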
Example #47
0
import string
from pypinyin import lazy_pinyin

def key(word):
    # the loop variable 'string' is each character of a pinyin syllable; it
    # shadows the string module only inside the comprehension (Python 3
    # scoping), so the module lookups below still resolve correctly
    py = [[string.upper() for string in item] for item in lazy_pinyin(word)]
    for i in range(len(py)):
        if py[i][0][0] not in string.digits + string.ascii_uppercase:
            py[i][0] = "#" + py[i][0]
    return py
Example #48
0
def pobjauth(pioclass, pobjid, page, pobjstmt, mysqlc):
	
	authsort = ['N', 'R', 'F', ' ', '']		# '' and ' ' get treated as 'N' in processpobj.
	tblauth, tblautherr, table = tableauth(pioclass, pobjstmt, mysqlc)		#get table authorization
	outputauth = tblauth
	origtblauth = (tblauth[0], tblauth[1], tblauth[2], tblauth[3])
	
##	db = MySQLdb.connect(db=sqldb, user=sqluser, host=sqlhost)
##	c = db.cursor()
	sqlstmt = 'select pioclass, pobjid, page, dbinsert, dbupdate, dbdelete, dbselect from pioclasspobj'
	sqlstmt = sqlstmt + ' where pioclass = "%s" and pobjid = "%s" and page = "%s"' % (pioclass, pobjid, page)
	sqlstmt = sqlstmt + ' or pioclass = "" and pobjid = "%s" and page = "%s"' % (pobjid, page)
	sqlstmt = sqlstmt + ' or pioclass = "" and pobjid = "%s" and page = ""' % (pobjid)
	sqlstmt = sqlstmt + ' or pioclass = "" and pobjid = "" and page = ""'
	sqlstmt = sqlstmt + ' or pioclass = "%s" and pobjid = "%s" and page = ""' % (pioclass, pobjid)
	sqlstmt = sqlstmt + ' or pioclass = "%s" and pobjid = "" and page = ""' % (pioclass)
	sqlstmt = sqlstmt + ' order by pioclass DESC, pobjid DESC, page DESC'
	mysqlc.execute(sqlstmt)
	if mysqlc.rowcount == 0:
		#insert, update, delete, select (Full, Record, None)
		pobjauthl = ['N', 'N', 'N', 'N']
		mysqlc._rows = (('', '', '', 'N', 'N', 'N', 'N'),)
	else:
		pobjauthl = [string.upper(mysqlc._rows[0][3]), string.upper(mysqlc._rows[0][4]), string.upper(mysqlc._rows[0][5]),string.upper(mysqlc._rows[0][6])]
		#Only the first record matters: it is the one that determines the authorization.
		#Later records are more general authorizations; earlier records are more specific.
	
	#tblauth is more authoritative than pobjauth, except in the following situations.
	#Note: a blank pobjid means pobjauthl is a default list.
	#outputauth gets overwritten by a more restrictive pobjauth, not by a more restrictive DEFAULT pobjauth value.
	#If outputauth is blank, it takes the default pobjauth values.
	
	try:		#guards against mysqlc._rows == (); corrected above by seeding mysqlc._rows when mysqlc.rowcount == 0
		#if pobjid auth is less than outputauth use outputauth values.  except for blank pobjid
		if authsort.index(pobjauthl[0]) < authsort.index(outputauth[0]) and mysqlc._rows[0][1] <> '':		
			outputauth[0] = pobjauthl[0]
	except IndexError:
		print 'tableauth' + str(outputauth) + 'pobjauthl' + str(pobjauthl) + 'authsort'+ str(authsort)+'mysqlc._rows' + str(mysqlc._rows) + 'rowcount-' + str(mysqlc.rowcount)
		print 'pioclass' + pioclass + 'pobjid' + pobjid + '<br><br>'
	
	if authsort.index(pobjauthl[1]) < authsort.index(outputauth[1]) and mysqlc._rows[0][1] <> '':
		outputauth[1] = pobjauthl[1]
	
	if authsort.index(pobjauthl[2]) < authsort.index(outputauth[2]) and mysqlc._rows[0][1] <> '':
		outputauth[2] = pobjauthl[2]
	
	if authsort.index(pobjauthl[3]) < authsort.index(outputauth[3])  and mysqlc._rows[0][1] <> '':
		outputauth[3] = pobjauthl[3]
	
	if outputauth[0] == '' or outputauth[0] == ' ':		#if outputauth is blank, use pobjauth regardless
		outputauth[0] = pobjauthl[0]
	
	if outputauth[1] == '' or outputauth[1] == ' ':
		outputauth[1] = pobjauthl[1]
	
	if outputauth[2] == '' or outputauth[2] == ' ':
		outputauth[2] = pobjauthl[2]
	
	if outputauth[3] == '' or outputauth[3] == ' ':
		outputauth[3] = pobjauthl[3]
	
	pobjauth = {'dbinsert':outputauth[0], 'dbupdate':outputauth[1], 'dbdelete':outputauth[2], 'dbselect':outputauth[3]}
	
	return (pobjauth, tblautherr, origtblauth, pobjauthl)
Example #49
0
def itomic_Nof1(Nof1_sample, Nof1_item, original_labels, geneMappping,
                external_comparison_list, outputfile):
    if Nof1_item.has_key("samples"):
        itomic_samples = Nof1_item["samples"]
    else:
        itomic_samples = xena.xenaAPI.dataset_samples(Nof1_item["hub"],
                                                      Nof1_item["dataset"])

    #file header output
    fout = open(outputfile, 'w')
    filer_header(external_comparison_list, itomic_samples, Nof1_item,
                 Nof1_sample, fout)

    pDic = {}  # pvalue collection
    for original_label in original_labels:
        if original_label in geneMappping:
            gene = geneMappping[original_label]
        else:
            gene = original_label
        outputList = [original_label, gene]

        itomic_Data = get_itomic_Data(gene, Nof1_item["hub"],
                                      Nof1_item["dataset"], itomic_samples)

        #screen output
        Nof1_output(Nof1_sample, original_label, gene, itomic_Data,
                    Nof1_item["log2Theta"])

        Nof1_value = itomic_Data[Nof1_sample]

        rank, percentage = rank_and_percentage(Nof1_value,
                                               itomic_Data.values())
        outputList.append('{:.2f}%'.format(percentage))

        for item in external_comparison_list:
            hub = item["hub"]
            dataset = item["dataset"]
            samples = item["samples"]
            name = item["name"]
            mode = item["mode"]
            gene = string.upper(gene)

            if mode == "gene":
                values = xena.xenaAPI.Gene_values(hub, dataset, samples, gene)
            elif mode == "probe":
                values = xena.xenaAPI.Probe_values(hub, dataset, samples, gene)
            h_l_values = clean(values)  # comparison larger cohort values

            # log2TPM statistics
            # ttest p value
            try:
                tStat, p = scipy.stats.ttest_ind(itomic_Data.values(),
                                                 h_l_values,
                                                 equal_var=False)
                mean1 = numpy.mean(itomic_Data.values())
                mean2 = numpy.mean(h_l_values)
                outputList.append(str(p))  # ttest p value
                outputList.append(str(tStat))  # ttest t
                outputList.append(str(mean1))
                outputList.append(str(mean2))
                pDic[gene] = p
            except:
                outputList.append('')
                outputList.append('')
                outputList.append('')
                outputList.append('')

        print
        print Nof1_item["label"] + " ( n=", len(itomic_samples), "):"
        print "rank:", rank
        print "Rank %:", '{:.2f}%'.format(percentage)

        outputList.append('{:.2f}'.format(Nof1_value))
        outputList.append('{:.2f}'.format(
            revert_Log2_theta(Nof1_value, Nof1_item["log2Theta"])))
        fout.write(string.join(outputList, '\t') + '\n')

    fout.write("\n")
    fout.write(
        "Rank % : percentile of samples with lower expression than sample of interest.\n"
    )
    fout.write("Higher Rank %  means higher expression.\n")
    fout.close()
Example #50
0
    def run (self):
        if (sys.platform != "win32" and
            (self.distribution.has_ext_modules() or
             self.distribution.has_c_libraries())):
            raise DistutilsPlatformError \
                  ("distribution contains extensions and/or C libraries; "
                   "must be compiled on a Windows 32 platform")

        if not self.skip_build:
            self.run_command('build')

        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0
        install.plat_name = self.plat_name

        install_lib = self.reinitialize_command('install_lib')
        # we do not want to include pyc or pyo files
        install_lib.compile = 0
        install_lib.optimize = 0

        if self.distribution.has_ext_modules():
            # If we are building an installer for a Python version other
            # than the one we are currently running, then we need to ensure
            # our build_lib reflects the other Python version rather than ours.
            # Note that for target_version!=sys.version, we must have skipped the
            # build step, so there is no issue with enforcing the build of this
            # version.
            target_version = self.target_version
            if not target_version:
                assert self.skip_build, "Should have already checked this"
                target_version = sys.version[0:3]
            plat_specifier = ".%s-%s" % (self.plat_name, target_version)
            build = self.get_finalized_command('build')
            build.build_lib = os.path.join(build.build_base,
                                           'lib' + plat_specifier)

        # Use a custom scheme for the zip-file, because we have to decide
        # at installation time which scheme to use.
        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
            value = string.upper(key)
            if key == 'headers':
                value = value + '/Include/$dist_name'
            setattr(install,
                    'install_' + key,
                    value)

        log.info("installing to %s", self.bdist_dir)
        install.ensure_finalized()

        # avoid warning of 'install_lib' about installing
        # into a directory not in sys.path
        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))

        install.run()

        del sys.path[0]

        # And make an archive relative to the root of the
        # pseudo-installation tree.
        from tempfile import mktemp
        archive_basename = mktemp()
        fullname = self.distribution.get_fullname()
        arcname = self.make_archive(archive_basename, "zip",
                                    root_dir=self.bdist_dir)
        # create an exe containing the zip-file
        self.create_exe(arcname, fullname, self.bitmap)
        if self.distribution.has_ext_modules():
            pyversion = get_python_version()
        else:
            pyversion = 'any'
        self.distribution.dist_files.append(('bdist_wininst', pyversion,
                                             self.get_installer_filename(fullname)))
        # remove the zip-file again
        log.debug("removing temporary file '%s'", arcname)
        os.remove(arcname)

        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
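The scheme loop above only uppercases each install key, special-casing headers; run in isolation (string.upper(key) is equivalent to key.upper()):

for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
    value = key.upper()
    if key == 'headers':
        value = value + '/Include/$dist_name'
    print 'install_%s = %s' % (key, value)
# headers maps to HEADERS/Include/$dist_name; the rest map to PURELIB,
# PLATLIB, SCRIPTS and DATA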
Example #51
0
def mmis(f1, oper, f2=None, oper1=None, f3=None):
    """
        - Alternative
            Consider using array operations or isbinary()
        - Purpose
            Verify if a relationship among images is true or false.
        - Synopsis
            y = mmis(f1, oper, f2=None, oper1=None, f3=None)
        - Input
            f1:    Gray-scale (uint8 or uint16) or binary image.
            oper:  String relationship from: '==', '~=', '<','<=', '>',
                   '>=', 'binary', 'gray'.
            f2:    Gray-scale (uint8 or uint16) or binary image. Default:
                   None.
            oper1: String Default: None. relationship from: '==', '~=',
                   '<','<=', '>', '>='.
            f3:    Gray-scale (uint8 or uint16) or binary image. Default:
                   None.
        - Output
            y: Bool value: 0 or 1
        - Description
            Verify if the property or relationship between images is true or
            false. The result is true if the relationship is true for all
            the pixels in the image, and false otherwise. (Obs: this
            function replaces is equal, is lesseq, is binary).
        - Examples
            #
            fbin=binary([0, 1])
            f1=to_uint8([1, 2, 3])
            f2=to_uint8([2, 2, 3])
            f3=to_uint8([2, 3, 4])
            mmis(fbin,'binary')
            mmis(f1,'gray')
            mmis(f1,'==',f2)
            mmis(f1,'<',f3)
            mmis(f1,'<=',f2)
            mmis(f1,'<=',f2,'<=',f3)
    """
    from string import upper

    if f2 == None:
        oper = upper(oper)
        if oper == 'BINARY': return isbinary(f1)
        elif oper == 'GRAY': return not isbinary(f1)
        else:
            assert 0, 'oper should be BINARY or GRAY, was' + oper
    elif oper == '==':
        y = isequal(f1, f2)
    elif oper == '~=':
        y = not isequal(f1, f2)
    elif oper == '<=':
        y = mmislesseq(f1, f2)
    elif oper == '>=':
        y = mmislesseq(f2, f1)
    elif oper == '>':
        y = isequal(neg(threshad(f2, f1)), binary(1))
    elif oper == '<':
        y = isequal(neg(threshad(f1, f2)), binary(1))
    else:
        assert 0, 'oper must be one of: ==, ~=, >, >=, <, <=, it was:' + oper
    if oper1 != None:
        if oper1 == '==': y = y and isequal(f2, f3)
        elif oper1 == '~=': y = y and (not isequal(f2, f3))
        elif oper1 == '<=': y = y and mmislesseq(f2, f3)
        elif oper1 == '>=': y = y and mmislesseq(f3, f2)
        elif oper1 == '>': y = y and isequal(neg(threshad(f3, f2)), binary(1))
        elif oper1 == '<': y = y and isequal(neg(threshad(f2, f3)), binary(1))
        else:
            assert 0, 'oper1 must be one of: ==, ~=, >, >=, <, <=, it was:' + oper1
    return y
Example #52
0
                # multiple entries with the same label are not allowed
                name = string.strip(pieces[0])
                linkMap[name] = "LINK-" + ` len(linkMap) `
#	print "Mapping link ID="+name+" to ID="+`linkMap[name]`
            elif (currentSection == 14):
                # multiple entries with the same label are not allowed
                name = string.strip(pieces[0])
                linkMap[name] = "PUMP-" + ` len(linkMap) `
#	print "Mapping pump ID="+name+" to ID="+`linkMap[name]`
            elif (currentSection == 18):
                # multiple entries with the same label are not allowed
                name = string.strip(pieces[0])
                nodeMap[name] = "RESERVOIR-" + ` len(nodeMap) `
#	print "Mapping reservoir ID="+name+" to ID="+`nodeMap[name]`
            elif (currentSection == 19):
                if (string.upper(pieces[0]) == "RULE"):
                    name = string.strip(pieces[1])
                    ruleMap[name] = "RULE-" + ` len(ruleMap) `
#	  print "Mapping rule ID="+name+" to ID="+`ruleMap[name]`
            elif (currentSection == 23):
                # multiple entries with the same label are not allowed
                name = string.strip(pieces[0])
                nodeMap[name] = "TANK-" + ` len(nodeMap) `
#	print "Mapping tank ID="+name+" to ID="+`nodeMap[name]`
            elif (currentSection == 26):
                # multiple entries with the same label are not allowed
                name = string.strip(pieces[0])
                linkMap[name] = "VALVE-" + ` len(linkMap) `
#	print "Mapping valve ID="+name+" to ID="+`linkMap[name]`

epanetFilePass1.close()
Example #53
0
def processComponentList(compList, resMap, associatedResListMap):
    for comp in compList:

        #----------Changes to support templateGroup association--------
        templateGroup = templateGroupMap[comp.attributes["name"].value]
        import_string = "from " + templateGroup + " import omTemplate"
        exec import_string
        reload(omTemplate)
        #--------------------------------------------------------------

        map = dict()
        compName = comp.attributes["name"].value
        associatedResList = associatedResListMap[compName]
        map['compName'] = compName
        map['capCompName'] = string.upper(compName)
        if os.path.exists(compName) == False:
            os.mkdir(compName)

        omClasses = ""
        oampConfigSourceHashDefines = ""
        omClassDefinitions = ""
        consDestPrototypes = ""
        provFnPrototypes = ""
        alarmFnPrototypes = ""
        deviceObjectEnums = ""
        deviceOperationMethods = ""
        deviceOperationMethodProtoTypes = ""
        deviceOperationsTables = ""
        halDeviceObjectInfoTables = ""
        deviceObjectTableToBegenerated = "false"
        halObjConfTableEntries = ""
        deviceObjectTableEntries = ""
        provFilesToBeCopied = "false"
        generateStaticPMFiles = False

        isBUILD_CPP = "false"

        try:
            isBUILD_CPP = comp.attributes["isBuildCPP"].value
        except:
            pass

        doMethodsFileContents = omTemplate.deviceOperationsHeadertemplate.safe_substitute(
            map)

        if len(associatedResList) > 0:
            for resName in associatedResList:
                alarmHalDevInfoTableToBegenerated = "false"
                provHalDevInfoTableToBegenerated = "false"
                comp_res_File_To_Be_Generated = "false"
                provFnDefinitions = ""
                alarmFnDefinitions = ""
                res = resMap[resName]
                map['resName'] = resName
                map['mopath'] = getMOPath(resName, templateGroup)
                map['conditionalIncludes'] = ""
                map['userFnDec'] = ""
                map['userFnDef'] = ""
                map['halObj'] = ""
                map['provConstructorHalBlob'] = ""
                map['provDestructorHalBlob'] = ""
                map['omHandle'] = ""

                if not generateStaticPMFiles:
                    pmObj = res.getElementsByTagName("pm")
                    if len(pmObj) > 0:
                        if len(pmObj[0].getElementsByTagName("attribute")):
                            generateStaticPMFiles = True

                dos = res.getElementsByTagName("deviceObject")
                if len(dos) > 0:
                    deviceObjectTableToBegenerated = "true"
                    for do in dos:
                        deviceOps = do.getElementsByTagName("deviceOperations")
                        deviceID = do.attributes["deviceId"].value
                        deviceIDEnumName = string.upper(
                            resName) + "_" + string.upper(deviceID)
                        deviceObjectEnums += omTemplate.doEnumEntryTemplate.safe_substitute(
                            doEnumName=deviceIDEnumName)
                        deviceObjectTableEntries += getDeviceObjectTableEntry(
                            resName, do)
                        if len(deviceOps) > 0:
                            deviceOp = deviceOps[0]
                            deviceOperationMethods = getDeviceOperationMethodsAndProtoTypes(
                                deviceOp, resName, deviceID)[0]
                            deviceOperationMethodProtoTypes += getDeviceOperationMethodsAndProtoTypes(
                                deviceOp, resName, deviceID)[1]
                            deviceOperationsTables += getDeviceOperationsTable(
                                deviceOp, resName, deviceID)
                            doMethodsFileContents += deviceOperationMethods

                alarm = res.getElementsByTagName("alarmManagement")
                if len(alarm) > 0 and alarm[0].attributes[
                        "isEnabled"].value == "true":
                    comp_res_File_To_Be_Generated = "true"
                    map['svc'] = "Alarm"
                    map['svcid'] = "ALARM"
                    map['res'] = resName.upper()
                    map['alarmConstructorHalBlob'] = ""
                    map['alarmDestructorHalBlob'] = ""
                    alarmAssociatedDOTableEntries = ""
                    associatedDOs = alarm[0].getElementsByTagName(
                        "associatedDO")
                    if len(associatedDOs) > 0:
                        map['halObj'] = "ClHalObjectHandleT halObj;"
                        map['omHandle'] = "ClHandleT omHandle;"
                        map['alarmConstructorHalBlob'] = omTemplate.alarmConstructorHalBlobTemplate.safe_substitute(
                            map)
                        map['alarmDestructorHalBlob'] = omTemplate.alarmDestructorHalBlobTemplate.safe_substitute(
                            map)
                        alarmHalDevInfoTableToBegenerated = "true"
                        for associatedDO in associatedDOs:
                            map['deviceID'] = resName.upper(
                            ) + "_" + string.upper(
                                associatedDO.attributes["deviceId"].value)
                            map['accessPriority'] = associatedDO.attributes[
                                "accessPriority"].value
                            alarmAssociatedDOTableEntries += omTemplate.associatedDOTableEntryTemplate.safe_substitute(
                                map)

                        map['associatedDOTableEntries'] = alarmAssociatedDOTableEntries
                        halDeviceObjectInfoTables += omTemplate.halDevObjInfoTableTemplate.safe_substitute(
                            map)
                        map['omClassId'] = "CL_OM_ALARM_" + resName.upper(
                        ) + "_CLASS_TYPE"
                        halObjConfTableEntries += omTemplate.halObjConfTableEntryTemplate.safe_substitute(
                            map)

                    omClasses += omTemplate.omClassTemplate.safe_substitute(
                        map)
                    oampConfigSourceHashDefines += omTemplate.oampConfigSourceHashDefineTemplate.safe_substitute(
                        map)
                    omClassDefinitions += omTemplate.omClassTableEntryTemplate.safe_substitute(
                        map)
                    consDestPrototypes += omTemplate.alarmConstDestPrototypeTemplate.safe_substitute(
                        map)
                    alarmFnPrototypes += omTemplate.alarmFunctionPrototypesTemplate.safe_substitute(
                        map)
                    alarmFnDefinitions += omTemplate.alarmFnDefsTemplate.safe_substitute(
                        map)
                    map['halObj'] = ""

                prov = res.getElementsByTagName("provisioning")
                pm = res.getElementsByTagName("pm")
                if (len(prov) > 0
                        and prov[0].attributes["isEnabled"].value == "true"):
                    comp_res_File_To_Be_Generated = "true"
                    provFilesToBeCopied = "true"
                    map['svc'] = "Prov"
                    map['svcid'] = "PROV"
                    map['res'] = resName.upper()
                    map['provConstructorHalBlob'] = ""
                    map['provDestructorHalBlob'] = ""
                    map['provUpdateHalBlob'] = ""
                    provAssociatedDOTableEntries = ""
                    associatedDOs = prov[0].getElementsByTagName(
                        "associatedDO")
                    if len(associatedDOs) > 0:
                        map['halObj'] = "ClHalObjectHandleT halObj;"
                        map['omHandle'] = "ClHandleT omHandle;"
                        map['provConstructorHalBlob'] = omTemplate.provConstructorHalBlobTemplate.safe_substitute(
                            map)
                        map['provDestructorHalBlob'] = omTemplate.provDestructorHalBlobTemplate.safe_substitute(
                            map)
                        map['provUpdateHalBlob'] = omTemplate.provUpdateHalBlobTemplate.safe_substitute(
                            map)
                        provHalDevInfoTableToBegenerated = "true"
                        for associatedDO in associatedDOs:
                            map['deviceID'] = resName.upper(
                            ) + "_" + string.upper(
                                associatedDO.attributes["deviceId"].value)
                            map['accessPriority'] = associatedDO.attributes[
                                "accessPriority"].value
                            provAssociatedDOTableEntries += omTemplate.associatedDOTableEntryTemplate.safe_substitute(
                                map)

                        map['associatedDOTableEntries'] = provAssociatedDOTableEntries
                        halDeviceObjectInfoTables += omTemplate.halDevObjInfoTableTemplate.safe_substitute(
                            map)
                        map['omClassId'] = "CL_OM_PROV_" + resName.upper(
                        ) + "_CLASS_TYPE"
                        halObjConfTableEntries += omTemplate.halObjConfTableEntryTemplate.safe_substitute(
                            map)

                    omClasses += omTemplate.omClassTemplate.safe_substitute(
                        map)
                    oampConfigSourceHashDefines += omTemplate.oampConfigSourceHashDefineTemplate.safe_substitute(
                        map)
                    omClassDefinitions += omTemplate.omClassTableEntryTemplate.safe_substitute(
                        map)
                    consDestPrototypes += omTemplate.provConstDestPrototypeTemplate.safe_substitute(
                        map)
                    provFnPrototypes += omTemplate.provFunctionPrototypesTemplate.safe_substitute(
                        map)

                    provFnDefinitions += omTemplate.provFnDefsTemplate.safe_substitute(
                        map)
                    map['halObj'] = ""

                omFnDefsFileContents = ""
                omFnDefsFileContents += omTemplate.provFnDefsHeaderTemplate.safe_substitute(
                    map)
                omFnDefsFileContents += provFnDefinitions
                omFnDefsFileContents += alarmFnDefinitions

                file = ""
                cFile = compName + "/" + "cl" + compName + resName + ".c"
                cppFile = compName + "/" + "cl" + compName + resName + ".C"

                if isBUILD_CPP == "true":
                    file = cppFile
                else:
                    file = cFile

                if comp_res_File_To_Be_Generated == "true":
                    #-----------------------------------------------------------
                    oldCFile = oldNewNameMap[
                        compName] + "/" + "cl" + oldNewNameMap[
                            compName] + oldNewNameMap[resName] + ".c"
                    oldCppFile = oldNewNameMap[
                        compName] + "/" + "cl" + oldNewNameMap[
                            compName] + oldNewNameMap[resName] + ".C"
                    oldFilePath = "app/" + file
                    if os.path.isfile("../../src/app/" + oldCppFile):
                        oldFilePath = "app/" + oldCppFile
                    if os.path.isfile("../../src/app/" + oldCFile):
                        oldFilePath = "app/" + oldCFile
                    newFilePath = "app/" + file
                    writeToMapFile(oldFilePath, newFilePath)
                    #-----------------------------------------------------------

                    omFnDefsFile = open(file, "w")
                    omFnDefsFile.write(omFnDefsFileContents)
                    omFnDefsFile.close()

# fix for bug 4317 - remove old cl<Comp><Res>.c if it exists and comp_res_File is not to be generated this time
                else:
                    if (os.path.isfile(file)):
                        try:
                            os.remove(file)
                        except os.error:
                            pass

            #-----------------------------------------------------------------
            # If the component had resources associated with it then copy
            # the base transaction callback start and end code from the
            # template directory.
            #-----------------------------------------------------------------
            if provFilesToBeCopied == "true":
                provTemplateAppCFile = "clCompAppProv.c"
                provTemplateAppCppFile = "clCompAppProv.C"
                provTemplateApphFile = "clCompAppProv.h"

                templateGroupDir = os.path.join(templatesDir, templateGroup)
                provTemplateAppCFilePath = templateGroupDir + "/" + provTemplateAppCFile
                provTemplateApphFilePath = templateGroupDir + "/" + provTemplateApphFile

                provdestApphFile = compName + "/" + provTemplateApphFile
                provdestAppCFile = compName + "/" + provTemplateAppCFile
                provdestAppCppFile = compName + "/" + provTemplateAppCppFile

                #--------------------------------------------------------------
                oldCFile = oldNewNameMap[compName] + "/" + "clCompAppProv.c"
                oldCppFile = oldNewNameMap[compName] + "/" + "clCompAppProv.C"
                if isBUILD_CPP == "true":
                    destAppFile = provdestAppCppFile
                else:
                    destAppFile = provdestAppCFile
                oldFilePath = "app/" + destAppFile
                if os.path.isfile("../../src/app/" + oldCppFile):
                    oldFilePath = "app/" + oldCppFile
                if os.path.isfile("../../src/app/" + oldCFile):
                    oldFilePath = "app/" + oldCFile
                newFilePath = "app/" + destAppFile
                writeToMapFile(oldFilePath, newFilePath)
                #--------------------------------------------------------------

                if isBUILD_CPP == "true":
                    shutil.copyfile(provTemplateAppCFilePath,
                                    provdestAppCppFile)
                else:
                    shutil.copyfile(provTemplateAppCFilePath, provdestAppCFile)
                shutil.copyfile(provTemplateApphFilePath, provdestApphFile)

            if deviceObjectTableEntries != "":

                file = ""
                cFile = compName + "/" + "cl" + compName + "DO" + ".c"
                cppFile = compName + "/" + "cl" + compName + "DO" + ".C"

                if isBUILD_CPP == "true":
                    file = cppFile

                else:
                    file = cFile

                if not (os.path.isfile(file)):
                    doMethodsFile = open(file, "w")
                    doMethodsFile.write(doMethodsFileContents)
                    doMethodsFile.close()

                map['deviceIDEnums'] = deviceObjectEnums
                halConfHeaderFileContents = omTemplate.halConfHeaderFileTemplate.safe_substitute(
                    map)
                halConfHeaderFileContents += deviceOperationMethodProtoTypes
                halConfHeaderFileContents += omTemplate.halConfHeaderFileFooterTemplate.safe_substitute(
                    map)
                file = compName + "/" + "cl" + compName + "HalConf.h"
                halConfHeaderFile = open(file, "w")
                halConfHeaderFile.write(halConfHeaderFileContents)
                halConfHeaderFile.close()
                #-----------------------------------------------------------
                oldFile = oldNewNameMap[compName] + "/" + "cl" + oldNewNameMap[
                    compName] + "HalConf.h"
                oldFilePath = "app/" + oldFile
                newFilePath = "app/" + file
                writeToMapFile(oldFilePath, newFilePath)
                #-----------------------------------------------------------

            file = ""
            cFile = compName + "/" + "cl" + compName + "HalConf" + ".c"
            cppFile = compName + "/" + "cl" + compName + "HalConf" + ".C"

            if isBUILD_CPP == "true":
                file = cppFile
            else:
                file = cFile

            if halObjConfTableEntries != "" and deviceObjectTableEntries != "":

                halConfSrcFileContents = omTemplate.halConfSrcFileHeaderTemplate.safe_substitute(
                    map)
                halConfSrcFileContents += deviceOperationsTables
                map['deviceObjectTableEntries'] = deviceObjectTableEntries
                halConfSrcFileContents += omTemplate.deviceObjectTableTemplate.safe_substitute(
                    map)
                halConfSrcFileContents += halDeviceObjectInfoTables
                map['halObjConfTableEntries'] = halObjConfTableEntries
                halConfSrcFileContents += omTemplate.halObjConfTableTemplate.safe_substitute(
                    map)
                halConfSrcFileContents += omTemplate.halConfigTemplate.safe_substitute(
                    map)
                #-----------------------------------------------------------
                oldCFile = oldNewNameMap[compName] + "/" + "cl" + oldNewNameMap[
                    compName] + "HalConf" + ".c"
                oldCppFile = oldNewNameMap[
                    compName] + "/" + "cl" + oldNewNameMap[
                        compName] + "HalConf" + ".C"
                oldFilePath = "app/" + file
                if os.path.isfile("../../src/app/" + oldCppFile):
                    oldFilePath = "app/" + oldCppFile
                if os.path.isfile("../../src/app/" + oldCFile):
                    oldFilePath = "app/" + oldCFile
                newFilePath = "app/" + file
                writeToMapFile(oldFilePath, newFilePath)
                #-----------------------------------------------------------

                halConfSrcFile = open(file, "w")
                halConfSrcFile.write(halConfSrcFileContents)
                halConfSrcFile.close()

# fix for bug 4317 - remove old cl<Comp>HalConf.c if it exists and there are no hal/do entries
            else:
                if (os.path.isfile(file)):
                    try:
                        os.remove(file)
                    except os.error:
                        pass

            oampConfigHeaderFile = ""
            oampConfigHeaderFile += omTemplate.oampConfigHeaderTemplate.safe_substitute(
                map)  # replace compName = compName with map by Abhay
            oampConfigHeaderFile += omClasses
            oampConfigHeaderFile += consDestPrototypes
            oampConfigHeaderFile += provFnPrototypes
            oampConfigHeaderFile += alarmFnPrototypes
            oampConfigHeaderFile += omTemplate.oampConfigFooterTemplate.safe_substitute(
                capCompName=string.upper(compName))
            file = compName + "/" + "cl" + compName + "OAMPConfig.h"
            out_file = open(file, "w")
            out_file.write(oampConfigHeaderFile)
            out_file.close()

            oampConfigSourceFile = ""
            oampConfigSourceFile += omTemplate.oampConfigSourceHeaderTemplate.safe_substitute(
                map)
            oampConfigSourceFile += oampConfigSourceHashDefines
            if omClassDefinitions != "":
                oampConfigSourceFile += omTemplate.omClassTableTemplate.safe_substitute(
                    omClassTableEntries=omClassDefinitions)

            file = ""
            cFile = compName + "/" + "cl" + compName + "OAMPConfig" + ".c"
            cppFile = compName + "/" + "cl" + compName + "OAMPConfig" + ".C"

            if isBUILD_CPP == "true":
                file = cppFile

            else:
                file = cFile

            out_file = open(file, "w")
            out_file.write(oampConfigSourceFile)
            out_file.close()

        if generateStaticPMFiles:
            pmAppCFile = "/clCompAppPM.c"
            pmAppCppFile = "/clCompAppPM.C"
            pmAppHFile = "/clCompAppPM.h"

            #--------------------------------------------------------------
            oldCFile = oldNewNameMap[compName] + pmAppCFile
            oldCppFile = oldNewNameMap[compName] + pmAppCppFile
            if isBUILD_CPP == "true":
                destAppFile = compName + pmAppCppFile
            else:
                destAppFile = compName + pmAppCFile

            oldFilePath = "app/" + destAppFile
            if os.path.isfile("../../src/app/" + oldCppFile):
                oldFilePath = "app/" + oldCppFile
            if os.path.isfile("../../src/app/" + oldCFile):
                oldFilePath = "app/" + oldCFile
            newFilePath = "app/" + destAppFile
            writeToMapFile(oldFilePath, newFilePath)
            #--------------------------------------------------------------

            try:
                if isBUILD_CPP == "true":
                    shutil.copyfile(staticDir + pmAppCFile,
                                    compName + pmAppCppFile)
                else:
                    shutil.copyfile(staticDir + pmAppCFile,
                                    compName + pmAppCFile)
                shutil.copyfile(staticDir + pmAppHFile, compName + pmAppHFile)
            except:
                print "Supporting files for app pm were not found in static folder"
Example #54
0
    def __init__(self, fp=None, headers=None, outerboundary="",
		 environ=os.environ, keep_blank_values=None):
	"""Constructor.  Read multipart/* until last part.

	Arguments, all optional:

	fp      	: file pointer; default: sys.stdin

	headers 	: header dictionary-like object; default:
	    taken from environ as per CGI spec

        outerboundary   : terminating multipart boundary
	    (for internal use only)

	environ         : environment dictionary; default: os.environ

        keep_blank_values: flag indicating whether blank values in
            URL encoded forms should be treated as blank strings.  
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
	    blank values are to be ignored and treated as if they were
	    not included.

	"""
	method = None
	self.keep_blank_values = keep_blank_values
	if environ.has_key('REQUEST_METHOD'):
	    method = string.upper(environ['REQUEST_METHOD'])
	if not fp and method == 'GET':
	    qs = None
	    if environ.has_key('QUERY_STRING'):
		qs = environ['QUERY_STRING']
	    from StringIO import StringIO
	    fp = StringIO(qs or "")
	    if headers is None:
		headers = {'content-type':
			   "application/x-www-form-urlencoded"}
	if headers is None:
	    headers = {}
	    if environ.has_key('CONTENT_TYPE'):
		headers['content-type'] = environ['CONTENT_TYPE']
	    if environ.has_key('CONTENT_LENGTH'):
		headers['content-length'] = environ['CONTENT_LENGTH']
	self.fp = fp or sys.stdin
	self.headers = headers
	self.outerboundary = outerboundary

	# Process content-disposition header
	cdisp, pdict = "", {}
	if self.headers.has_key('content-disposition'):
	    cdisp, pdict = parse_header(self.headers['content-disposition'])
	self.disposition = cdisp
	self.disposition_options = pdict
	self.name = None
	if pdict.has_key('name'):
	    self.name = pdict['name']
	self.filename = None
	if pdict.has_key('filename'):
	    self.filename = pdict['filename']

	# Process content-type header
	ctype, pdict = "text/plain", {}
	if self.headers.has_key('content-type'):
	    ctype, pdict = parse_header(self.headers['content-type'])
	self.type = ctype
	self.type_options = pdict
	self.innerboundary = ""
	if pdict.has_key('boundary'):
	    self.innerboundary = pdict['boundary']
	clen = -1
	if self.headers.has_key('content-length'):
	    try:
		clen = string.atoi(self.headers['content-length'])
	    except:
		pass
	self.length = clen

	self.list = self.file = None
	self.done = 0
	self.lines = []
	if ctype == 'application/x-www-form-urlencoded':
	    self.read_urlencoded()
	elif ctype[:10] == 'multipart/':
	    self.read_multi()
	else:
	    self.read_single()
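For the GET branch above, a minimal Python 2 usage sketch (the query string is hypothetical; assumes the surrounding FieldStorage class and its MiniFieldStorage items):

import os

os.environ['REQUEST_METHOD'] = 'GET'
os.environ['QUERY_STRING'] = 'name=ada&lang=py'   # hypothetical query

form = FieldStorage()        # parses QUERY_STRING via the branch above
print form['name'].value     # ada
print form['lang'].value     # py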
Example #55
0
def check_variable_len_bcs_dups(header, mapping_data, errors):
    """ Checks variable length barcodes plus sections of primers for dups

    header:  list of header strings
    mapping_data:  list of lists of raw metadata mapping file data
    errors:  list of errors
    """

    header_field_to_check = "BarcodeSequence"

    # Skip if no field BarcodeSequence
    try:
        check_ix = header.index(header_field_to_check)
    except ValueError:
        return errors

    linker_primer_field = "LinkerPrimerSequence"

    try:
        linker_primer_ix = header.index(linker_primer_field)
        no_primers = False
    except ValueError:
        no_primers = True

    barcodes = []
    bc_lens = []

    correction = 1

    for curr_data in mapping_data:
        barcodes.append(upper(curr_data[check_ix]))
        bc_lens.append(len(curr_data[check_ix]))

    # Get max length of barcodes to determine how many primer bases to slice
    barcode_max_len = max(bc_lens)

    # Have to do second pass to append correct number of nucleotides to
    # check for duplicates between barcodes and primer sequences

    bcs_added_nts = []
    for curr_data in mapping_data:
        if no_primers:
            bcs_added_nts.append(upper(curr_data[check_ix]))
        else:
            adjusted_len = barcode_max_len - len(curr_data[check_ix])
            bcs_added_nts.append(
                upper(curr_data[check_ix] +
                      curr_data[linker_primer_ix][0:adjusted_len]))

    dups = duplicates_indices(bcs_added_nts)

    for curr_dup in dups:
        for curr_loc in dups[curr_dup]:
            if no_primers:
                errors.append('Duplicate barcode %s found.\t%d,%d' %
                              (curr_dup, curr_loc + correction, check_ix))
            else:
                errors.append(
                    'Duplicate barcode and primer fragment sequence ' +
                    '%s found.\t%d,%d' %
                    (curr_dup, curr_loc + correction, check_ix))

    return errors
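The core trick above is padding each variable-length barcode with leading primer bases until it reaches the longest barcode's length, then checking the composites for duplicates. In isolation, with made-up data:

# upper() stands in for the string.upper-style helper used in this module.
barcodes = ['ACGT', 'ACGTA', 'ACGT']
primers = ['GGCCA', 'TTAAC', 'GACCT']

max_len = max(len(b) for b in barcodes)
composites = [(b + p[:max_len - len(b)]).upper()
              for b, p in zip(barcodes, primers)]
print composites   # ['ACGTG', 'ACGTA', 'ACGTG']: rows 0 and 2 collide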
Example #56
0
 def add_doc(self, document, keyword_list):
     for keyword in keyword_list:
         keyword = upper(keyword)
         self._addlink(keyword, document)
Example #57
0
 def __init__(self, seq):
     self.seq = string.upper(seq.strip())
Example #58
0
def check_added_demultiplex_dups(header,
                                 mapping_data,
                                 errors,
                                 has_barcodes=True,
                                 added_demultiplex_field=None):
    """ Checks that all barcodes and added demultiplex fields are unique

    header:  list of header strings
    mapping_data:  list of lists of raw metadata mapping file data
    errors:  list of errors
    has_barcodes:  True if barcode fields are to be used.
    added_demultiplex_field:  If specified, references a field in the mapping
     file to use for demultiplexing.  These are to be read from fasta labels
     during the actual demultiplexing step.  All combinations of barcodes,
     primers, and the added_demultiplex_field must be unique.
    """

    # Treat as variable length to test combinations of barcodes and the
    # added demultiplex field (should return the same result for the barcode
    # component)
    correction = 1

    header_field_to_check = "BarcodeSequence"
    bc_found = False

    # Skip if no field BarcodeSequence
    if has_barcodes:
        try:
            bc_ix = header.index(header_field_to_check)
            bc_found = True
        except ValueError:
            pass

    linker_primer_field = "LinkerPrimerSequence"

    try:
        linker_primer_ix = header.index(linker_primer_field)
        no_primers = False
    except ValueError:
        no_primers = True

    try:
        added_demultiplex_ix = header.index(added_demultiplex_field)
    except ValueError:
        # Skip out at this point, header check will have error for missing
        # field
        return errors

    barcodes = []
    bc_lens = []
    bcs_added_field = []

    if has_barcodes and bc_found:
        for curr_data in mapping_data:
            barcodes.append(upper(curr_data[bc_ix]))
            bc_lens.append(len(curr_data[bc_ix]))

        # Get max length of barcodes to determine how many primer bases to
        # slice
        barcode_max_len = max(bc_lens)

        # Have to do second pass to append correct number of nucleotides to
        # check for duplicates between barcodes and primer sequences

        for curr_data in mapping_data:
            if no_primers:
                bcs_added_field.append(curr_data[bc_ix] +
                                       curr_data[added_demultiplex_ix])
            else:
                adjusted_len = barcode_max_len - len(curr_data[bc_ix])
                bcs_added_field.append(
                    curr_data[bc_ix] +
                    curr_data[linker_primer_ix][0:adjusted_len] +
                    curr_data[added_demultiplex_ix])
    else:
        for curr_data in mapping_data:
            bcs_added_field.append(curr_data[added_demultiplex_ix])

    dups = duplicates_indices(bcs_added_field)

    for curr_dup in dups:
        if has_barcodes and bc_found:
            for curr_loc in dups[curr_dup]:
                errors.append(
                    'Duplicate barcode and added demultiplex field ' +
                    '%s found.\t%d,%d' %
                    (curr_dup, curr_loc + correction, bc_ix))
        else:
            for curr_loc in dups[curr_dup]:
                errors.append(
                    'Duplicate added demultiplex field ' + '%s found.\t%d,%d' %
                    (curr_dup, curr_loc + correction, added_demultiplex_ix))

    return errors
Example #59
0
def test(verbose=True):
    def ext(x):
        if x == 'tiff': x = 'tif'
        return x

    #grab all drawings from the tests module and write out.
    #make a page of links in HTML to assist viewing.
    import os
    from reportlab.graphics import testshapes
    getAllTestDrawings = testshapes.getAllTestDrawings
    drawings = []
    if not os.path.isdir('pmout'):
        os.mkdir('pmout')
    htmlTop = """<html><head><title>renderPM output results</title></head>
    <body>
    <h1>renderPM results of output</h1>
    """
    htmlBottom = """</body>
    </html>
    """
    html = [htmlTop]
    names = {}
    argv = sys.argv[1:]
    E = [a for a in argv if a.startswith('--ext=')]
    if not E:
        E = ['gif', 'tiff', 'png', 'jpg', 'pct', 'py', 'svg']
    else:
        for a in E:
            argv.remove(a)
        E = (','.join([a[6:] for a in E])).split(',')

    #print in a loop, with their doc strings
    for (drawing, docstring,
         name) in getAllTestDrawings(doTTF=hasattr(_renderPM, 'ft_get_face')):
        i = names[name] = names.setdefault(name, 0) + 1
        if i > 1: name += '.%02d' % (i - 1)
        if argv and name not in argv: continue
        fnRoot = name
        w = int(drawing.width)
        h = int(drawing.height)
        html.append('<hr><h2>Drawing %s</h2>\n<pre>%s</pre>' %
                    (name, docstring))

        for k in E:
            if k in ['gif', 'png', 'jpg', 'pct']:
                html.append('<p>%s format</p>\n' % string.upper(k))
            try:
                filename = '%s.%s' % (fnRoot, ext(k))
                fullpath = os.path.join('pmout', filename)
                if os.path.isfile(fullpath):
                    os.remove(fullpath)
                if k == 'pct':
                    from reportlab.lib.colors import white
                    drawToFile(drawing,
                               fullpath,
                               fmt=k,
                               configPIL={'transparent': white})
                elif k in ['py', 'svg']:
                    drawing.save(formats=['py', 'svg'],
                                 outDir='pmout',
                                 fnRoot=fnRoot)
                else:
                    drawToFile(drawing, fullpath, fmt=k)
                if k in ['gif', 'png', 'jpg']:
                    html.append('<img src="%s" border="1"><br>\n' % filename)
                elif k == 'py':
                    html.append('<a href="%s">python source</a><br>\n' %
                                filename)
                elif k == 'svg':
                    html.append('<a href="%s">SVG</a><br>\n' % filename)
                if verbose: print 'wrote', fullpath
            except AttributeError:
                print 'Problem drawing %s file' % k
                raise
        if os.environ.get('RL_NOEPSPREVIEW', '0') == '1':
            drawing.__dict__['preview'] = 0
        drawing.save(formats=['eps', 'pdf'], outDir='pmout', fnRoot=fnRoot)
    html.append(htmlBottom)
    htmlFileName = os.path.join('pmout', 'index.html')
    open(htmlFileName, 'w').writelines(html)
    if sys.platform == 'mac':
        from reportlab.lib.utils import markfilename
        markfilename(htmlFileName, ext='HTML')
    if verbose: print 'wrote %s' % htmlFileName
Example #60
0
def build_db_operator_enum_member(struct_name):
    enum_name = string.upper(struct_name)[4:]  # strip the leading "Sql_" prefix
    return "        DB_OPRATOR_%(enum_name)s,"%(locals())