Example #1
def nmrinfo_readdatafile(datafile):
  # WE KEEP A LIST IN ORDER TO RETAIN THE ORDER IN THE FILE
  filelist = []
  # READ AND PARSE THE DICTIONARY TYPE FILE
  try: dctfile = open(datafile,"r")
  except: nTerror("Datasetfile %s could not be read" %datafile)
  dct = {}
  for line in dctfile.readlines():
    line=string.strip(line)
    # CHECK IF CURRENT LINE IS A COMMENT (STARTS WITH '#')
    l=len(line);
    if (l):
      if (line[0] not in ['#','/']):
        # IF LINE IS NOT A COMMENT:
        #   CHECK IF DICTIONARY KEY AND ENTRY ARE REALLY PRESENT
        i=string.find(line,"=")
        if (i==-1):  nTerror("No equals sign found in %s at %s" % (datafile,line))
        if (i==0):   nTerror("No data found before equals sign in %s at %s" % (datafile,line))
        if (i==l-1): nTerror("No data found behind equals sign in %s at %s" % (datafile,line))
        # ADD TO DICTIONARY
        dct[string.strip(line[:i])] = string.strip(line[i+1:])
      # END OF DICT
      if line[:2]=='//':
          filelist.append(dct)
          dct = {}
  return filelist
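For orientation, a minimal usage sketch (hypothetical file contents and keys; assumes the function above and its string/nTerror helpers are importable). Lines starting with '#' or '/' are comments, 'key = value' pairs fill one record, and a '//' line closes it:

# Hypothetical dataset file and the records it yields:
sample = """\
# a comment
name = hrdc
spectra = 2
//
name = parvulustat
spectra = 1
//
"""
open("dataset.txt", "w").write(sample)
print(nmrinfo_readdatafile("dataset.txt"))
# -> [{'name': 'hrdc', 'spectra': '2'}, {'name': 'parvulustat', 'spectra': '1'}]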
Example #2
def parseline(line):
    fields = []
    i, n = 0, len(line)
    while i < n:
        field, i = parsefield(line, i, n)
        fields.append(field)
        i = i+1 # Skip semicolon
    if len(fields) < 2:
        return None, None
    key, view, rest = fields[0], fields[1], fields[2:]
    fields = {'view': view}
    for field in rest:
        i = string.find(field, '=')
        if i < 0:
            fkey = field
            fvalue = ""
        else:
            fkey = string.strip(field[:i])
            fvalue = string.strip(field[i+1:])
        if fields.has_key(fkey):
            # Ignore it
            pass
        else:
            fields[fkey] = fvalue
    return key, fields
Example #3
 def makeIdentifier(self, string):
   string = re.sub( r"\s+", " ", string.strip())
   string = unicodedata.normalize('NFKD', safeEncode(string))
   string = re.sub(r"['\"!?@#$&%^*\(\)_+\.,;:/]","", string)
   string = re.sub(r"[_ ]+","_", string)
   string = string.strip('_')
   return string.strip().lower()
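Read as a pipeline, the method above collapses whitespace, NFKD-normalises, strips punctuation, squashes space/underscore runs to single underscores, then trims and lower-cases. A hedged trace, assuming safeEncode simply returns its input:

#   "  The Qu!ck,  Brown Fox. "
#   -> "The Qu!ck, Brown Fox."     (whitespace collapsed)
#   -> "The Quck Brown Fox"        (punctuation removed)
#   -> "The_Quck_Brown_Fox"        (space/underscore runs -> '_')
#   -> "the_quck_brown_fox"        (trimmed, lower-cased)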
Example #4
def main():
    global stack
    name = sys.argv[1]
    arch = getArchive(name)
    stack.append((name, arch))
    show(name, arch)

    while 1:
        toks = string.split(raw_input('? '), ' ', 1)
        if not toks:
            usage()
            continue
        if len(toks) == 1:
            cmd = toks[0]
            arg = ''
        else:
            cmd, arg = toks
        cmd = string.upper(cmd)
        if cmd == 'U':
            if len(stack) > 1:
                arch = stack[-1][1]
                arch.lib.close()
                del stack[-1]
            nm, arch = stack[-1]
            show(nm, arch)
        elif cmd == 'O':
            if not arg:
                arg = raw_input('open name? ')
            arg = string.strip(arg)
            arch = getArchive(arg)
            if arch is None:
                print arg, "not found"
                continue
            stack.append((arg, arch))
            show(arg, arch)
        elif cmd == 'X':
            if not arg:
                arg = raw_input('extract name? ')
            arg = string.strip(arg)
            data = getData(arg, arch)
            if data is None:
                print "Not found"
                continue
            fnm = raw_input('to filename? ')
            if not fnm:
                print `data`
            else:
                open(fnm, 'wb').write(data)
        elif cmd == 'Q':
            break
        else:
            usage()
    for (nm, arch) in stack:
        arch.lib.close()
    stack = []
    for fnm in cleanup:
        try:
            os.remove(fnm)
        except Exception, e:
            print "couldn't delete", fnm, e.args
Example #5
def readmailcapfile(fp):
    caps = {}
    while 1:
        line = fp.readline()
        if not line: break
        # Ignore comments and blank lines
        if line[0] == '#' or string.strip(line) == '':
            continue
        nextline = line
        # Join continuation lines
        while nextline[-2:] == '\\\n':
            nextline = fp.readline()
            if not nextline: nextline = '\n'
            line = line[:-2] + nextline
        # Parse the line
        key, fields = parseline(line)
        if not (key and fields):
            continue
        # Normalize the key
        types = string.splitfields(key, '/')
        for j in range(len(types)):
            types[j] = string.strip(types[j])
        key = string.lower(string.joinfields(types, '/'))
        # Update the database
        if caps.has_key(key):
            caps[key].append(fields)
        else:
            caps[key] = [fields]
    return caps
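For context, parseline() (Example #2) and readmailcapfile() above belong to the same mailcap-style reader; a hypothetical entry and the structures they produce (parsefield, not shown here, splits the line on unquoted semicolons):

# A mailcap line such as
#   video/mpeg; xmpeg %s; description="MPEG movie"
# gives parseline() -> key 'video/mpeg' and
#   fields = {'view': 'xmpeg %s', 'description': '"MPEG movie"'}
# readmailcapfile() then groups entries under the lower-cased type:
#   caps['video/mpeg'] = [{'view': 'xmpeg %s', 'description': '"MPEG movie"'}]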
Example #6
 def CleanFileData(self, data):
     """ clean the read file data and return it """
     fresh = []
     for i in data:
         if string.strip(string.replace(i, '\012', '')) != '':
             fresh.append(string.strip(string.replace(i, '\012', '')))
     return fresh
Example #7
def dropbox_init(dropbox_app_key,dropbox_app_secret):
    from dropbox import client, rest, session
    APP_KEY = dropbox_app_key
    APP_SECRET = dropbox_app_secret
    ACCESS_TYPE = 'app_folder'
    sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
    oauth_token = ''
    oauth_token_secret = ''
    # open() raises IOError if the token file does not exist yet, so guard the read
    try:
      f = open("dropbox_token.txt",'r')
      oauth_token = string.strip(f.readline())
      oauth_token_secret = string.strip(f.readline())
      f.close()
    except IOError:
      pass
    if oauth_token == '' or oauth_token_secret == '':
      request_token = sess.obtain_request_token()
      url = sess.build_authorize_url(request_token)
      print "url:", url
      print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
      raw_input()
      access_token = sess.obtain_access_token(request_token)
      f = open("dropbox_token.txt","wb")
      f.write(access_token.key + '\n')
      f.write(access_token.secret)
      f.close()
    else:
      sess.set_token(oauth_token, oauth_token_secret)
    client = client.DropboxClient(sess)
    #print "linked account:", client.account_info()
    return client
Example #8
 def get_plot(self):
     self.plot = gutils.trim(self.page,"<td valign=\"top\" align=\"left\">","</td>")
     self.plot = string.strip(self.plot.decode('latin-1'))
     self.plot = string.replace(self.plot,"<br>", " ")
     self.plot = string.replace(self.plot,"<p>", " ")
     self.plot = string.replace(self.plot,"'","_")
     self.plot = string.strip(gutils.strip_tags(self.plot))
Example #9
def formatElement(el,path):
    if (el.find("{http://www.w3.org/2001/XMLSchema}annotation") is not None):
        splitPath=string.split(path,'/')
        #set default component to "scenario", i.e. write to the "Scenario" worksheet below
        component="scenario"
        printPath=string.replace(path,"/"+component,"")
        #update component if a known child element of scenario, i.e. write to another worksheet below
        if (len(splitPath)>2):
            if (splitPath[2] in worksheets):
                component=splitPath[2]
                printPath=string.replace(printPath,"/"+component,"")
        sheet= worksheets[component][0]
        row=worksheets[component][1]
        sheet.write(row,0,string.lstrip(printPath,"/"))
        annotation=el.find("{http://www.w3.org/2001/XMLSchema}annotation")
        docuElem=annotation.find("{http://www.w3.org/2001/XMLSchema}documentation")
        if (docuElem is not None):
            docu=docuElem.text
        else:
            docu="TODO"
        content=string.strip(docu)
        sheet.write(row,1,normalizeNewlines(content),docu_xf)
        appInfoElem=el.find("{http://www.w3.org/2001/XMLSchema}annotation").find("{http://www.w3.org/2001/XMLSchema}appinfo")
        if (appInfoElem is not None):
            appInfo=string.strip(appInfoElem.text)
        else:
            appInfo="name:TODO"
        appInfoList=string.split(appInfo,";")[0:-1]
        for keyValue in appInfoList:
            splitPair=string.split(keyValue,":")
            colIndex=appinfoOrder.index(string.strip(str(splitPair[0])))+2
            sheet.write(row,colIndex,splitPair[1],docu_xf)
        #update next row to be written in that sheet
        worksheets[component][1]=worksheets[component][1]+1
Example #10
	def lineinfo(self, identifier):
		failed = (None, None, None)
		# Input is identifier, may be in single quotes
		idstring = string.split(identifier, "'")
		if len(idstring) == 1:
			# not in single quotes
			id = string.strip(idstring[0])
		elif len(idstring) == 3:
			# quoted
			id = string.strip(idstring[1])
		else:
			return failed
		if id == '': return failed
		parts = string.split(id, '.')
		# Protection for derived debuggers
		if parts[0] == 'self':
			del parts[0]
			if len(parts) == 0:
				return failed
		# Best first guess at file to look at
		fname = self.defaultFile()
		if len(parts) == 1:
			item = parts[0]
		else:
			# More than one part.
			# First is module, second is method/class
			f = self.lookupmodule(parts[0])
			if f:
				fname = f
			item = parts[1]
		answer = find_function(item, fname)
		return answer or failed
Example #11
def getGcor(tpDic1, tpDic2):
	tmpf1 = open("/home/dk/tmp/1.txt","w")
	for ap1 in sorted(tpDic1.keys()):
		line = ""
		for ap2 in sorted(tpDic1.keys()):
			line = line + str(tpDic1[ap1][ap2]) + " " 
		line = string.strip(line)
		print >> tmpf1, line
	tmpf1.close()
	tmpf2 = open("/home/dk/tmp/2.txt","w")
	for ap1 in sorted(tpDic2.keys()):
		line = ""
		for ap2 in sorted(tpDic2.keys()):
			line = line + str(tpDic2[ap1][ap2]) + " " 
		line = string.strip(line)
		print >> tmpf2, line
	tmpf2.close()
	rscript = """
library(sna)
g1 <- as.matrix(read.table(file="/home/dk/tmp/1.txt", header=F))
g2 <- as.matrix(read.table(file="/home/dk/tmp/2.txt", header=F))
gcor(g1[2:93,2:93],g2[2:93,2:93])
"""
	try:
		return float(r(rscript)[0])
	except:
		return 0.0
Example #12
 def slotSave(self):
     """Called when user clicks on Save button. Saves the copy of the
     account object in account object's place
     """
     if self._accObj.id: # else: nothing to do
         o= self._accObj
         try:
             o.num= string.strip(str(self.wAccNum.text()))
             o.name= string.strip(str(self.wAccName.text()))
             if self.useVat == 'Y':
                 o.defVat= str(self.wVat.currentItem())
             else: o.defVat= '0'
             if self.useBudget=='Y':
                 o.budget= Model.Global.moneyToInt(
                     str(self.wBudget.text()))
             else: o.budget= 0
         except Model.Exceptions.VarLimit, s:
             try: # do we have an appropriate error message?
                 m= str(self.tr(e[s[1]]))
             except KeyError: #No, fake one
                 m= str(self.tr("Error text for '%s' is not defined"))%s[1]
             msg= m + str(self.tr(
                 '\nPlease fix,\nThis account was not saved.'))
             QMessageBox.information(self, Model.Global.getAppName(), msg)
             return # Try again
         res= self._accountL.saveIfChanged(o)
         if res != 0: # changed and saved, tell the world
             self.emit(PYSIGNAL('account'), (self._accObj, 'E'))
         self.slotNew() # prepare for a new edit
Example #13
 def slotSave(self):
     """Save the new account. Called when Save-button clicked
     """
     #First, is this really a new account?
     if self._accountL.getByNum(string.strip(str(self.wAccNum.text()))):
         QMessageBox.information(self, Model.Global.getAppName(),
                 str(self.tr("This account already exists.")) + '\n' +
                  str(self.tr("Please choose a new account number.")))
     else: #no, make a new account object and fill in from fields
         o= Model.Account.Account()
         try:
             o.num= string.strip(str(self.wAccNum.text()))
             o.name= string.strip(str(self.wAccName.text()))
             if self.useVat == 'Y':
                 o.defVat= str(self.wVat.currentItem())
             else: o.defVat= '0'
             if self.useBudget=='Y':
                 o.budget= Model.Global.moneyToInt(
                     str(self.wBudget.text()))
             else: o.budget= 0
         except Model.Exceptions.VarLimit, s: #An unacceptable value
             try: # appropriate error message?
                 m= str(self.tr(e[s[1]]))
             except KeyError: #No, make a substitute
                 m= str(self.tr("Error text for '%s' is not defined"))%s[1]
                 msg= m + str(self.tr(
                     '\nPlease fix,\nThis account was not saved.'))
             QMessageBox.information(self, Model.Global.getAppName(), msg)
             return # and let the user correct
         self._accountL.saveEntry(o)
         self.emit(PYSIGNAL('account'), (o, 'N')) # Tell the world
         self.slotNew()
Example #14
 def do_meta(self, attrs):
     # attempt to pull in a description:
     name = string.strip(string.lower(attrs.get("name", "")))
     if name in ("description", "dc.description"):
         desc = string.strip(attrs.get("content", ""))
         if desc:
             self.__root.set_description(desc)
Example #15
def GetAliveMachinesWithVersions(machines, config):
  cmd = ". %s; cd %s/enterprise/legacy/util; ./get_config_version.py %s" % (
    config.ENTERPRISE_BASHRC, config.MAIN_GOOGLE3_DIR, config.ENTERPRISE_HOME)

  (err, out) = commands.getstatusoutput(
    ". %s;cd %s/enterprise/legacy/util; ./ent_exec.py -q -a60 -b "\
    " -m \"%s\" -x \"%s\"" % (
      config.ENTERPRISE_BASHRC, config.MAIN_GOOGLE3_DIR,
      string.join(machines, " "), cmd))

  out = string.split(out, "\n")
  out = map(lambda p: string.split(p, "-"), out)
  out = filter(lambda p: len(p) == 2, out)

  ret = []
  for (version, machine) in out:
    try:
      ret.append((int(version), string.strip(machine)))
    except ValueError:
      # An Invalid answer is punished
      ret.append((0, string.strip(machine)))

  #  if we have less than 1/2 of machines, something is wrong... return None
  if len(machines)/2 >= len(ret):
    return None

  #  else we return what we got
  ret.sort()
  return ret
Example #16
def test():
    """Test this module.

    The test consists of retrieving and displaying the Python
    home page, along with the error code and error string returned
    by the www.python.org server.

    """
    import sys
    import getopt
    opts, args = getopt.getopt(sys.argv[1:], 'd')
    dl = 0
    for o, a in opts:
        if o == '-d': dl = dl + 1
    host = 'www.python.org'
    selector = '/'
    if args[0:]: host = args[0]
    if args[1:]: selector = args[1]
    h = HTTP()
    h.set_debuglevel(dl)
    h.connect(host)
    h.putrequest('GET', selector)
    h.endheaders()
    errcode, errmsg, headers = h.getreply()
    print 'errcode =', errcode
    print 'errmsg  =', errmsg
    print
    if headers:
        for header in headers.headers: print string.strip(header)
    print
    print h.getfile().read()
Example #17
    def checkAuthorization(self, authorization, password, nonce, method="REGISTER"):
        hash = {}
        list = authorization.split(",")
        for elem in list:
            md = rx_kv.search(elem)
            if md:
                value = string.strip(md.group(2),'" ')
                key = string.strip(md.group(1))
                hash[key]=value
        # check nonce (response/request)
        if hash["nonce"] != nonce:
            self.server.main_logger.warning("SIP: Authentication: Incorrect nonce")
            return False

        a1="%s:%s:%s" % (hash["username"],hash["realm"], password)
        a2="%s:%s" % (method, hash["uri"])
        ha1 = hashlib.md5(a1).hexdigest()
        ha2 = hashlib.md5(a2).hexdigest()
        b = "%s:%s:%s" % (ha1,nonce,ha2)
        expected = hashlib.md5(b).hexdigest()
        if expected == hash["response"]:
            self.server.main_logger.debug("SIP: Authentication: succeeded")
            return True
        self.server.main_logger.warning("SIP: Authentication: expected= %s" % expected)
        self.server.main_logger.warning("SIP: Authentication: response= %s" % hash["response"])
        return False
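For reference, the expected value above follows the standard digest recipe: HA1 = md5(user:realm:password), HA2 = md5(method:uri), response = md5(HA1:nonce:HA2). A minimal standalone sketch with made-up credentials:

import hashlib

def expected_digest(username, realm, password, method, uri, nonce):
    # HA1 and HA2 per the digest scheme, then the combined response hash
    ha1 = hashlib.md5(("%s:%s:%s" % (username, realm, password)).encode()).hexdigest()
    ha2 = hashlib.md5(("%s:%s" % (method, uri)).encode()).hexdigest()
    return hashlib.md5(("%s:%s:%s" % (ha1, nonce, ha2)).encode()).hexdigest()

# e.g. expected_digest("alice", "example.org", "secret", "REGISTER", "sip:example.org", "abc123")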
Example #18
	def decodeValue(self, rawval):
		'''system: answer .dbt block number
		jjk  02/18/98'''
		try:
			return(string.atoi(string.strip(rawval)))
		except ValueError:
			return(string.atol(string.strip(rawval)))
Example #19
def get_server_capability(s):
    headers = s.get_response_headers()
    if headers is None:
        # No request done yet
        return {}
    cap_headers = headers.getallmatchingheaders("X-RHN-Server-Capability")
    if not cap_headers:
        return {}
    regexp = re.compile(
            r"^(?P<name>[^(]*)\((?P<version>[^)]*)\)\s*=\s*(?P<value>.*)$")
    vals = {}
    for h in cap_headers:
        arr = string.split(h, ':', 1)
        assert len(arr) == 2
        val = string.strip(arr[1])
        if not val:
            continue

        mo = regexp.match(val)
        if not mo:
            # XXX Just ignoring it, for now
            continue
        vdict = mo.groupdict()
        for k, v in vdict.items():
            vdict[k] = string.strip(v)

        vals[vdict['name']] = vdict
    return vals
Example #20
    def handle_msg(self, msg):
        if msg['body'] is None:
            return
        uid = msg['user_id']
        if not uid in self.cfusers:
            try:
                u = self.connection.connection.user(uid)
                self.cfusers[uid] = u['user']['name']
            except:
                pass

        try:
            from_nick = self.cfusers[uid]
        except:
            print 'Could not get nick'
            from_nick = 'Oops'

        info('Received from %s: %s' % (from_nick, msg['body']))
        m = re.match('%s[:,](.*)' % self.nickname, msg['body'], re.IGNORECASE)
        if m:
            cmd = m.groups()[0]
            info('Command: %s' % cmd)
            self.do_command(string.strip(cmd), string.strip(from_nick))

        # Pass the message to all listeners
        self.do_listeners(msg)
Example #21
def xload(machines):
    """
    Take list of machine names, run xosview on them,
    -> list of pids
    """
    pids = []
    for machine in machines:
        try:
            print "launching on ", machine,
            cmd = "ssh %s 'xosview &' &" % string.strip(machine)
            r = os.system( cmd)
            time.sleep(0.6)
            if r == 0:
                p="""ps -ef | grep "ssh %s xosview" | grep -v grep >> pids.dat"""\
                         % string.strip(machine)
                os.system(p)

        except Exception:
            print "xload: ", lastError()

    ## extract pids from pids.dat
    try:
        f = open("pids.dat")
        lines = f.readlines()
        for line in lines:
            pid = string.split(line)[1]
            pids += [pid]
        f.close()
        os.remove("pids.dat")
    except Exception, why:
        print "xload Error: ", lastError()

    return pids
Example #22
def getLocationIn(dateTimeList,sentence):
    #parse each segment of the sentence to get locations and insert the
    #locations into "dateTimeList" based on their order in the sentence
    dateTimeLocationList=[]
    if len(dateTimeList)>0:
        for i in xrange(len(dateTimeList)):
            if i==0:
                eIndex=dateTimeList[i][0][0]
                segment=string.strip(sentence[:eIndex])
                if len(segment)>0:
                    dateTimeLocationList.extend\
                                    (ParseEmail.extractLocation(segment))
                dateTimeLocationList.append(dateTimeList[i])
            else:
                sIndex,eIndex=dateTimeList[i-1][0][1],dateTimeList[i][0][0]
                segment=string.strip(sentence[sIndex:eIndex])
                if len(segment)>0:
                    dateTimeLocationList.extend\
                                    (ParseEmail.extractLocation(segment))
                dateTimeLocationList.append(dateTimeList[i])
            if i==len(dateTimeList)-1:
                sIndex=dateTimeList[i][0][1]
                segment=string.strip(sentence[sIndex:])
                if len(segment)>0:
                    dateTimeLocationList.extend\
                                    (ParseEmail.extractLocation(segment))
    else:
        sentence=string.strip(sentence)
        if len(sentence)>0:
            dateTimeLocationList.extend(ParseEmail.extractLocation(sentence))
    return dateTimeLocationList
Example #23
def process(fn, s, e):
    """
    Actually process the file! 
    """
    lines = open(fn, 'r').readlines()
    for line in lines:
        if line.find(",") == -1:
            continue
        if (line[0] == "*"):
            stationID = string.upper(line[3:10])
            dc = re.split(",", line)[1:-1]
            dcols = []
            for c in dc:
                dcols.append("c"+ string.strip(c))
                dcols.append("c"+ string.strip(c) +"_f")
            continue
        tokens = re.split(",", line)
        if len(tokens) < 4:
            break
        ts = mx.DateTime.DateTime(int(tokens[2]), int(tokens[0]), int(tokens[1]))
        # If this row is an hourly ob
        if int(dc[0]) == 100:
            ts = make_time( tokens[2], tokens[0], tokens[1], tokens[3])
        if ts < e:
            tstring = ts.strftime("%Y-%m-%d %H:%M")
            for i in range(len(dcols)):
                if not obs.has_key(stationID) or not obs[stationID].has_key(tstring):
                    continue
                val = tokens[4+i].strip()
                if val == "" or val.find("*") > -1:
                    val = None
                try:
                    obs[stationID][tstring][ dcols[i] ] = val
                except:
                    print stationID, dcols, i, val
Example #24
  def characters(self,chars):
    if "copy" in self.zone and ((len(self.zone) > 1 and self.zone[1] in self.fases) or (self.fases == ["all"])):
      if len(string.strip(chars)):
        resul = checkvars(self.properties,self.attribs,string.strip(chars),self.debuglevel)
        from_file = resul[1]
        if not resul[0]:
          if os.access(from_file, os.R_OK):
            self.copyaux[0].append(from_file)
            to_file = self.attribs["layout"]["to"] + self.attribs["copy"]["to"] + from_file.split("/")[-1]
            if os.path.isfile(from_file) or os.path.isdir(from_file):
              if to_file in self.archives and self.debuglevel >= 1: print "W: El fichero", to_file, "existe, se sobreescribirá"
              self.archives.append(to_file)
            else:
              self.error.append("E: Fichero origen no existe: %s" % from_file)
          else:
            self.error.append("E: El fichero %s no existe o no es accesible para lectura"%(from_file))
        else:
          if not from_file in self.error:
            self.error.append(from_file)

    elif "textfile" in self.zone and len(chars.strip()):
      resul = checkvars(self.properties,self.attribs,chars.strip(),self.debuglevel)
      if not resul[0]:
        self.textfile.append(resul[1])

    elif "include" in self.zone and len(chars.strip()):
      resul = checkvars(self.properties,self.attribs,chars.strip(),self.debuglevel)
      if not resul[0]:
        self.includes.append(resul[1])

    elif "description" in self.zone and len(chars.strip()):
      resul = checkvars(self.properties,self.attribs,chars.strip(),self.debuglevel)
      if not resul[0]:
        self.properties["info.description"] = resul[1]
Example #25
	def precontext(self):
		# Scan back for the identifier currently being typed.
		line, pos = self.cursor()
		command = self.getline()
		preceding = command[:pos]
		startchars = string.letters + "_"
		identchars = string.letters + string.digits + "_"
		while pos > 0 and preceding[pos-1] in identchars:
			pos = pos - 1
		preceding, ident = preceding[:pos], preceding[pos:]
		start = "%d.%d" % (line, pos)

		preceding = string.strip(preceding)
		context = ""
		if not ident or ident[0] in startchars:
			# Look for context before the start of the identifier.
			while preceding[-1:] == ".":
				preceding = string.strip(preceding[:-1])
				if preceding[-1] in identchars:
					pos = len(preceding)-1
					while pos > 0 and preceding[pos-1] in identchars:
						pos = pos - 1
					if preceding[pos] in startchars:
						context = preceding[pos:] + "." + context
						preceding = string.strip(preceding[:pos])
					else: break
				else: break

		line, pos = self.cursor()
		endpos = pos
		while endpos < len(command) and command[endpos] in identchars:
			endpos = endpos + 1
		end = "%d.%d" % (line, endpos)

		return command, context, ident, start, end
Example #26
 def manageStopWordsProperties(self, ids='', stop_word='', old_stop_word='', REQUEST=None):
     """ manage stop words for EEAGlossaryEngine """
     if self.utAddObjectAction(REQUEST):
         if string.strip(stop_word)=='':
             return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=6')
         else:
             if self.check_stop_words_exists(stop_word):
                 self.set_stop_words_list(stop_word)
                 self._p_changed = 1
             else:
                 return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=6')
     elif self.utUpdateObjectAction(REQUEST):
         if string.strip(stop_word)=='':
             return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=6')
         else:
             self.del_stop_words_from_list(old_stop_word)
             self.set_stop_words_list(stop_word)
             self._p_changed = 1
     elif self.utDeleteObjectAction(REQUEST):
         if not ids or len(ids) == 0:
             return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=6')
         for word in self.utConvertToList(ids):
             self.del_stop_words_from_list(word)
             self._p_changed = 1
     if REQUEST is not None:
         return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=6&save=ok')
Example #27
 def manageSubjectsProperties(self, ids=[], old_code='', code='', name='', REQUEST=None):
     """ manage subjects for EEAGlossaryEngine """
     if self.utAddObjectAction(REQUEST):
         if string.strip(code) == '' or string.strip(name) == '':
             return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=3')
         else:
             if self.check_subjects_exists(code):
                 self.set_subjects_list(code, name)
                 self._p_changed = 1
             else:
                 return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=3')
     elif self.utUpdateObjectAction(REQUEST):
         if string.strip(code) == '' or string.strip(name) == '':
             return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=3')
         else:
             self.del_subject_from_list(old_code)
             self.set_subjects_list(code, name)
             self._p_changed = 1
     elif self.utDeleteObjectAction(REQUEST):
         if not ids or len(ids) == 0:
             return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=3')
         for subj in self.utConvertToList(ids):
             self.del_subject_from_list(subj)
         self._p_changed = 1
     if REQUEST is not None:
         return REQUEST.RESPONSE.redirect('manage_properties_html?pagetab=3&save=ok')
Example #28
def runner(bndyargs, filename):
    # excstring = "(cd " + pathprefix + "; " + "python testbdy.py " +\
    #             bndyargs + " )"
    excstring = "python testbdy.py " + bndyargs
    digitsplus = "[-"+string.digits+"]"
    pipe = os.popen(excstring, 'r')
    file = open(pathprefix+filename,'r')
    mismatch = None
    lct = 0
    for linestring in file.readlines():
        lct+=1
        if re.match(digitsplus,linestring[0]):
            # Get next string from the pipe that looks like data.
            pipestring = pipe.readline()
            while not re.match(digitsplus, pipestring[0]):
                pipestring = pipe.readline()
            #
            lineset = string.split(linestring, ' ')
            pipeset = string.split(pipestring, ' ')
            # Compare data entries, howevermany there are.
            for (linevalue, pipevalue) in map(None, lineset, pipeset):
                if abs(float(linevalue)-float(pipevalue)) > tolerance:
                    mismatch = 1
                    print "\nMismatch, line %d in file %s." % (lct, filename)
                    print "   File: " + string.strip(linevalue)
                    print "Program: " + string.strip(pipevalue)
        else:
            # Discard pipe data.
            pipe.readline()

    if mismatch:
        print "Failed on file %s." % filename
    else:
        print "File %s OK." % filename
Example #29
def ssh_keygen(logger):
    """using pexpect to generate RSA"""
    logger.info("generate ssh RSA \"%s\"" % SSH_KEYGEN)
    child = pexpect.spawn(SSH_KEYGEN)
    while True:
        index = child.expect(['Enter file in which to save the key ',
                              'Enter passphrase ',
                              'Enter same passphrase again: ',
                              pexpect.EOF,
                              pexpect.TIMEOUT])
        if index == 0:
            child.sendline("\r")
        elif index == 1:
            child.sendline("\r")
        elif index == 2:
            child.sendline("\r")
        elif index == 3:
            logger.debug(string.strip(child.before))
            child.close()
            return 0
        elif index == 4:
            logger.error("ssh_keygen timeout")
            logger.debug(string.strip(child.before))
            child.close()
            return 1

    return 0
Example #30
def load_variables(export_dir,variable_uses):
  variables = []
  try:
    file = open(export_dir + "variables.txt","r")
    var_list = file.readlines()
    file.close()
    for v in var_list:
      vv = string.strip(v)
      if vv:
        variables.append(vv)
  except:
    print "variables.txt not found. Creating new variables.txt file"

  try:
    file = open(export_dir + "variable_uses.txt","r")
    var_list = file.readlines()
    file.close()
    for v in  var_list:
      vv = string.strip(v)
      if vv:
        variable_uses.append(int(vv))
  except:
    print "variable_uses.txt not found. Creating new variable_uses.txt file"

  return variables
Example #31
for i in sys.argv[1:]:
    input_files.append(i)

if (len(input_files) == 0):
    bashCommand = "find . -name *.tex "
    import subprocess
    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    stdout = process.communicate()
    something = ""
    for i in stdout:
        if i != None:
            something += i

    for i in something.split("\n"):
        i = string.strip(i)
        if len(i) == 0:
            continue
        input_files.append(i)

for input_file_name in input_files:
    with open(input_file_name) as input_file:
        prev_word = ""
        prev_line = ""
        rep_words = []
        should_print_prev_line = False
        printed_prev_line = False
        printed_cur_line = False
        total_errors = 0

        for i, line in enumerate(input_file):
Example #32
import string
import Orange
from AZutilities import getCinfonyDesc
from AZutilities import dataUtilities

data = dataUtilities.DataTable("Assay1350AZOdesc.txt")

# Read nonIID cmpds
fid = open("nonIIDlist.txt")
IDlist = []
for line in fid:
    ID = string.strip(line)
    IDlist.append(ID)
fid.close()
print "Number of non IID cmpds; ", len(IDlist)

# Select nonIID cmpds from full data set
nonIID = []
IID = []
for ex in data:
    if ex["ID"].value in IDlist:
        nonIID.append(ex)
    else:
        IID.append(ex)
print "# IID ", len(IID)
print "# nonIID ", len(nonIID)

nonIIDdata = dataUtilities.DataTable(nonIID)
IIDdata = dataUtilities.DataTable(IID)

# Partition the nonIID cmpds into a test and an external test set
Example #33
def ParseValue(string):
    string = string.strip()
    if string.startswith('[') and string.endswith(']'):
        return string.lstrip('[').rstrip(']').split()
    else:
        return string
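Note the parameter is named string, shadowing the module used elsewhere in these examples; only str methods are called, so it still works. A quick sketch of the two return shapes:

# ParseValue("[1 2 3]")    ->  ['1', '2', '3']
# ParseValue("  scalar  ") ->  'scalar'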
Example #34
 def getAutoCompleteList(self, rawCmd='', *args, **kwds):
     """Return list of auto-completion options for a command.
     
     The list of options will be based on the locals namespace."""
     try:
         actKey = rawCmd[-1]     #Was it activated by a '/', '.' or ' ' ?
         cmd = re.sub('#.*', '', rawCmd)  # remove comments
         cmd = string.strip(cmd)
         if not cmd: return None      
         
         # get lhs description
         (lhsDesc, remaining) = self.cmd.GetNextTerm( cmd )    
 
         lst = []
         
         #Get contents from the root
         if actKey == '/':
             if hasattr(self.cmd.root, 'GetContents'):
                 lst = []
                 for i in self.cmd.root.GetContents():
                     lst.append(i[0])
                     
         #Try different options
         elif actKey == '.':
             myDesc = string.split(cmd, ' ')[-1][:-1]                
             if myDesc[0] == '/': lhsObj = self.cmd.GetObject(self.cmd.root, myDesc[1:])
             else: lhsObj = self.cmd.GetObject(self.cmd.currentObj, myDesc)
             
             #Object with get contents attr
             if hasattr(lhsObj, 'GetContents'):
                 lst = []
                 for i in lhsObj.GetContents():
                     lst.append(i[0])
             
             #If it is a thermo provider, return available prop pkgs
             elif myDesc in self.cmd.thermoAdmin.GetAvThermoProviderNames():
                 thAd = self.cmd.thermoAdmin
                 lst = thAd.GetAvPropPkgNames(myDesc)
                 
             #If a folder with unit ops, then return available unit ops
             elif myDesc in unitop.__all__:
                 uop = guicmd.CommandInterface.__dict__.get(myDesc, None)
                 if hasattr(uop, 'VALID_UNIT_OPERATIONS'):
                     lst = uop.VALID_UNIT_OPERATIONS
                 
         #Is it a command?
         elif guicmd.CommandInterface.commands.has_key(lhsDesc):
             cmdActOnObj = ('cd', 'view', 'delete', 'dir', 'valueOf')
             lst = []
             if lhsDesc == 'units':
                 if actKey == ' ' and remaining == '':
                     lst = self.cmd.units.GetSetNames()
             elif lhsDesc in cmdActOnObj:
                 if actKey == ' ' and remaining == '':
                     lst = ['..', '/']
                     if hasattr(self.cmd.currentObj, 'GetContents'):
                         for i in self.cmd.currentObj.GetContents():
                             lst.append(i[0])
             elif lhsDesc == 'language':
                 if actKey == ' ' and remaining == '':
                     dct = guicmd.CommandInterface.MessageHandler.GetSupportedLanguages()
                     #dct['languages'] should have the main languages supported
                     lst = list(dct['languages'])
                     
         lst.sort()
         return lst
         
     except:
         return []
Example #35
def btjs(debugger, command, result, internal_dict):
    '''Prints a stack trace of current thread with JavaScript frames decoded.  Takes optional frame count argument'''

    target = debugger.GetSelectedTarget()
    addressFormat = '#0{width}x'.format(width=target.GetAddressByteSize() * 2 +
                                        2)
    process = target.GetProcess()
    thread = process.GetSelectedThread()

    if target.FindFunctions("JSC::ExecState::describeFrame").GetSize(
    ) or target.FindFunctions("_ZN3JSC9ExecState13describeFrameEv").GetSize():
        annotateJSFrames = True
    else:
        annotateJSFrames = False

    if not annotateJSFrames:
        print "Warning: Can't find JSC::ExecState::describeFrame() in executable to annotate JavaScript frames"

    backtraceDepth = thread.GetNumFrames()

    if len(command) > 0:
        try:
            backtraceDepth = int(command)
        except ValueError:
            return

    threadFormat = '* thread #{num}: tid = {tid:#x}, {pcAddr:' + addressFormat + '}, queue = \'{queueName}, stop reason = {stopReason}'
    print threadFormat.format(num=thread.GetIndexID(),
                              tid=thread.GetThreadID(),
                              pcAddr=thread.GetFrameAtIndex(0).GetPC(),
                              queueName=thread.GetQueueName(),
                              stopReason=thread.GetStopDescription(30))

    for frame in thread:
        if backtraceDepth < 1:
            break

        backtraceDepth = backtraceDepth - 1

        function = frame.GetFunction()

        if annotateJSFrames and not frame or not frame.GetSymbol(
        ) or frame.GetSymbol().GetName() == "llint_entry":
            callFrame = frame.GetSP()
            JSFrameDescription = frame.EvaluateExpression(
                "((JSC::ExecState*)0x%x)->describeFrame()" %
                frame.GetFP()).GetSummary()
            if not JSFrameDescription:
                JSFrameDescription = frame.EvaluateExpression(
                    "((JSC::CallFrame*)0x%x)->describeFrame()" %
                    frame.GetFP()).GetSummary()
            if not JSFrameDescription:
                JSFrameDescription = frame.EvaluateExpression(
                    "(char*)_ZN3JSC9ExecState13describeFrameEv(0x%x)" %
                    frame.GetFP()).GetSummary()
            if JSFrameDescription:
                JSFrameDescription = string.strip(JSFrameDescription, '"')
                frameFormat = '    frame #{num}: {addr:' + addressFormat + '} {desc}'
                print frameFormat.format(num=frame.GetFrameID(),
                                         addr=frame.GetPC(),
                                         desc=JSFrameDescription)
                continue
        print '    %s' % frame
Example #36
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import pyfits as pyf
from string import strip

import os

set2_dir = "../inputs/SFHs_set2/"

sample = {}
for line in open(set2_dir + "ssag.log") :
	name_bc, name_ic = line[:-1].split("=")

	sample[strip(name_bc) + ".sed"] = "thread" + strip(name_ic) + ".fits.gz"

mass_g, mass_i = [], []
for j in xrange(10) :
	#fig, axs = plt.subplots(ncols = 2, nrows = 5, figsize = (7, 9))
	#axs = np.ravel(axs)
	for i, key in enumerate(sample.keys()[j*10:(j + 1)*10]) :

		s = open(set2_dir + "seds/" + key, "r")
		lines = s.readlines()
		
		f = pyf.open(set2_dir + sample[key])
		wl, flux = np.loadtxt(set2_dir + "seds/" + key, usecols = (0, 2), unpack = True)

		mass_g.append(eval(lines[33].split("=")[1][:-4]) * 1e9)
		mass_i.append(f[0].header["tform"] - f[0].header["burstage"])
Example #37
    def do_request(self, json_obj):
        headers = {
            'Content-Type': 'application/json-rpc',
            'User-Agent': 'python/zabbix_api'
        }

        if self.httpuser:
            self.debug(logging.INFO, "HTTP Auth enabled")
            auth = 'Basic ' + string.strip(
                base64.encodestring(self.httpuser + ':' + self.httppasswd))
            headers['Authorization'] = auth
        self.r_query.append(str(json_obj))
        self.debug(logging.INFO, "Sending: " + str(json_obj))
        self.debug(logging.DEBUG, "Sending headers: " + str(headers))

        request = urllib2.Request(url=self.url,
                                  data=json_obj.encode('utf-8'),
                                  headers=headers)
        if self.proto == "https":
            https_handler = urllib2.HTTPSHandler(debuglevel=0)
            opener = urllib2.build_opener(https_handler)
        elif self.proto == "http":
            http_handler = urllib2.HTTPHandler(debuglevel=0)
            opener = urllib2.build_opener(http_handler)
        else:
            raise ZabbixAPIException("Unknow protocol %s" % self.proto)

        urllib2.install_opener(opener)
        try:
            response = opener.open(request, timeout=self.timeout)
        except Exception as e:
            raise ZabbixAPIException(
                "Site needs HTTP authentication. Error: " + str(e))
        self.debug(logging.INFO, "Response Code: " + str(response.code))

        # NOTE: Getting a 412 response code means the headers are not in the
        # list of allowed headers.
        if response.code != 200:
            raise ZabbixAPIException("HTTP ERROR %s: %s" %
                                     (response.status, response.reason))
        reads = response.read()
        if len(reads) == 0:
            raise ZabbixAPIException("Received zero answer")
        try:
            jobj = json.loads(reads.decode('utf-8'))
        except ValueError as msg:
            print("unable to decode. returned string: %s" % reads)
            sys.exit(-1)
        self.debug(logging.DEBUG, "Response Body: " + str(jobj))

        self.id += 1

        if 'error' in jobj:  # some exception
            msg = "Error %s: %s, %s while sending %s" % (
                jobj['error']['code'], jobj['error']['message'],
                jobj['error']['data'], str(json_obj))
            if re.search(".*already\sexists.*", jobj["error"]["data"],
                         re.I):  # already exists
                raise Already_Exists(msg, jobj['error']['code'])
            else:
                raise ZabbixAPIException(msg, jobj['error']['code'])
        return jobj
Example #38
    def handle(self, *args, **options):
        BASE_TEMPLATE_PATH = os.path.join( settings.ABSOLUTE_PATH, 'templates/autocreate_templates/model_%s.template' )
        kwargs={}  # @UnusedVariable
        include_fields = []  # for creating *both* model admins and forms
        exclude_fields = []  # for creating *both* model admins and forms
        inline_admins = []  # only for creating model admins
        fkey_models = [] # for api only
        model_form = {} # for api only

        #
        #  qc
        #
        if not options['model_name'] and not options['stub_type']:
            raise CommandError('You need to provide a --model-name and --stub-type [ forms, admin, api ]')
        if options['stub_type'] not in [ 'forms', 'admin', 'api' ]:
            raise CommandError('--stub-type flag must be set to "forms" or "admin" or "api"')
        if options['stub_type'] == 'api' and not options['model_form_name']:
            raise CommandError('where --stub-type is "api" you must set the --model-form-name flag')

        #
        #  main
        #
        content_type_model = ContentType.objects.filter( model__iexact=options['model_name'] )
        logger.debug("[ TEMPLATE ]: %s" % BASE_TEMPLATE_PATH)
        BASE_TEMPLATE_PATH = BASE_TEMPLATE_PATH % options['stub_type']
        if content_type_model.exists() and content_type_model.count() == 1:
            model_name = content_type_model[0].model
            app_label = content_type_model[0].app_label
                

            #
            #  the content_type.model_name is different than model.model_name
            #  so we need content_type.model_name to get model class
            #  and then we override it with model.object_name
            #
            model = get_model( app_label, model_name )
            module_name = model._meta.module_name
            ctype_model_name = model_name # save the content_type version for later use
            model_name = model._meta.object_name
            
            
            #
            #
            #  get the app name
            #  in weird nested situations
            #  where apps are inside other apps
            #  we don't know the full context path
            #  so we finagle it by getting the app
            #  and looking at its __name__ to understand
            #  where it's imported from
            #  which is something like <module>.app_name.models
            #
            full_app_label = get_app(app_label).__name__
        
            #
            #
            #  __path__ used to get full path for writes...gives back full path
            #  to an apps's model directory
            #
            #
            relative_app_path = get_app(app_label).__path__[0]   
            #
            #
            #  try to import model form or
            #  throw error
            #
            #
            if options['stub_type'] == 'api':
                try:
                    full_forms_import = string.replace( full_app_label, 'models', 'forms' )
                    model_form_class = import_mods( full_forms_import, options['model_form_name'])
                    model_form.update( { 'model_form' : model_form_class.__name__, 'model_form_app_label': full_forms_import } )
                    logger.debug( "[ FORM ]: success retrieved model form = %s" % model_form )
                except Exception as e:
                    logger.exception( e )
                    raise CommandError( "it seems the model-form class you passed in was not found. Did you spell it right? Does it exist in <app_name>/<forms_dir>/<form>.py structure?")
                    sys.exit( 1 )
        

            opts = getattr( model, '_meta', None )
            if not opts:
                raise CommandError('Cannot find _meta attribute on model')

            for f in opts.get_all_field_names():
                field, model, direct, m2m = opts.get_field_by_name(f)  # @UnusedVariable
                name = field.name
                logger.info( "[ FIELD ]: %s " % name )
                if not direct: # relation or many field from another model
                    name = field.get_accessor_name()
                    field = field.field
                    if field.rel.multiple: # m2m or fk to this model
                        #logger.debug( "m2m or fkey TO this model")
                        #exclude_fields.append( name )
                        pass
                    else: # o2o
                        #logger.debug( "o2o TO this model")
                        #exclude_fields.append( name )
                        pass
                else: # relation, many or field from this model
                    if field.rel: # relation or many field
                        if hasattr(field.rel, 'through'): # m2m
                            #logger.debug( "m2m FROM this model")
                            exclude_fields.append( [ name,  string.strip( field._description().split(':')[1] ) ] )
                            if options['stub_type'] == 'api':
                                #
                                #  skip over fkey of auth user for now
                                #
                                if name != 'user':
                                    #
                                    # 
                                    #  FIGURE OUT HOW WE GET TO FKEY FIELD MODEL NAME
                                    #
                                    #
                                    template_object = get_model_by_content_type( field.model.__name__ )
                                    # update the object name to be api import
                                    model_label = template_object['model_name_app_label']
                                    api_label = string.replace(model_label, 'models', 'api' )
                                    template_object.update( { 'model_name_app_label' : api_label } )
                                    template_object.update( { 'field_name' : name } )
                                    fkey_models.append( template_object )
                        else: #o2o or fkey
                            #logger.debug( "o2o  or fkey FROM this model")
                            if not hasattr( field, 'parent_model'):  # skips fkeys that are <model>_set()
                                exclude_fields.append( [ name,  string.strip( field._description().split(':')[1] ) ] )
                                if options['stub_type'] == 'api':
                                    #
                                    #  skip over fkey of auth user for now
                                    #
                                    if name != 'user':
                                        #
                                        # 
                                        #  FIGURE OUT HOW WE GET TO FKEY FIELD MODEL NAME
                                        #
                                        #
                                        template_object = get_model_by_content_type( field.model.__name__ )
                                        # update the object name to be api import
                                        model_label = template_object['model_name_app_label']
                                        api_label = string.replace(model_label, 'models', 'api' )
                                        template_object.update( { 'model_name_app_label' : api_label } )
                                        template_object.update( { 'field_name' : name } )
                                        fkey_models.append( template_object )
                    else: # standard field#
                        #logger.debug( "standard field")
                        if name != 'id':  # we want all standard fields except ID
                            include_fields.append( [ name,  string.strip( field._description().split(':')[1] ) ] )
                        
   
            #
            #
            #  render templates
            #
            #
            mod_context = {
                    'app_label' : full_app_label,
                    'model_name' : model_name,
                    'module_name' : module_name,
                    'include_fields' : include_fields,
                    'exclude_fields'  : [],  # we want excluded fields in our model forms by default
            }
            if options['stub_type'] == 'admin':
                mod_context = {
                    'inline_admins' : inline_admins,
                    'show_fields' : include_fields + exclude_fields,
                    'search_fields' : [ i for i in include_fields if i not in exclude_fields ] ,
                    'order_fields' : [ include_fields[0] ],
                    'filter_fields' : [ i for i in include_fields if i not in exclude_fields ],
                    'app_label' : full_app_label,
                    'model_name' : model_name,
                    'module_name' : module_name,
                    'include_fields' : include_fields,
                    'exclude_fields'  : exclude_fields,
                }
            elif options['stub_type'] == 'api':
                mod_context = {

                    'app_label' : full_app_label,
                    'fkey_models' : fkey_models, 
                    'model_form' : model_form,
                    'model_name' : model_name,
                    'module_name' : module_name,
                    'include_fields' : include_fields,
                    'exclude_fields'  : exclude_fields,
                }        
                
            context  = Context( mod_context )
            t = Template( open( BASE_TEMPLATE_PATH, "r" ).read() )
            model_form_class = t.render( context )

            #
            #
            #  check for existence of app subdirs
            #
            #
            create_or_replace_app_subdirs( os.path.dirname( relative_app_path  ), options['stub_type'] )
            #
            #
            #  write out the model_class
            #
            #    
            import_type = 'Admin'
            if options['stub_type'] == 'api': 
                import_type = 'Resource'
            elif options['stub_type'] == 'forms': 
                import_type = 'Form'
                
            
    
            if not options['dry_run']:
                write_model_form( 
                    os.path.join( 
                        os.path.dirname( relative_app_path ), options['stub_type'], "%s_model_%s.py" % ( ctype_model_name, import_type.lower() if import_type != 'Resource' else 'api' )
                    ), 
                    model_form_class )
            else:
                logger.debug('\n\n\n')            
                logger.debug('---------------------------------------------------')
                logger.debug('------- COPY THESE TO YOUR MODEL FORMS ------')
                logger.debug('---------------------------------------------------\n')
                logger.info( model_form_class )
            

            if not options['dry_run']:
                write_to_init( 
                    os.path.join( 
                        os.path.dirname( relative_app_path ), options['stub_type'], "__init__.py"
                    ), 
                    "from %s_model_%s import %sModel%s\n" % 
                    ( module_name, import_type.lower() if import_type != 'Resource' else 'api', model_name, import_type )  )
            else:
                logger.debug('\n\n\n')
                logger.debug('---------------------------------------------------')
                logger.debug('------- __INIT__.py ------')
                logger.debug('---------------------------------------------------\n')
                logger.info( "from %s_model_%s import %sModel%s\n" % 
                    ( module_name, import_type.lower() if import_type != 'Resource' else 'api', model_name, import_type )   )
                logger.debug('\n\n\n')
            
            
        else:
            raise CommandError('That model name does not exist yet in content_types table. Have you added it to settings.INSTALLED_APPS and ran syncdb?')
Example #39
    piece_length = 2 ** piece_len_exp

    encoding = None
    if params.has_key('filesystem_encoding'):
        encoding = params['filesystem_encoding']
    if not encoding:
        encoding = ENCODING
    if not encoding:
        encoding = 'ascii'
    
    info = makeinfo(file, piece_length, encoding, flag, progress, progress_percent)
    if flag.isSet():
        return
    check_info(info)
    h = open(f, 'wb')
    data = {'info': info, 'announce': strip(url), 'creation date': long(time())}
    
    if params.has_key('comment') and params['comment']:
        data['comment'] = params['comment']
        
    if params.has_key('real_announce_list'):    # shortcut for progs calling in from outside
        data['announce-list'] = params['real_announce_list']
    elif params.has_key('announce_list') and params['announce_list']:
        l = []
        for tier in params['announce_list'].split('|'):
            l.append(tier.split(','))
        data['announce-list'] = l
        
    if params.has_key('real_httpseeds'):    # shortcut for progs calling in from outside
        data['httpseeds'] = params['real_httpseeds']
    elif params.has_key('httpseeds') and params['httpseeds']:
Example #40
# f<id>_params: complete params
#
# ex:
# define(`f1_ret', `void')
# define(`f1_name', `Accum')
# define(`f1_params', ``GLenum op, GLfloat value'')
#

import sys, string
from read import read_gl

(gl, wgl, glX) = read_gl(sys.stdin)

sys.stdout.write('define(`gl_start\', `0\')\n')
sys.stdout.write('define(`gl_end\', `%d\')\n' % int(len(gl) - 1))
sys.stdout.write('define(`wgl_start\', `%d\')\n' % int(len(gl)))
sys.stdout.write('define(`wgl_end\', `%d\')\n' % int(len(gl) + len(wgl) - 1))
sys.stdout.write('define(`glX_start\', `%d\')\n' % int(len(gl) + len(wgl)))
sys.stdout.write('define(`glX_end\', `%d\')\n' %
                 int(len(gl) + len(wgl) + len(glX) - 1))

i = 0
for l in (gl, wgl, glX):
    for t in l:
        # process ret type to strip trailing spaces
        t[0] = string.strip(t[0])
        sys.stdout.write('define(`f%d_ret\', `%s\')\n' % (i, t[0]))
        sys.stdout.write('define(`f%d_name\', `%s\')\n' % (i, t[2]))
        sys.stdout.write('define(`f%d_params\', ``%s\'\')\n' % (i, t[3]))
        i += 1
Example #41
def getDefault(name):
	name = string.strip(name)
	if defaultmap.has_key(name):
		print "  CONVERTING ", name, " TO ", defaultmap[name]
		return defaultmap[name]
	return name
Example #42
class DiFXJoblist:
    """
	Storage class for the contents of a difx .joblist file
	"""
    def __init__(self, path, filename):
        """
		Constructor
		"""

        # check that path exists
        if not os.path.isdir(path):
            raise IOError("Directory does not exist: " + path)
        # check that joblist file exists
        if not os.path.isfile(os.path.join(path, filename)):
            raise IOError("Joblist does not exist: " +
                          os.path.join(path, filename))

        self.path = path
        self.filename = filename
        self.overrideVersion = False
        self.parameters = None
        self.jobs = []

        self.__parse__()

    def __parse__(self):

        # Parse the joblist file
        try:
            data = open(os.path.join(self.path, self.filename)).readlines()
        except Exception, ex:
            raise IOError(ex)

        if len(data) < 2:
            print 'Malformed .joblist file %s' % self.filename
            #TODO throw exception instead
            exit(0)

        # parse the first line of the joblist file and
        # separate it into key/value pairs
        self.parameters = parseKeyValue(data[0])

        if len(self.parameters) < 1:
            # TODO throw exception instead
            print 'Malformed .joblist file %s line 1' % self.filename
            exit(0)

        # parse through the rest of the joblist file
        for line in range(1, len(data)):

            job = self.Job(self.path)

            # split the line in two: job description and antenna list
            (jobDesc, antennaList) = split(data[line], '#')

            # parse the job description
            s = split(strip(jobDesc))

            if len(s) >= 7:
                jobName = s[0]
                jobParts = split(jobName, '_')
                if (len(jobParts) != 2):
                    #TODO throw exception instead
                    print 'Malformed job name "%s" in .joblist file; should be of the form name_number' % jobName
                    exit(0)
                else:
                    job.name = jobName
                    job.number = int(jobParts[1])

                job.mjdStart = float(s[1])
                job.mjdStop = float(s[2])
                job.nAnt = int(s[3])
                job.nPulsarBins = int(s[4])
                job.nSourceFiles = int(s[5])
                job.computationalLoad = float(s[6])

            if len(s) == 8:
                job.outputSize = int(float(s[7]))
            else:
                job.outputSize = 0

            # parse the antenna list
            antennas = []
            s = split(strip(antennaList))
            for antenna in s:
                antennas.append(antenna)

            job.antennas = antennas
            job.nNonVLBAAntennas = job.countNonVLBAAntennas()

            self.jobs.append(job)
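For context, __parse__ expects a first line of key=value parameters followed by one line per job: at least seven whitespace-separated fields, then a '#' and the antenna list. A minimal sketch of input it would accept, assuming parseKeyValue and the Job inner class are available from the same module (the parameter names and values below are illustrative only):

    sample = (
        'exper=test  v2d=test.v2d  pass=1  DiFX=DIFX-2.6\n'
        'test_1  59000.50  59000.55  4  0  1  1.0  # BR FD HN LA\n'
    )
    open('/tmp/test.joblist', 'w').write(sample)
    joblist = DiFXJoblist('/tmp', 'test.joblist')   # parses on construction
    print('%s uses %d antennas' % (joblist.jobs[0].name, joblist.jobs[0].nAnt))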
Example #43
0
    def handle__copyright(self, arg):

        self.print_header()
        print string.strip(self.copyright % self.__dict__)
        print
        return 0
Example #44
0
def Start(filename):
    global X
    global VarNum
    global production
    result = ''
    #read data case line by line from file
    try:
        br = open(filename, 'r')

        #example on Page 8 of lecture 15_CFL5
        production = [[0] * 3 for i in range(7)]
        production[0][0] = 0
        production[0][1] = 1
        production[0][2] = 2  #S->AB
        production[1][0] = 1
        production[1][1] = 2
        production[1][2] = 3  #A->BC
        production[2][0] = 1
        production[2][1] = 0
        production[2][2] = -1  #A->a
        production[3][0] = 2
        production[3][1] = 1
        production[3][2] = 3  #B->AC
        production[4][0] = 2
        production[4][1] = 1
        production[4][2] = -1  #B->b
        production[5][0] = 3
        production[5][1] = 0
        production[5][2] = -1  #C->a
        production[6][0] = 3
        production[6][1] = 1
        production[6][2] = -1  #C->b

        result = ''
        #Read File Line By Line
        for string in br:
            string = string.strip()
            print 'Processing ' + string + '...'
            length = len(string)
            w = [0] * length
            for i in range(length):
                w[i] = ord(string[i]) - ord(
                    'a')  #convert 'a' to 0 and 'b' to 1
            #Use CYK algorithm to calculate X
            calcCYK(w)
            #Get/print the full table X
            for step in range(length - 1, -1, -1):
                for i in range(length - step):
                    j = i + step
                    for k in range(VarNum):
                        if (X[i][j][k]):
                            result = result + str(k)
                    result = result + ' '
                result = result + '\n'

        # Close the input stream
        br.close()
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print "*** print_exception:"
        traceback.print_exception(exc_type,
                                  exc_value,
                                  exc_traceback,
                                  limit=2,
                                  file=sys.stdout)
        result = result + 'error'
    return result
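The production table encodes a grammar in Chomsky normal form with variables S=0, A=1, B=2, C=3 and terminals a=0, b=1: each row is [lhs, x, y], where y >= 0 means the binary rule lhs -> X Y and y == -1 means the terminal rule lhs -> x. A small decoding sketch using the same table defined above:

    variables = ['S', 'A', 'B', 'C']
    terminals = ['a', 'b']
    for lhs, x, y in production:
        if y == -1:
            print('%s -> %s' % (variables[lhs], terminals[x]))       # terminal rule
        else:
            print('%s -> %s%s' % (variables[lhs], variables[x], variables[y]))
    # prints: S -> AB, A -> BC, A -> a, B -> AC, B -> b, C -> a, C -> b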
Example #45
0
def processdate(datatype, yymm):
    currtime = time.strftime('%Y/%m/%d %H:%M:%S')
    logtime = time.strftime('%Y%m%d%H%M%S')
    hwlog = open(
        'log/' + datatype + '_processing_' + yymm + '_' + logtime + '.log',
        'a')
    hwlog.write("#####################################################\n")
    hwlog.write(" Starting " + datatype + '_' + yymm + "\n")
    hwlog.write("#####################################################\n")
    hwlog.write(currtime + ':' + "Creating HDFS directory for " + datatype +
                " (" + yymm + ") ........\n")

    sqlstmt = 'select INSERVSERIAL,YYMMDD, FILEPATH, FILE_NAME,STATS_FILEID from vw_' + datatype + '_current where yymmdd=\'' + yymm + '\' and file_name like \'%debug%\'  order by inservserial desc'
    constr = 'ods/[email protected]:1521/callhomeods'
    oraconn = oracon.openconnect(constr)
    currtime = time.strftime('%Y/%m/%d %H:%M:%S')
    hwlog.write(currtime + ':' + "Collecting files from DB")

    resultrec = oracon.execSql(oraconn, sqlstmt)
    dbloadfl = open(
        'sql/' + datatype + '_' + str(yymm) + '_' + logtime + '.sql', 'w')
    vertflsql = 'sql/' + datatype + '_' + str(yymm) + '_' + logtime + '.sql'
    ctr = 0
    fctr = 0
    totfiles = 0
    maxthread = 100
    previnserv = 0
    filectr = 0
    nctr = 1
    for rec in resultrec:
        inserv = rec[0]
        if previnserv < inserv:
            if previnserv > 0:
                currtime = time.strftime('%Y/%m/%d %H:%M:%S')
                hwlog.write(currtime + ':' + 'Records for the inserv ' +
                            str(previnserv) + ' : ' + str(filectr))
                hwlog.flush()
            previnserv = inserv
            filectr = 0
        yymmdd = rec[1]
        filenamepath = rec[2]
        filename = rec[3]
        fileid = rec[4]

        try:
            checkcreatehdfsfolder(datatype, str(yymmdd), inserv)
            flstatus = commands.getoutput(
                'curl -v -X GET "http://*****:*****@llhome -h ' + verticanode +
                          ' callhomedb & >> /root/proc/log/' + datatype + '_' +
                          str(yymm) + '.sql.log')
                currtime = time.strftime('%Y/%m/%d %H:%M:%S')
                hwlog.write(currtime + ':' + 'Done loading to vertica\n')
                nctr += 1

                ctr = 0
                dbloadfl = open('sql/' + datatype + '_' + str(yymm) + '.sql',
                                'w')
                vertflsql = 'sql/' + datatype + '_' + str(yymm) + '.sql'
                hwlog.flush()

            ctr += 1
            totfiles += 1
        except:
            currtime = time.strftime('%Y/%m/%d %H:%M:%S')
            hwlog.write(currtime + ':' + "Error : " + str(sys.exc_info()[1]) +
                        "\n")
            break
    currtime = time.strftime('%Y/%m/%d %H:%M:%S')
    hwlog.write(currtime + ':' + 'Files for ' + str(yymm) + ': are ' +
                str(totfiles) + '\n')
    totfiles = 0
    hwlog.flush()

    dbloadfl.close()
    time.sleep(60)
    currtime = time.strftime('%Y/%m/%d %H:%M:%S')
    hwlog.write(currtime + ':' + 'Executing Vertica Script for ' + vertflsql +
                '\n')
    os.system(
        '/opt/vertica/bin/vsql -f ' + vertflsql +
        ' -U dbadmin -w c@llhome -h callhomelab-vertica01 callhomedb & > /root/proc/log/'
        + datatype + '_' + str(yymm) + '.sql.log')
    currtime = time.strftime('%Y/%m/%d %H:%M:%S')
    hwlog.write(currtime + ':' + 'Done loading to vertica\n')
    resultrec.close()
    currtime = time.strftime('%Y/%m/%d %H:%M:%S')
    hwlog.write(currtime + ':' + "Done processing " + str(yymm))
    hwlog.close()
    oraconn.close()
Example #46
0
###############################################################################
# BUILD THE MAKEFILE
###############################################################################

# this function (in buildtools.py) generates the makefile
# it's currently a bit ugly but it'll get cleaned up soon
buildMakefile(CTX)

###############################################################################
# RUN THE MAKEFILE
###############################################################################
numHardwareThreads = 4
if CTX.PLATFORM == "Darwin":
    numHardwareThreads = 0
    output = commands.getstatusoutput("sysctl hw.ncpu")
    numHardwareThreads = int(string.strip(string.split(output[1])[1]))
elif CTX.PLATFORM == "Linux":
    numHardwareThreads = 0
    for line in open('/proc/cpuinfo').readlines():
        name_value = map(string.strip, string.split(line, ':', 1))
        if len(name_value) != 2:
            continue
        name,value = name_value
        if name == "processor":
            numHardwareThreads = numHardwareThreads + 1
print "Detected %d hardware threads to use during the build" % (numHardwareThreads)

retval = os.system("make --directory=%s -j%d" % (CTX.OUTPUT_PREFIX, numHardwareThreads))
print "Make returned: ", retval
if retval != 0:
    sys.exit(-1)
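On any Python with the multiprocessing module (2.6+), the same hardware-thread count can be obtained portably; a behaviour-equivalent sketch, not what the build script above actually does:

    import multiprocessing

    try:
        numHardwareThreads = multiprocessing.cpu_count()   # covers Darwin, Linux and others
    except NotImplementedError:
        numHardwareThreads = 4                             # keep the script's original default
    print("Detected %d hardware threads to use during the build" % numHardwareThreads)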
Example #47
0
    def __read(self, fp, fpname):
        """Parse a sectioned setup file.

        The sections in setup file contains a title line at the top,
        indicated by a name in square brackets (`[]'), plus key/value
        options lines, indicated by `name: value' format lines.
        Continuation are represented by an embedded newline then
        leading whitespace.  Blank lines, lines beginning with a '#',
        and just about everything else is ignored.
        """
        cursect = None  # None, or a dictionary
        optname = None
        lineno = 0
        e = None  # None, or an exception
        while 1:
            line = fp.readline()
            if not line:
                break
            lineno = lineno + 1
            # comment or blank line?
            if string.strip(line) == '' or line[0] in '#;':
                continue
            if string.lower(string.split(line)[0]) == 'rem' \
               and line[0] in "rR":      # no leading whitespace
                continue
            # continuation line?
            if line[0] in ' \t' and cursect is not None and optname:
                value = string.strip(line)
                if value:
                    cursect[optname] = cursect[optname] + '\n ' + value
            # a section header or option header?
            else:
                # is it a section header?
                mo = self.SECTCRE.match(line)
                if mo:
                    sectname = mo.group('header')
                    if self.__sections.has_key(sectname):
                        cursect = self.__sections[sectname]
                    elif sectname == DEFAULTSECT:
                        cursect = self.__defaults
                    else:
                        cursect = {'__name__': sectname}
                        self.__sections[sectname] = cursect
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, ` line `)
                # an option line?
                else:
                    mo = self.OPTCRE.match(line)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        if vi in ('=', ':') and ';' in optval:
                            # ';' is a comment delimiter only if it follows
                            # a spacing character
                            pos = string.find(optval, ';')
                            if pos and optval[pos - 1] in string.whitespace:
                                optval = optval[:pos]
                        optval = string.strip(optval)
                        # allow empty values
                        if optval == '""':
                            optval = ''
                        cursect[optname] = optval
                    else:
                        # a non-fatal parsing error occurred.  set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        if not e:
                            e = ParsingError(fpname)
                        e.append(lineno, ` line `)
        # if any parsing errors occurred, raise an exception
        if e:
            raise e
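A small input exercising every branch the docstring describes -- section header, option line, comment, continuation -- assuming SECTCRE and OPTCRE are the stock ConfigParser patterns (an assumption; they are not shown here):

    from StringIO import StringIO   # Python 2, to feed __read a file-like object

    sample = StringIO(
        "[server]\n"
        "host: example.org\n"
        "; ignored comment\n"
        "greeting: hello\n"
        "  world\n"          # continuation line, appended as '\n world'
    )
    # after parser.__read(sample, '<sample>') the 'server' section holds
    # {'host': 'example.org', 'greeting': 'hello\n world'}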
Example #48
0
    def getNetworkList(self):
        if self.oldInterfaceState is None:
            self.oldInterfaceState = iNetwork.getAdapterAttribute(
                self.iface, "up")
        if self.oldInterfaceState is False:
            if iNetwork.getAdapterAttribute(self.iface, "up") is False:
                iNetwork.setAdapterAttribute(self.iface, "up", True)
                system("ifconfig " + self.iface + " up")

        ifobj = Wireless(self.iface)  # a Wireless NIC Object

        try:
            scanresults = ifobj.scan()
        except:
            scanresults = None
            print "[Wlan.py] No wireless networks could be found"
        aps = {}
        if scanresults is not None:
            (num_channels, frequencies) = ifobj.getChannelInfo()
            index = 1
            for result in scanresults:
                bssid = result.bssid

                if result.encode.flags & wififlags.IW_ENCODE_DISABLED > 0:
                    encryption = False
                elif result.encode.flags & wififlags.IW_ENCODE_NOKEY > 0:
                    encryption = True
                else:
                    encryption = None

                signal = str(result.quality.siglevel - 0x100) + " dBm"
                quality = "%s/%s" % (result.quality.quality,
                                     ifobj.getQualityMax().quality)

                extra = []
                for element in result.custom:
                    element = element.encode()
                    extra.append(strip(self.asciify(element)))
                for element in extra:
                    if 'SignalStrength' in element:
                        signal = element[element.index('SignalStrength') +
                                         15:element.index(',L')]
                    if 'LinkQuality' in element:
                        quality = element[element.index('LinkQuality') +
                                          12:len(element)]

                # noinspection PyProtectedMember
                aps[bssid] = {
                    'active':
                    True,
                    'bssid':
                    result.bssid,
                    'channel':
                    frequencies.index(
                        ifobj._formatFrequency(
                            result.frequency.getFrequency())) + 1,
                    'encrypted':
                    encryption,
                    'essid':
                    strip(self.asciify(result.essid)),
                    'iface':
                    self.iface,
                    'maxrate':
                    ifobj._formatBitrate(result.rate[-1][-1]),
                    'noise':
                    '',  #result.quality.nlevel-0x100,
                    'quality':
                    str(quality),
                    'signal':
                    str(signal),
                    'custom':
                    extra,
                }

                index += 1
        return aps
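getNetworkList relies on an asciify() helper that is not shown; a plausible sketch of what it would need to do (an assumption, not the original implementation) is simply to blank out non-printable bytes so the later strip() and index() calls are safe:

    def asciify(self, text):
        # replace anything outside printable ASCII with a space
        return ''.join(c if 32 <= ord(c) < 127 else ' ' for c in text)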
Example #49
0
    def getSupportedRasters(self):
        if self.supportedRasters != None:
            return self.supportedRasters

        # first get the GDAL driver manager
        if gdal.GetDriverCount() == 0:
            gdal.AllRegister()

        self.supportedRasters = dict()
        jp2Driver = None

        # for each loaded GDAL driver
        for i in range(gdal.GetDriverCount()):
            driver = gdal.GetDriver(i)

            if driver == None:
                QgsLogger.warning("unable to get driver " + str(i))
                continue

            # now we need to see if the driver is for something currently
            # supported; if not, we give it a miss for the next driver

            longName = string.strip(re.sub('\(.*$', '', driver.LongName))
            shortName = string.strip(re.sub('\(.*$', '', driver.ShortName))
            extensions = ''

            description = driver.GetDescription()
            glob = []

            metadata = driver.GetMetadata()
            if metadata.has_key(gdal.DMD_EXTENSION):
                extensions = str(metadata[gdal.DMD_EXTENSION])

            if longName != '':
                if extensions != '':
                    # XXX add check for SDTS; in that case we want (*CATD.DDF)

                    #TODO fix and test
                    #glob.append( QString("*." + extensions.replace("/", " *.")).split(" "))
                    glob.append(
                        string.split("*." +
                                     string.replace(extensions, "/", " *."),
                                     sep=(" ")))

                    # Add only the first JP2 driver found to the filter list (it's the one GDAL uses)
                    if description == "JPEG2000" or description.startswith(
                            "JP2"):  # JP2ECW, JP2KAK, JP2MrSID
                        if jp2Driver != None:
                            continue  # skip if already found a JP2 driver
                        jp2Driver = driver  # first JP2 driver found
                        glob.append("*.j2k")  # add alternate extension
                    elif description == "GTiff":
                        glob.append("*.tiff")
                    elif description == "JPEG":
                        glob.append("*.jpeg")
                else:
                    # USGS DEMs use "*.dem"
                    if description.startswith("USGSDEM"):
                        glob.append("*.dem")
                    elif description.startswith("DTED"):
                        # DTED use "*.dt0"
                        glob.append("*.dt0")
                    elif description.startswith("MrSID"):
                        # MrSID use "*.sid"
                        glob.append("*.sid")
                    else:
                        continue

                self.supportedRasters[longName] = {
                    'EXTENSIONS': glob,
                    'LONGNAME': longName,
                    'SHORTNAME': shortName,
                    'DESCRIPTION': description
                }

        return self.supportedRasters
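The returned dictionary is typically flattened into a file-dialog filter; a short usage sketch under that assumption ('provider' stands for whatever object defines the method above, and the filter format is illustrative):

    rasters = provider.getSupportedRasters()
    filters = []
    for name, info in sorted(rasters.items()):
        patterns = []
        for entry in info['EXTENSIONS']:
            # entries are either lists of globs (from the extension string) or single strings ('*.j2k')
            patterns.extend(entry if isinstance(entry, list) else [entry])
        filters.append('%s (%s)' % (name, ' '.join(patterns)))
    print(';;'.join(filters))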
Example #50
0
 def handle_data(self, newtext):
     newtext = string.strip(newtext)
     self.text = self.text + newtext
Example #51
0
    def makeAreaHeader(self,
                       argDict,
                       areaLabel,
                       issueTime,
                       expireTime,
                       areaDictName,
                       defaultEditAreas,
                       cityDescriptor="Including the cities of",
                       areaList=None,
                       includeCities=1,
                       includeZoneNames=1,
                       includeIssueTime=1,
                       includeCodes=1,
                       includeVTECString=1,
                       hVTECString=None,
                       accurateCities=False):
        # Make a UGC area header for the given areaLabel
        # Determine list of areas (there could be more than one if we are using a combination)

        if areaDictName is None or areaDictName == "None":
            return areaLabel + "\n"

        # If we didn't supply an areaList,
        #     Use combinations file or defaultEditAreas
        if areaList is None:
            combinations = argDict["combinations"]
            if combinations is not None:
                areaList = self.getCurrentAreaNames(argDict, areaLabel)
            else:
                for editArea, label in defaultEditAreas:
                    if label == areaLabel:
                        areaList = [editArea]

        try:
            # Remove suffixes if necessary
            if self._editAreaSuffix is not None:
                areaList = self.removeSuffixes(areaList, self._editAreaSuffix)
        except:
            pass

        # Access the UGC information for the area(s) if available
        accessor = ModuleAccessor.ModuleAccessor()
        areaDict = accessor.variable(areaDictName, "AreaDictionary")
        ugcCityList = []
        if areaDict is None:  # create a dummy header
            codeString = "STZxxx-"
            nameString = areaLabel
            cityString = ""
        else:
            codeString = ""
            nameString = ""
            cityString = ""
            areaList, ugcList = self.makeUGCList(areaDict, areaList)
            codeString = self.makeUGCString(ugcList)
            ugcNameList = []
            for areaName in areaList:
                if areaName in areaDict.keys():
                    if areaDict.has_key(areaName):
                        entry = areaDict[areaName]
                    else:
                        entry = {}
                        log.error(\
                          "AreaDictionary missing definition for [" + \
                          areaName + "].")
                    if entry.has_key('ugcName'):
                        ugcName = entry['ugcName']
                    else:
                        ugcName = areaName  #missing UGCname
                        log.error(\
                          "AreaDictionary missing ugcName definition for [" + \
                          areaName + "].")
                    if ugcName not in ugcNameList:
                        ugcNameList.append(ugcName)
                    if entry.has_key("ugcCityString"):
                        cities = entry["ugcCityString"].split('...')
                        for c in cities:
                            if len(c) and c not in ugcCityList:
                                ugcCityList.append(c)
                else:
                    ugcNameList.append(areaName)
                    log.error("AreaDictionary does not contain " +\
                      'ugcName definition for ', areaName)

            if self.alphabetizeHeaders() == 1:
                # Alphabetize both lists.
                ugcNameList.sort()
                ugcCityList.sort()

            # Build nameString and cityString strings:
            for ugcName in ugcNameList:
                nameString = nameString + ugcName + "-"
            for ugcCity in ugcCityList:
                cityString = cityString + "..." + ugcCity

        # Compute accurate city list
        if accurateCities and \
           len(ugcCityList) > 0 and argDict.has_key("hazards"):
            ugcCityList, cityString = self.makeAccurateCityList(areaList, \
                                        ugcCityList, argDict)

        # get the VTEC string from the HazardsTable
        VTECString = ""
        VTECRecords = []
        if argDict.has_key("hazards") and includeVTECString:
            hazards = argDict["hazards"]
            VTECString = hazards.getVTECString(areaList)
            #must have VTECString, in order to have hVTEC string
            if hVTECString is not None and len(VTECString) and len(
                    hVTECString):
                VTECString = VTECString + hVTECString + "\n"

        # expiration time is dependent upon the passed in expiration time
        # and the VTEC strings. expireT is seconds since epoch
        if type(expireTime) is types.IntType or\
          type(expireTime) is types.FloatType:
            expireTime = AbsTime.AbsTime(int(expireTime))
        try:
            if self._fixedExpire == 1:
                fixed = 1
            else:
                fixed = 0
        except:
            fixed = 0
        expireT = self.getExpireTime(issueTime.unixTime(),
                                     expireTime.unixTime(),
                                     VTECString,
                                     fixedExpire=fixed)

        # format the expiration time
        expireTimeRange = TimeRange.TimeRange(AbsTime.AbsTime(expireT),
                                              AbsTime.AbsTime(expireT + 1))
        expireTime = self.timeDisplay(expireTimeRange, "", "", "%d%H%M", "")
        codeString = self.endline(codeString + "-" + expireTime + "-",
                                  linelength=self._lineLength,
                                  breakStr=["-"])

        # get this time zone
        thisTimeZone = os.environ["TZ"]
        zoneList = []
        # check to see if we have any areas outside our time zone
        for areaName in areaList:
            if areaName in areaDict.keys():
                entry = areaDict[areaName]
                if not entry.has_key("ugcTimeZone"):  #add your site tz
                    if thisTimeZone not in zoneList:
                        zoneList.append(thisTimeZone)
                    continue  # skip this entry
                timeZoneList = entry["ugcTimeZone"]
                if type(timeZoneList) == types.StringType:  # a single value
                    timeZoneList = [timeZoneList]  # make it into a list
                for timeZone in timeZoneList:
                    if timeZone not in zoneList:
                        zoneList.append(timeZone)

        # if the resulting zoneList is empty, put in our time zone
        if len(zoneList) == 0:
            zoneList.append(thisTimeZone)

        # if the resulting zoneList has our time zone in it, be sure it
        # is the first one in the list
        try:
            index = zoneList.index(thisTimeZone)
            if index != 0:
                del zoneList[index]
                zoneList.insert(0, thisTimeZone)
        except:
            pass

        # now create the time string
        issueTimeStr = ''
        timeStrs = []
        for timeZone in zoneList:
            timeStr = self.formatTimeString(issueTime.unixTime(),
                                            "%l%M %p %Z %a %b %e %Y", timeZone)
            timeStr = string.replace(timeStr, "  ", " ")
            timeStr = string.strip(timeStr)
            if timeStr not in timeStrs:
                timeStrs.append(timeStr)
        if len(timeStrs) == 1:
            issueTimeStr = timeStrs[0]
        else:
            issueTimeStr = timeStrs[0]
            for i in xrange(1, len(timeStrs)):
                issueTimeStr = issueTimeStr + " /" + timeStrs[i] + "/"

        try:
            if self._useRegionLabel == 1:
                if (areaLabel != ""):
                    nameString = areaLabel
        except:
            pass

        nameString = self.endline(nameString,
                                  linelength=self._lineLength,
                                  breakStr=["-"])
        if cityString != "":
            numCities = len(string.split(cityString, "...")[1:])
            if numCities == 1:

                def preserveCase(matchobj):
                    orig = matchobj.group(0)
                    repl = 'city'
                    retv = ''
                    for i in range(len(repl)):
                        c = repl[i]
                        if orig[i].isupper():
                            c = c.upper()
                        retv = retv + c
                    return retv

                cityDescriptor = re.sub("cities",
                                        preserveCase,
                                        cityDescriptor,
                                        flags=re.IGNORECASE)
            cityString = self.endline(cityDescriptor + cityString,
                                      linelength=self._lineLength,
                                      breakStr=["..."])
        issueTimeStr = issueTimeStr + "\n\n"
        try:
            if self._includeHeader == 0:
                issueTimeStr = "\n"
                codeString = ""
                cityString = ""
        except:
            pass
        if includeCities == 0:
            cityString = ""
        if includeZoneNames == 0:
            nameString = ""
        if includeIssueTime == 0:
            issueTimeStr = ""
        if includeCodes == 0:
            codeString = ""
        if includeVTECString == 0:
            VTECString = ""
        header = codeString + VTECString + nameString + cityString + issueTimeStr
        return header
Example #52
0
    def getSupportedVectors(self):
        if self.supportedVectors != None:
            return self.supportedVectors

        # first get the OGR driver manager
        QgsApplication.registerOgrDrivers()

        self.supportedVectors = dict()

        # for each loaded OGR driver
        for i in range(ogr.GetDriverCount()):
            driver = ogr.GetDriver(i)

            if driver == None:
                QgsLogger.warning("unable to get driver " + str(i))
                continue

            driverName = driver.GetName()
            longName = ''
            glob = []

            if driverName.startswith("AVCBin"):
                pass  #myDirectoryDrivers += "Arc/Info Binary Coverage,AVCBin"
            elif driverName.startswith("AVCE00"):
                longName = "Arc/Info ASCII Coverage"
                glob.append("*.e00")
            elif driverName.startswith("BNA"):
                longName = "Atlas BNA"
                glob.append("*.bna")
            elif driverName.startswith("CSV"):
                longName = "Comma Separated Value"
                glob.append("*.csv")
            elif driverName.startswith("DODS"):
                pass  #myProtocolDrivers += "DODS/OPeNDAP,DODS"
            elif driverName.startswith("PGeo"):
                pass  #myDatabaseDrivers += "ESRI Personal GeoDatabase,PGeo"

                # on Windows add a pair to the dict for this driver
                if platform.system() == "Windows":
                    longName = "ESRI Personal GeoDatabase"
                    glob.append("*.mdb")
            elif driverName.startswith("SDE"):
                pass  #myDatabaseDrivers += "ESRI ArcSDE,SDE"
            elif driverName.startswith("ESRI"):
                longName = "ESRI Shapefiles"
                glob.append("*.shp")
            elif driverName.startswith("FMEObjects Gateway"):
                longName = "FMEObjects Gateway"
                glob.append("*.fdd")
            elif driverName.startswith("GeoJSON"):
                pass  #myProtocolDrivers += "GeoJSON,GeoJSON"
                longName = "GeoJSON"
                glob.append("*.geojson")
            elif driverName.startswith("GeoRSS"):
                longName = "GeoRSS"
                glob.append("*.xml")
            elif driverName.startswith("GML"):
                longName = "Geography Markup Language"
                glob.append("*.gml")
            elif driverName.startswith("GMT"):
                longName = "GMT"
                glob.append("*.gmt")
            elif driverName.startswith("GPX"):
                longName = "GPX"
                glob.append("*.gpx")
            elif driverName.startswith("GRASS"):
                pass  #myDirectoryDrivers += "Grass Vector,GRASS"
            elif driverName.startswith("IDB"):
                pass  #myDatabaseDrivers += "Informix DataBlade,IDB"
            elif driverName.startswith("Interlis 1"):
                longName = "INTERLIS 1"
                glob.append("*.itf")
                glob.append("*.xml")
                glob.append("*.ili")
            elif driverName.startswith("Interlis 2"):
                longName = "INTERLIS 2"
                glob.append("*.itf")
                glob.append("*.xml")
                glob.append("*.ili")
            elif driverName.startswith("INGRES"):
                pass  #myDatabaseDrivers += "INGRES,INGRES"
            elif driverName.startswith("KML"):
                longName = "KML"
                glob.append("*.kml")
            elif driverName.startswith("MapInfo File"):
                longName = "Mapinfo File"
                glob.append("*.mif")
                glob.append("*.tab")
            elif driverName.startswith("DGN"):
                longName = "Microstation DGN"
                glob.append("*.dgn")
            elif driverName.startswith("MySQL"):
                pass  #myDatabaseDrivers += "MySQL,MySQL"
            elif driverName.startswith("OCI"):
                pass  #myDatabaseDrivers += "Oracle Spatial,OCI"
            elif driverName.startswith("ODBC"):
                pass  #myDatabaseDrivers += "ODBC,ODBC"
            elif driverName.startswith("OGDI"):
                pass  #myDatabaseDrivers += "OGDI Vectors,OGDI"
            elif driverName.startswith("PostgreSQL"):
                pass  #myDatabaseDrivers += "PostgreSQL,PostgreSQL"
            elif driverName.startswith("S57"):
                longName = "S-57 Base file"
                glob.append("*.000")
            elif driverName.startswith("SDTS"):
                longName = "Spatial Data Transfer Standard"
                glob.append("*catd.ddf")
            elif driverName.startswith("SQLite"):
                longName = "SQLite"
                glob.append("*.sqlite")
            elif driverName.startswith("UK .NTF"):
                pass  #myDirectoryDrivers += "UK. NTF,UK. NTF"
            elif driverName.startswith("TIGER"):
                pass  #myDirectoryDrivers += "U.S. Census TIGER/Line,TIGER"
            elif driverName.startswith("VRT"):
                longName = "VRT - Virtual Datasource "
                glob.append("*.vrt")
            elif driverName.startswith("XPlane"):
                longName = "X-Plane/Flighgear"
                glob.append("apt.dat")
                glob.append("nav.dat")
                glob.append("fix.dat")
                glob.append("awy.dat")

            longName = string.strip(longName)

            if longName == '':
                continue

            self.supportedVectors[longName] = {
                'EXTENSIONS': glob,
                'LONGNAME': longName,
                'SHORTNAME': driverName
            }

        return self.supportedVectors
Example #53
0
 def setIP(self, ip):
     self.IP_address = string.strip(ip)
Example #54
0
 def getFilterName(self, aFilter):
     return string.strip(re.sub('\ \(.*$', '', aFilter))
Example #55
0
def get_condensed_indications(file_name):
    with codecs.open(file_name, 'r', 'utf-8') as fl:
        return [strip(line) for line in fl.readlines() if strip(line)]
Example #56
0
def findIsoImages(path, messageWindow):
    flush = os.stat(path)
    files = os.listdir(path)
    arch = _arch
    discImages = {}

    for file in files:
        what = path + '/' + file
        if not isys.isIsoImage(what):
            continue

        isys.makeDevInode("loop2", "/tmp/loop2")

        try:
            isys.losetup("/tmp/loop2", what, readOnly = 1)
        except SystemError:
            continue

        try:
            isys.mount("loop2", "/mnt/cdimage", fstype = "iso9660",
                       readOnly = 1)
            for num in range(1, 10):
                if os.access("/mnt/cdimage/.discinfo", os.R_OK):
                    f = open("/mnt/cdimage/.discinfo")
                    try:
                        f.readline() # skip timestamp
                        f.readline() # skip release description
                        discArch = string.strip(f.readline()) # read architecture
                        discNum = getDiscNums(f.readline().strip())
                    except:
                        discArch = None
                        discNum = [ 0 ]

                    f.close()

                    if num not in discNum or discArch != arch:
                        continue

                    # if it's disc1, it needs to have images/stage2.img
                    if (num == 1 and not
                        os.access("/mnt/cdimage/images/stage2.img", os.R_OK)):
                        log.warning("%s doesn't have a stage2.img, skipping" %(what,))
                        continue
                    # we only install binary packages, so let's look for a
                    # product/ dir and hope that this avoids getting
                    # discs from the src.rpm set
                    if not os.path.isdir("/mnt/cdimage/%s" %(productPath,)):
                        log.warning("%s doesn't have binary RPMS, skipping" %(what,))
                        continue

                    # warn the user if the image appears to be the wrong size
                    if os.stat(what)[stat.ST_SIZE] % 2048:
                        rc = messageWindow(_("Warning"),
                            "The ISO image %s has a size which is not "
                            "a multiple of 2048 bytes.  This may mean "
                            "it was corrupted on transfer to this computer."
                            "\n\n"
                            "It is recommended that you reboot and abort your "
                            "installation, but you can choose to continue if "
                            "you think this is in error." % (file,),
                            type="custom",
                            custom_icon="warning",
                            custom_buttons=[_("_Reboot"), _("_Continue")])
                        if rc == 0:
                            sys.exit(0)

                    discImages[num] = file

            isys.umount("/mnt/cdimage")
        except SystemError:
            pass

        isys.makeDevInode("loop2", '/tmp/' + "loop2")
        isys.unlosetup("/tmp/loop2")

    return discImages
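getDiscNums() is used on the fourth line of .discinfo but is not shown; a plausible sketch, assuming that line is a comma-separated list of disc numbers (e.g. "1,2,3"):

    def getDiscNums(line):
        # turn '1,2,3' into [1, 2, 3]
        return [int(num) for num in line.split(',')]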
Example #57
0
import string
import os
import shutil
import setup
import sys
import getopt

if not(os.path.exists('REPOUSER')) :
    print 'What is your repository user name?'
    #print 'What is your quest?'
    #print 'What is the capital of Asyria?'
    username = sys.stdin.readline()
    open("REPOUSER","wt").write(username)

version=string.strip(open("VERSION").readline())
user=string.strip(open("REPOUSER").readline())
repository='svn+ssh://%(user)s@svn.berlios.de/svnroot/repos/pydirstat/pydirstat' % {'user':user}
public_repository='svn://svn.berlios.de/pydirstat/pydirstat'
tmpdir='tmp'
# Gruik
nsisexe=r'C:\Program Files\NSIS\makensisw.exe'

scriptargs = {
    'version':version,
    'user':user,
    'repository':repository,
    'public_repository':public_repository,
    'tmpdir':tmpdir,
    'nsisexe':nsisexe,
    }
Example #58
0
import argparse
import string
import sys

parser = argparse.ArgumentParser()
parser.add_argument("--remove",
                    action="store_true",
                    help="Remove duplicates instead of marking")
args = parser.parse_args()

lastRead = None
lastReadCount = 0
dupCounts = {}
for line in sys.stdin:
    if line[0] == '@':
        print line,
        continue
    fields = string.split(string.strip(line))
    flag = int(fields[1])

    if flag & 0x4:  #unmapped
        print line,
        continue

    rname = fields[2]
    pos = int(fields[3])
    seq = fields[9]

    if lastRead == (rname, pos, len(seq)):
        fields[1] = str(flag | 0x400)
        lastReadCount += 1
        if not args.remove:
            print "\t".join(fields)
Example #59
0
 def _strip_value(self, value):
     from string import strip
     return strip(strip(value, '"'), "'")
Example #60
0
#Open file containing the jobs to be resubmitted (one for all datasets)
ToBeResubmitted_name = "ToBeResubmitted.list"
ToBeResubmitted = open(ToBeResubmitted_name, 'w')

#error counters
nLogFileErrors = 0
nRootFileSizeErrors = 0
nDatFileSizeErrors = 0
nJobsToBeResubmitted = 0

#---Loop over datasets
print "\n"
for n, lin in enumerate(open(options.inputList)):

    lin = string.strip(lin, "\n")
    #print lin

    dataset_mod = string.split(string.split(lin, "/")[-1], ".")[0]
    print dataset_mod + " ... "

    #structure of the directory containing the output of the batch submission
    BatchDir = options.inputDir + "/" + options.analysisCode + "___" + dataset_mod
    inputBatchDir = BatchDir + "/" + "input"
    logBatchDir = BatchDir + "/" + "log"
    outputBatchDir = BatchDir + "/" + "output"
    scriptBatchDir = BatchDir + "/" + "src"
    castorBatchDir = options.castorDir
    #print BatchDir
    #print inputBatchDir
    #print logBatchDir