Example #1
def get_navmenu_choices_left(menu_id):
  """ Auswahl des Navigationsmenus """
  ret = []
  ret.append( ('|', mark_safe(_('<b><i>(Lokale) Startseite</i></b>'))) )
  menu = get_menuitems_by_menu_id_left(menu_id)[0]
  lines = string.splitfields(menu.navigation, '\n')
  nav_main = ''
  nav_sub = ''
  for line in lines:
    line = string.strip(line)
    if line != '' and line[0] != '#':
      arr = string.splitfields(line, '|')
      if len(arr) > 1:
        my_depth = int(string.strip(arr[0]))
        my_alias = string.strip(arr[1])
        if my_depth == 0:
          nav_main = my_alias
          nav_sub = ''
        else:
          nav_sub = my_alias
        info = string.strip(arr[3])
        if my_depth == 0:
          info = '<b>' + info + '</b>'
        ret.append( (nav_main + '|' + nav_sub, mark_safe(info)) )
  return ret
Example #2
def save_menus_top(menu_id, text, profi_mode=False):
  """ speichert das obere Hauptmenu """

  def save_this_menu (menu_id, name, navigation):
    item = get_new_navmenu_top()
    item.menu_id = menu_id
    item.name = name
    item.navigation = navigation
    item.save()

  #if not profi_mode:
  #  text = decode_html(text)
  lines = string.splitfields(text, '\n')

  delete_menuitem_navmenu_top(menu_id)
  menu = get_top_navigation_menu_top(lines, profi_mode)
  save_this_menu(menu_id, '|', menu)

  nav_main = ''
  for line in lines:
    line = string.strip(line)
    if line != '' and line[0] != '#':
      arr = string.splitfields(line, '|')
      if len(arr) > 1:
        my_alias = string.strip(arr[0])
        nav_main = my_alias
        info = string.strip(arr[3])
        menu = get_top_navigation_menu(lines, nav_main, profi_mode)
        save_this_menu(menu_id, nav_main, menu)
Example #3
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

        Arguments:

        qs: URL-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            URL encoded queries should be treated as blank strings.  
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.

       Returns a list, as God intended.
    """
    name_value_pairs = string.splitfields(qs, '&')
    r=[]
    for name_value in name_value_pairs:
        nv = string.splitfields(name_value, '=')
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %s" % `name_value`
            continue
        name = urllib.unquote(string.replace(nv[0], '+', ' '))
        value = urllib.unquote(string.replace(nv[1], '+', ' '))
        r.append((name, value))

    return r
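A quick sketch of the call above (hypothetical input): '+' becomes a space, %XX escapes are unquoted, and fields without '=' are skipped unless strict_parsing is set.

parse_qsl('a=1&b=hello+world&broken')
# -> [('a', '1'), ('b', 'hello world')]   ('broken' has no '=', so it is silently dropped)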
Example #4
def parse_qs(qs, keep_blank_values=None):
    """Parse a query given as a string argument.

        Arguments:

        qs: URL-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            URL encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.
    """
    import urllib, regsub
    name_value_pairs = string.splitfields(qs, '&')
    dict = {}
    for name_value in name_value_pairs:
        nv = string.splitfields(name_value, '=')
        if len(nv) != 2:
            continue
        name = nv[0]
        value = urllib.unquote(regsub.gsub('+', ' ', nv[1]))
        if len(value) or keep_blank_values:
            if dict.has_key(name):
                dict[name].append(value)
            else:
                dict[name] = [value]
    return dict
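A minimal sketch of the resulting dictionary (hypothetical input): repeated names accumulate into a list, and blank values are dropped unless keep_blank_values is true.

parse_qs('tag=python&tag=legacy&empty=')
# -> {'tag': ['python', 'legacy']}   ('empty' is dropped because its value is blank)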
Example #5
def create_members(org_id, group_id, target_group_ids, fcontent):
  """ Creates several users for org_id and assigns them to group_id """
  lines = string.splitfields(fcontent)
  for line in lines:
    if line.find('@') > 0:
      line = line.replace('"', '')
      items = string.splitfields(line.strip(), ';')
      if len(items) == 4:
        sex = items[0].strip()
        first_name = items[1].strip()
        last_name = items[2].strip()
        email = items[3].strip()
        title = ''
      elif len(items) == 5:
        sex = items[0].strip()
        title = items[1].strip()
        first_name = items[2].strip()
        last_name = items[3].strip()
        email = items[4].strip()
      elif len(items) == 6:
        sex = items[1].strip()
        title = items[2].strip()
        first_name = items[3].strip()
        last_name = items[4].strip()
        email = items[5].strip()
      create_member(org_id, group_id, target_group_ids, sex, first_name, last_name, title, email)
Example #6
def get_german_date(d):
  """ d=2003-02-01 10:11:12 -> 01.02.2003 10:11"""
  arr = string.splitfields(d, ' ')
  Y, M, D = string.splitfields(arr[0], '-')
  h, m, s = string.splitfields(arr[1], ':')
  dt = datetime.datetime(int(Y),int(M),int(D),int(h),int(m))
  return dt.strftime('%d.%m.%Y %H:%M')
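For example (the seconds are parsed but deliberately not carried into the output):

get_german_date('2003-02-01 10:11:12')   # -> '01.02.2003 10:11'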
Example #7
def DomConvert(node, xslParent, xslDoc, extUris, extElements, preserveSpace):
    if node.nodeType == Node.ELEMENT_NODE:
        mapping = g_mappings.get(node.namespaceURI, None)
        if mapping:
            if not mapping.has_key(node.localName):
                raise XsltException(Error.XSLT_ILLEGAL_ELEMENT, node.localName)
            xsl_class = mapping[node.localName]

            xsl_instance = xsl_class(xslDoc, baseUri=xslParent.baseUri)
            for attr in node.attributes.values():
                if not attr.namespaceURI and attr.localName not in xsl_instance.__class__.legalAttrs:
                    raise XsltException(Error.XSLT_ILLEGAL_ATTR,
                                        attr.nodeName, xsl_instance.nodeName)
                xsl_instance.setAttributeNS(attr.namespaceURI, attr.nodeName,
                                            attr.value)
            xslParent.appendChild(xsl_instance)
        elif node.namespaceURI in extUris:
            name = (node.namespaceURI, node.localName)
            if name in extElements.keys():
                ext_class = extElements[name]
            else:
                #Default XsltElement behavior effects fallback
                ext_class = XsltElement
            xsl_instance = ext_class(xslDoc, node.namespaceURI,
                                     node.localName, node.prefix,
                                     xslParent.baseUri)
            node_nss = GetAllNs(node)
            for attr in node.attributes.values():
                if (attr.namespaceURI, attr.localName) == (XSL_NAMESPACE, 'extension-element-prefixes'):
                    ext_prefixes = string.splitfields(attr.value)
                    for prefix in ext_prefixes:
                        if prefix == '#default': prefix = ''
                        extUris.append(node_nss[prefix])
                xsl_instance.setAttributeNS(attr.namespaceURI, attr.nodeName,
                                            attr.value)
            xslParent.appendChild(xsl_instance)
        else:
            xsl_instance = LiteralElement(xslDoc, node.namespaceURI,
                                          node.localName, node.prefix,
                                          xslParent.baseUri)
            node_nss = GetAllNs(node)
            for attr in node.attributes.values():
                if (attr.namespaceURI, attr.localName) == (XSL_NAMESPACE, 'extension-element-prefixes'):
                    ext_prefixes = string.splitfields(attr.value)
                    for prefix in ext_prefixes:
                        if prefix == '#default': prefix = ''
                        extUris.append(node_nss[prefix])
                xsl_instance.setAttributeNS(attr.namespaceURI,
                                            attr.nodeName,
                                            attr.value
                                            )
            xslParent.appendChild(xsl_instance)
        ps = (xsl_instance.namespaceURI, xsl_instance.localName) == (XSL_NAMESPACE, 'text') or xsl_instance.getAttributeNS(XML_NAMESPACE,'space') == 'preserve'
        #ps = (xsl_instance.namespaceURI, xsl_instance.localName) == (XSL_NAMESPACE, 'text')
        for child in node.childNodes:
            DomConvert(child, xsl_instance, xslDoc, extUris, extElements, ps)
    elif node.nodeType == Node.TEXT_NODE:
        if string.strip(node.data) or preserveSpace:
            xsl_instance = LiteralText(xslDoc, node.data)
            xslParent.appendChild(xsl_instance)
    return
Example #8
def get_points(item_container):
  """ liefert den Notenspiegel """
  if item_container.item.app.name=='dmsExercise':
    if item_container.item.integer_1 <= 0:
      return ''
    else:
      arr = string.splitfields(item_container.item.string_1.strip(), '\n')
      max = item_container.item.integer_1
  if item_container.item.app.name=='dmsEduExerciseItem':
    if item_container.item.integer_6 <= 0:
      return ''
    else:
      arr = string.splitfields(item_container.item.string_2.strip(), '\n')
      max = item_container.item.integer_6
  if len(arr) < 5:
    return ''
  error = False
  p = []
  for n in xrange(5):
    values = string.splitfields(arr[n].strip(), ':')
    if len(values) < 2:
      error = True
    else:
      if values[0].strip() != str(n):
        error = True
      if int(values[1].strip()) > max:
        error = True
      else:
        if n == 0:
          p.append({'max': max, 'min': int(values[1].strip())})
        else:
          p.append({'max': max-1, 'min': int(values[1].strip())})
        max = int(values[1].strip())
  p.append({'max': max, 'min': 0})
  return p
Example #9
def pathname2url(p):

	""" Convert a DOS path name to a file url...
	Currently only works for absolute paths

		C:\foo\bar\spam.foo

			becomes

		///C|/foo/bar/spam.foo
	"""

	import string
	comp = string.splitfields(p, ':')
	if len(comp) != 2 or len(comp[0]) > 1:
		error = 'Bad path: ' + p
		raise IOError, error

	drive = string.upper(comp[0])
	components = string.splitfields(comp[1], '\\')
	path = '///' + drive + '|'
	for comp in components:
		if comp:
			path = path + '/' + comp
	return path
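A hypothetical call, assuming an absolute DOS path with a single-letter drive; anything else raises IOError:

pathname2url('C:\\foo\\bar\\spam.foo')   # -> '///C|/foo/bar/spam.foo'
pathname2url('foo\\bar')                 # raises IOError: 'Bad path: foo\\bar'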
Example #10
def listar_dependencias(seccion):
    dependencias = {}
    dependencias["simples"] = []
    dependencias["comp"] = []
    if seccion == None or seccion.get('Depends') == None:
        return dependencias
    else:
        deps = string.splitfields(seccion.get('Depends'), ", ")
        for d in deps:
            if re.findall("\|", d):
                lista_or_raw = string.splitfields(d, " | ")
                tmpls = []
                for i in lista_or_raw:
                    if re.findall("\s", i):
                        obj = string.splitfields(i, " ", 1)
                        sp = dependencia_simple(obj[0], obj[1])
                        tmpls.append(sp)
                    else:
                        obj = string.splitfields(i, " ", 1)
                        sp = dependencia_simple(obj[0])
                        tmpls.append(sp)
                dependencias["comp"].append(tmpls)
            
            elif re.findall("\s", d):                
                obj = string.splitfields(d, " ", 1)
                sp = dependencia_simple(obj[0], obj[1])
                dependencias["simples"].append(sp)
            else:
                sp = dependencia_simple(d)
                dependencias["simples"].append(sp)
        return dependencias
Example #11
def normpath(path):
    """Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
    Also, components of the path are silently truncated to 8+3 notation."""

    path = string.replace(path, "/", "\\")
    prefix, path = splitdrive(path)
    while path[:1] == os.sep:
        prefix = prefix + os.sep
        path = path[1:]
    comps = string.splitfields(path, os.sep)
    i = 0
    while i < len(comps):
        if comps[i] == '.':
            del comps[i]
        elif comps[i] == '..' and i > 0 and \
                      comps[i-1] not in ('', '..'):
            del comps[i-1:i+1]
            i = i-1
        elif comps[i] == '' and i > 0 and comps[i-1] <> '':
            del comps[i]
        elif '.' in comps[i]:
            comp = string.splitfields(comps[i], '.')
            comps[i] = comp[0][:8] + '.' + comp[1][:3]
            i = i+1
        elif len(comps[i]) > 8:
            comps[i] = comps[i][:8]
            i = i+1
        else:
            i = i+1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append('.')
    return prefix + string.joinfields(comps, os.sep)
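A sketch of the truncation rules, assuming os.sep is '\\' as on the DOS platform this module targets:

normpath('A/longdirectoryname/./file.extension')
# -> 'A\\longdire\\file.ext'   ('.' removed, directory cut to 8 chars, extension to 8+3)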
Example #12
def normpath(path):
	path = normcase(path)
	prefix, path = splitdrive(path)
	while path[:1] == os.sep:
		prefix = prefix + os.sep
		path = path[1:]
	comps = string.splitfields(path, os.sep)
	i = 0
	while i < len(comps):
		if comps[i] == '.':
			del comps[i]
		elif comps[i] == '..' and i > 0 and \
					  comps[i-1] not in ('', '..'):
			del comps[i-1:i+1]
			i = i-1
		elif comps[i] == '' and i > 0 and comps[i-1] <> '':
			del comps[i]
		elif '.' in comps[i]:
			comp = string.splitfields(comps[i], '.')
			comps[i] = comp[0][:8] + '.' + comp[1][:3]
			i = i+1
		elif len(comps[i]) > 8:
			comps[i] = comps[i][:8]
			i = i+1
		else:
			i = i+1
	# If the path is now empty, substitute '.'
	if not prefix and not comps:
		comps.append('.')
	return prefix + string.joinfields(comps, os.sep)
Example #13
File: jaaminput.py  Project: jcandy/jaam
    def read_input(self,inputfile):
        # 1. read user input file
        for line in open(inputfile,'r').readlines():

            # Remove leading and trailing whitespace from line
            line = string.strip(line)

            # Skip blank lines
            if len(line) > 0 and line[0] != '#':
                x = string.splitfields(line,'=')
                y = string.splitfields(x[1],'#')
                arg = string.strip(x[0])
                val = string.strip(y[0])

                self.user_dict[arg] = val

        # 2. build complete input file, looking for errors
        for x in self.user_dict.keys():
            if self.data_dict.has_key(x) == 1:
                self.data_dict[x] = self.user_dict[x]
            elif self.dep_dict.has_key(x) == 1:
                self.error=1
                self.error_msg=self.error_msg+'JAAM ERROR: Deprecated parameter '+x+'\n'
                self.error_msg=self.error_msg+'       '+self.dep_dict[x]+'\n'
            else:
                self.error=1
                self.error_msg=self.error_msg+"JAAM ERROR: Bogus parameter "+x+'\n'

        if self.error == 0:
            f=open(inputfile+self.extension,'w')
            for x in self.data_orderlist:
                f.write(self.data_dict[x]+'  '+x+'\n')
Example #14
 def setup(self):
     self._useAttributeSets = string.splitfields(self.getAttributeNS(XSL_NAMESPACE, 'use-attribute-sets'))
     self._nss = xml.dom.ext.GetAllNs(self)
     self._outputNss = {}
     self.__attrs = []
     self.excludedNss = []
     sheet = self.ownerDocument.documentElement
     sheet._lres.append(self)
     excluded_prefixes = self.getAttributeNS(XSL_NAMESPACE, 'exclude-result-prefixes')
     if excluded_prefixes:
         excluded_prefixes = string.splitfields(excluded_prefixes)
         for prefix in excluded_prefixes:
             if prefix == '#default': prefix = ''
             self.excludedNss.append(self._nss[prefix])
     node = self.parentNode
     while node:
         if hasattr(node, 'excludedNss'):
             self.excludedNss = self.excludedNss + node.excludedNss
             break
         node = node.parentNode
     for attr in self.attributes.values():
         if attr.name == 'xmlns' or attr.name[:6] == 'xmlns:' or attr.namespaceURI == XSL_NAMESPACE:
             continue
         name = attr.name
         local_name = attr.localName
         prefix = attr.prefix
         uri = attr.namespaceURI
         if sheet.namespaceAliases[1].has_key(uri):
             name = sheet.namespaceAliases[0][prefix] + ':' + local_name
             uri = sheet.namespaceAliases[1][uri]
         self.__attrs.append((name, uri, AttributeValueTemplate.AttributeValueTemplate(attr.value)))
     self.fixupAliases()
     return
Example #15
def get_menu_left_from_sections(item_container, name, sections, url=None):
    if url == None:
      url = item_container.get_absolute_url()
    if name != '':
      url = url.replace('index.html', '') + name + '/index.html'
    s_name = 'Start'
    s_info = 'Startseite'
    text = u'0 | %s | %s | %s | %s | <b><i><span class="red">::</span></i></b>\n999\n' % \
           (s_name.lower(), url, s_name, s_info)
    objs = string.splitfields(sections, '\n')
    for obj in objs:
      obj = obj.strip()
      if obj != '':
        arr = string.splitfields(obj, '|')
        m_name = arr[0].strip()
        if len(arr) == 1:
          arr.append(m_name.lower())
          f_name = check_name(arr[1].strip(), True)
        else:
          f_name = arr[1].strip()
        n_pos = f_name.find('/')
        if n_pos > 0:
          menu = f_name[:n_pos]
        else:
          menu = f_name
        s_name = obj.strip()
        text += u'1 | %s | %s%s/index.html | %s\n' % \
                ( menu, url.replace('index.html', ''), f_name, m_name)
    return text
Example #16
def getScaleFromRaster(inputRaster, gp):
    ## Automatic Image Scale Calculation (if possible)
    ## The method used in the original Avenue script:
    ## 1. Get the extent of the Image. (This would be in Geographic Decimal Degrees) (theExtent) 
    ## 2. Get the Number of Columns of the image (theCols) 
    ## 3. Divide the Number of Columns / The Width of the image. (This would give Pixels per DD) {theRes = theCols / theExtent.GetWidth} 
    ## 4. Get the approximate # of pixels per meter by dividing {theScale = 111120/theRes } 
    ## 5. Then logic was applied to guess to populate the scale value in the MA Catalog. 
    ## If theScale <= 5
    ## Then 
    ##     theScale = theScale.Round & "M"
    ## Else 
    ##      theScale = theScale * 5000
    ## If (theScale >= 1000000) Then 
    ##      theScale = "1:" + (theScale/1000000).Round & "M"  df
    defaultScale = 50001
    minPossibleScale = 100 # assumes image will never be smaller than 100m
    maxPossibleScale = 40000000 # the circumference of the earth in meters

    try:

        if not IsGeographicSR(inputRaster, gp) : 
            msgNotWGS84 = msgDatasetNotGeographicUsingDefaultScale + str(defaultScale)
            raise Exception, msgNotWGS84
                
        description = gp.describe(inputRaster)
                
        if debug : gp.AddMessage("Extent = " + description.Extent)

        minX = float(string.splitfields(description.Extent," ")[0])
        maxX = float(string.splitfields(description.Extent," ")[2])
        widthDecDegrees = abs(maxX - minX)
        
        if debug : gp.AddMessage("Width (Decimal Degrees) = " + str(widthDecDegrees))

        pixelsPerDecDegrees = description.Width / widthDecDegrees            
        if debug : gp.AddMessage("Pixels per Decimal Degrees = " + str(pixelsPerDecDegrees))

        pixelsPerMeter = 111120 / pixelsPerDecDegrees           
        if debug : gp.AddMessage("Pixels per Meter = " + str(pixelsPerMeter))

        theScale = round(pixelsPerMeter * 5000.0)
        
        if (pixelsPerMeter < 5) : theScale = round(pixelsPerMeter)                
        if debug : gp.AddMessage("Calculated Scale = " + str(theScale))

        nScale = long(theScale)

        # check the calculated scale for bogus values
        if (nScale < minPossibleScale) or (nScale > maxPossibleScale) :
            raise Exception, msgCalculatedScaleIsOutOfBounds                

        return nScale
    
    except Exception, ErrorDesc:
        # Except block if automatic scale calculation can not be performed
        # Just return the default Scale
        gp.AddWarning(msgCouldNotCalculateScale + str(defaultScale) + " Image=" + inputRaster);
        gp.AddWarning(str(ErrorDesc))                
        return defaultScale
Example #17
def RCSDateToSeconds(date):
    import string, time
    (date_part, time_part) = string.split(date)
    (year, month, day) = string.splitfields(date_part, '/')
    (hour, minute, second) = string.splitfields(time_part, ':')
    tuple = (string.atoi(year), string.atoi(month), string.atoi(day),
             string.atoi(hour), string.atoi(minute), string.atoi(second),
             0, 0, -1)
    return time.mktime(tuple)
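For instance (the exact result depends on the local timezone, since time.mktime treats the tuple as local time):

RCSDateToSeconds('1999/12/31 23:59:59')   # -> seconds since the epoch for that local date/time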
Example #18
	def variationGen(self):
		variationList = list()
		markerCount = len(string.splitfields(self.rawHeader, sep="^%"))
		for marker in range(1,markerCount,2):
			splitString = string.splitfields(self.rawHeader, sep="^%")
			splitString[marker] = ''.join(('^%',splitString[marker],'^%'))
			splitString = ''.join(splitString)
			variationList.append(splitString)
		return variationList
Example #19
def atof(str,func=string.atof):
    "Parses a string as a float according to the locale settings."
    #First, get rid of the grouping
    s=string.splitfields(str,localeconv()['thousands_sep'])
    str=string.join(s,"")
    #next, replace the decimal point with a dot
    s=string.splitfields(str,localeconv()['decimal_point'])
    str=string.join(s,'.')
    #finally, parse the string
    return func(str)
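A minimal sketch, assuming the active locale's localeconv() reports '.' as thousands_sep and ',' as decimal_point (e.g. a German locale):

# locale.setlocale(locale.LC_NUMERIC, 'de_DE')   # hypothetical locale name
atof('1.234,56')   # -> 1234.56  (grouping dots removed, decimal comma turned into '.')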
Example #20
def get_list(IPADDR):
    resp = urllib.urlopen("http://" + IPADDR + ":18080/pvr").read()
    resp = string.splitfields(resp, "\n")
    videoids = {"ids": {}}
    for line in resp:
        if string.find(line, '<tr><td class="n"><a href="') == 0:
            line = string.splitfields(line, '<tr><td class="n"><a href="')[1]
            if line[0] != ".":
                line = string.splitfields(line, "/")[0]
                videoids["ids"][line] = IPADDR
    return videoids
Example #21
def _get_relative_dir(dirname, relative_to):
    """Get relative path to `dirname' from `relative_to'."""
    dirpath = string.splitfields(dirname, os.sep)
    relative_to = string.splitfields(relative_to, os.sep)
    changed = 0
    while dirpath and relative_to and dirpath[0] == relative_to[0]:
        del dirpath[0]
        del relative_to[0]
        changed = changed + 1
    if changed < 2:
        return dirname
    dirpath = (len(relative_to) * [os.pardir]) + dirpath
    return string.joinfields(dirpath, os.sep)
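A sketch of the effect, assuming os.sep is '/'; with fewer than two common leading components the original dirname is returned unchanged:

_get_relative_dir('/usr/local/share', '/usr/local/bin')   # -> '../share'
_get_relative_dir('/usr/share', '/opt/bin')               # -> '/usr/share'  (only '' in common)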
Example #22
def GatherLandmarks(repository, force = 0, read_cache = 1, write_cache = 0,
		    write_file = 0, print_output = 0, **kw):
    landmarks = kw["landmark"]
    # Figure out the outdir.
    if kw.has_key("outdir"):
	outdir = kw["outdir"]    
    # If there's a single root, then use it
    elif len(repository.dir_roots) == 1:
	outdir = repository.dir_roots[0].PathString()
    else:
	outdir = os.getcwd()
    if kw.has_key("outprefix"):
	outprefix = kw["outprefix"]
	outprefix = os.path.splitext(outprefix)[0]
    else:
	outprefix = "landmark-" + `int(time.time())`
    # outform is either "single" or "session"
    if kw.has_key("outform"):
	outform = kw["outform"]
    else:
	outform = "single"
    # Now, we attempt to infer from the landmark list
    # which are the relevant interesting classes.
    class_list = []
    postprocess_list = []
    for lm in landmarks:
	try:
	    [key, value] = string.splitfields(lm, "=")
	    l = string.splitfields(value, ":", 1)
	    value = l[0]
	    postprocess = None
	    if len(l) > 1:
		postprocess = l[1]		
	    c = GC_log_formatting.GetInterestingFormatClass(key, value)
	    if c is None:
		repository.io.ReportStatus("Don't recognize landmark %s=%s. Skipping." % (key, value))
	    else:
		class_list.append((c, key, value))
		postprocess_list.append(postprocess)
	except:
	    repository.io.ReportStatus("Encountered an error processing landmark %s. Skipping." % lm)
    kw["class_list"] = class_list
    kw["postprocess_list"] = postprocess_list
    kw["outform"] = outform
    kw["outprefix"] = outprefix
    kw["outdir"] = outdir
    kw["range"] = repository._InterpretRange(kw)
    apply(repository.ApplyOperation,
	  (GatherSessionLandmarks, force, read_cache, write_cache,
	   write_file, print_output), kw)
Example #23
File: eud2gc.py  Project: Mortal/claws
def dElines2Dict(lElines):
	dAliases = {}
	for sEntry in lElines:
		if '"' in sEntry:
			lChunks = string.splitfields(sEntry, '"')
		else:
			lChunks = string.splitfields(sEntry, ' ')
		if lChunks[0] <> 'alias':
			print ('ignoring invalid line: %s' %sEntry)
		else:
			sAdresses = string.joinfields(lChunks[2:], ',')
			print ('Entry added: %s %s' %(lChunks[1],sEntry))
			dAliases[lChunks[1]]=sAdresses
	return dAliases
Example #24
def Divider(kalimat, max):
    s = string.splitfields(kalimat,'\n')
    #t = ""
    r = []
    for k in s:
      t = ""
      s1 = string.splitfields(k)
      for i in s1:
        if len(string.strip(t + " " + i)) > max:
          r.append(t)
          t = ""
        t = string.strip(t+" "+i)
      r.append(t)
    return r
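For example, wrapping at a maximum of 10 characters per line (hypothetical input):

Divider('the quick brown fox jumps', 10)
# -> ['the quick', 'brown fox', 'jumps']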
Example #25
 def convert_date_time(s):
   cStart = 'value="'
   nLen = len(cStart)
   nStart = string.find(s, cStart)
   nEnd   = string.find(s, '"', nStart+nLen+1)
   d = s[nStart+nLen:nEnd]
   a = string.splitfields(d, ' ')
   arr = string.splitfields(a[0], '-')
   if len(arr) == 3 :
     dGerman = u'%s.%s.%s' % (arr[2],arr[1],arr[0])
   else :
     dGerman = d
   dGerman += ' ' + a[1][:a[1].rfind(':')]  # strip the seconds
   return s[:nStart+nLen] + dGerman + s[nEnd:]
Example #26
def get_periods_list( org_id, text):
  """ Erzeugt aus der Texteingabe der Zeitspannen eine Liste mit Endzeitpunkt
      Listenelement: [Startzeitpunkt, Benennung, Endzeitpunkt]
  """
  if text=="":
    text = get_default_settings( org_id)
  lines = string.splitfields(text,'\n')
  per_list0 = []
  for line in lines:
    array = string.splitfields(line,'|')
    if (len(array)==2):
      per_list0 += [[array[0].strip(), array[1].strip()]]
  ret = add_periods_endtime( per_list0)
  return ret
Example #27
File: app.py  Project: hkfr/data
	def SetApplicationPaths(self):
		# Load the users/application paths
		new_path = []
		apppath=string.splitfields(win32ui.GetProfileVal('Python','Application Path',''),';')
		for path in apppath:
			if len(path)>0:
				new_path.append(win32ui.FullPath(path))
		for extra_num in range(1,11):
			apppath=string.splitfields(win32ui.GetProfileVal('Python','Application Path %d'%extra_num,''),';')
			if len(apppath) == 0:
				break
			for path in apppath:
				if len(path)>0:
					new_path.append(win32ui.FullPath(path))
		sys.path = new_path + sys.path
Example #28
def get_directory(f):
    """Get a directory in the form of a list of entries."""
    import string
    list = []
    while 1:
        line = f.readline()
        if not line:
            print '(Unexpected EOF from server)'
            break
        if line[-2:] == CRLF:
            line = line[:-2]
        elif line[-1:] in CRLF:
            line = line[:-1]
        if line == '.':
            break
        if not line:
            print '(Empty line from server)'
            continue
        gtype = line[0]
        parts = string.splitfields(line[1:], TAB)
        if len(parts) < 4:
            print '(Bad line from server:', `line`, ')'
            continue
        if len(parts) > 4:
            if parts[4:] != ['+']:
                print '(Extra info from server:',
                print parts[4:], ')'
        else:
            parts.append('')
        parts.insert(0, gtype)
        list.append(parts)
    return list
Example #29
def get_menu(selector, host, port):
    f = send_request(selector, host, port)
    list = []
    while 1:
        line = f.readline()
        if not line:
            print '(Unexpected EOF from server)'
            break
        if line[-2:] == CRLF:
            line = line[:-2]
        elif line[-1:] in CRLF:
            line = line[:-1]
        if line == '.':
            break
        if not line:
            print '(Empty line from server)'
            continue
        typechar = line[0]
        parts = string.splitfields(line[1:], TAB)
        if len(parts) < 4:
            print '(Bad line from server: %r)' % (line,)
            continue
        if len(parts) > 4:
            print '(Extra info from server: %r)' % (parts[4:],)
        parts.insert(0, typechar)
        list.append(parts)
    f.close()
    return list
Example #30
def readmailcapfile(fp):
    caps = {}
    while 1:
        line = fp.readline()
        if not line: break
        # Ignore comments and blank lines
        if line[0] == '#' or string.strip(line) == '':
            continue
        nextline = line
        # Join continuation lines
        while nextline[-2:] == '\\\n':
            nextline = fp.readline()
            if not nextline: nextline = '\n'
            line = line[:-2] + nextline
        # Parse the line
        key, fields = parseline(line)
        if not (key and fields):
            continue
        # Normalize the key
        types = string.splitfields(key, '/')
        for j in range(len(types)):
            types[j] = string.strip(types[j])
        key = string.lower(string.joinfields(types, '/'))
        # Update the database
        if caps.has_key(key):
            caps[key].append(fields)
        else:
            caps[key] = [fields]
    return caps
Example #31
 def readlines(self):
     lines = string.splitfields(self.read(), '\n')
     if not lines:
         return lines
     for i in range(len(lines) - 1):
         lines[i] = lines[i] + '\n'
     if not lines[-1]:
         del lines[-1]
     return lines
Example #32
File: dates.py  Project: wilsonify/ppw
def parseTime(txt):
    "assumes hh:mm:ss or hh:mm"
    bits = map(string.atoi, string.splitfields(txt, timeseparator))
    if len(bits) <> 3:
        return 0
    hh = bits[0]
    nn = bits[1]
    ss = bits[2]
    return ss + (60 * nn) + (3600 * hh)
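For example, assuming the module-level timeseparator is ':' (it is not shown in this excerpt):

parseTime('01:30:15')   # -> 5415  (1*3600 + 30*60 + 15)
parseTime('01:30')      # -> 0     (only hh:mm:ss is actually accepted, despite the docstring)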
Example #33
    def do_set(self, args):
	fields = string.splitfields(args, ' ')
	key = fields[0]
	if len(fields) == 1:
	    value = 1
	else:
	    value = string.joinfields(fields[1:], ' ')
	self.values[key] = value
	print self.values
Example #34
def parse_cache_control(s):
    def parse_directive(s):
        i = string.find(s, '=')
        if i >= 0:
            return (s[:i], s[i + 1:])
        return (s, '')

    elts = string.splitfields(s, ',')
    return map(parse_directive, elts)
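A quick illustration (hypothetical header value); note that whitespace after the comma is not stripped:

parse_cache_control('max-age=3600, no-cache')
# -> [('max-age', '3600'), (' no-cache', '')]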
Example #35
def _getmounts():
	import commands, string
	mounts = []
	data = commands.getoutput('/etc/mount')
	lines = string.splitfields(data, '\n')
	for line in lines:
		words = string.split(line)
		if len(words) >= 3 and words[1] == 'on':
			mounts.append(words[2])
	return mounts
Example #36
 def parse_line(self, line):
     try:
         fields = string.splitfields(line, '\t')
         url = string.strip(fields[0])
         timestamp = string.atoi(string.strip(fields[1]))
         return (url, '', timestamp or now())
     except (ValueError, IndexError, TypeError):
         self._error(line)
         return None
Example #37
def sendportcmd(s, f, port):
	hostname = gethostname()
	hostaddr = gethostbyname(hostname)
	hbytes = string.splitfields(hostaddr, '.')
	pbytes = [`port/256`, `port%256`]
	bytes = hbytes + pbytes
	cmd = 'PORT ' + string.joinfields(bytes, ',')
	s.send(cmd + '\r\n')
	code = getreply(f)
Example #38
def addr2bin(addr):
    if type(addr) == type(0):
        return addr
    bytes = string.splitfields(addr, '.')
    if len(bytes) != 4: raise ValueError, 'bad IP address'
    n = 0
    for byte in bytes:
        n = n << 8 | string.atoi(byte)
    return n
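For example:

addr2bin('127.0.0.1')   # -> 2130706433  (0x7F000001)
addr2bin(2130706433)    # -> 2130706433  (integers are passed through unchanged)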
Example #39
    def sendport(self, host, port):
        '''Send a PORT command with the current host and the given
		port number.
		'''
        hbytes = string.splitfields(host, '.')
        pbytes = [ ` port / 256 `, ` port % 256 `]
        bytes = hbytes + pbytes
        cmd = 'PORT ' + string.joinfields(bytes, ',')
        return self.voidcmd(cmd)
Example #40
def load_module(name, ty, theFile, fromMod=None):
    """Load module NAME, type TYPE, from file FILE.

    Optional arg fromMod indicates the module from which the load is being done
    - necessary for detecting import of __ from a package's __main__ module.

    Return the populated module object."""

    # Note: we mint and register intermediate package directories, as necessary
    
    # Determine packagepath extension:

    # Establish the module object in question:
    theMod = procure_module(name)
    nameTail = string.splitfields(name, '.')[-1]
    thePath = theFile.name

    if ty == PY_SOURCE:
	exec_into(theFile, theMod, theFile.name)

    elif ty == PY_COMPILED:
	pyc = open(theFile.name, 'rb').read()
	if pyc[0:4] != imp.get_magic():
	    raise ImportError, 'bad magic number: ' + theFile.name	# ===>
	code = marshal.loads(pyc[8:])
	exec_into(code, theMod, theFile.name)

    elif ty == C_EXTENSION:
	try:
	    theMod = imp.load_dynamic(nameTail, thePath, theFile)
	except:
	    # ?? Ok to embellish the error message?
	    raise sys.exc_type, ('%s (from %s)' %
				 (str(sys.exc_value), theFile.name))

    elif ty == PY_PACKAGE:
	# Load constituents:
	if (os.path.exists(thePath + '/' + PKG_MAIN_NM) and
	    # pkg has a __main__, and this import not already from __main__, so
	    # __main__ can 'import __', or even better, 'from __ import *'
	    ((theMod.__name__ != PKG_MAIN_NM) and (fromMod.__ == theMod))):
	    exec_into(thePath + '/' + PKG_MAIN_NM, theMod, theFile.name)
	else:
	    # ... or else recursively load constituent modules.
	    prospects = mod_prospects(thePath)
	    for item in prospects:
		theMod.__dict__[item] = import_module(item,
						      theMod.__dict__,
						      theMod.__dict__,
						      None,
						      theMod)
		
    else:
	raise ImportError, 'Unimplemented import type: %s' % ty		# ===>
	
    return theMod
Example #41
 def extractarglist(self, args):
     args = string.strip(args)
     if not args or args == "void":
         return []
     parts = map(string.strip, string.splitfields(args, ","))
     arglist = []
     for part in parts:
         arg = self.extractarg(part)
         arglist.append(arg)
     return arglist
Example #42
def get_video(IPADDR, VIDEOID, DIRECTORY, TEMPDIR, FFMPEG, FILENAME, DEBUG, TESTING, *ts):
    resp = urllib.urlopen('http://'+IPADDR+':18080/pvr/'+str(VIDEOID)+'/segs').read()
    final = string.splitfields(resp, '.ts')[:-1]
    final = string.splitfields(final[-1], '>')[-1]
    tmp = final
    while(tmp[0]) == '0':
        tmp = tmp[1:]
    final_int = eval(tmp)
    #temp_id = str(uuid.uuid4())+'-'
    temp_id = str(VIDEOID)+'-'
    counter = 1
    valid = 1
    concat = ''
    while valid:
        cmd = 'http://'+IPADDR+':18080/pvr/'+str(VIDEOID)+'/segs/'+string.zfill(counter,5)+'.ts'
        newfile = TEMPDIR+'/'+temp_id+string.zfill(counter,5)+'.ts'
        concat = concat + newfile+'|'
        if DEBUG: print '   - Retrieving '+cmd+ ' ('+str(int(float(counter)/float(final_int)*100.0))+'%)'
        urllib.urlretrieve(cmd, newfile)
        if string.zfill(counter,5) == final:
            valid = 0
        counter = counter + 1
        if TESTING and counter > 5:
            valid = 0 ## Only process the first 5 segments
    cmd = FFMPEG+' -y -i "concat:'+concat[:-1]+'" -bsf:a aac_adtstoasc -c copy "'+DIRECTORY+'/'+FILENAME+'.mp4"'
    if ts:
        cmd = FFMPEG+' -y -loglevel panic -i "concat:'+concat[:-1]+'" -c copy "'+DIRECTORY+'/'+FILENAME+'.ts"'
    if DEBUG: print cmd
    #os.system(cmd)
    subprocess.call(cmd)
    counter = 1
    valid = 1
    while valid:
        newfile = TEMPDIR+'/'+temp_id+string.zfill(counter,5)+'.ts'
        try:
            os.remove(newfile)
        except:
            ohwell = 1
            if DEBUG: print "Can't Delete " + newfile
        if string.zfill(counter,5) == final:
            valid = 0
        counter = counter + 1
    return 0
Example #43
 def __repr__(self):
     s = '(:' + self.type + '\n'
     for k, v in self.itemlist:
         v = str(v)
         if '\n' in v:
             lines = string.splitfields(v, '\n')
             v = string.joinfields(lines, '\n  ')
         s = s + '  :' + k + ' ' + v + '\n'
     s = s + ')'
     return s
Example #44
def sepa_date(date):
#
#       Separates dates that have been connected by 'and' (ie. between
#       m1/d1/y1 and m2/d2/y2).  Also strips off any leading or trailing
#       blanks.
#
        dates = string.splitfields(string.lower(date),'and')
        for i in range(len(dates)):
                dates[i] = string.strip(dates[i])
        return(dates)
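For example (hypothetical input):

sepa_date(' 1/1/99 AND 2/2/99 ')   # -> ['1/1/99', '2/2/99']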
Example #45
def expression(parmstring):
    eqMatch = eqRE.match(parmstring)
    if eqMatch:
        return 'defined (%s)' % eqMatch.group(2)
    inMatch = inRE.match(parmstring)
    if inMatch:
        words = string.splitfields(inMatch.group(2), ',')
        words = map(lambda word: 'defined (%s)' % string.strip(word), words)
        return string.joinfields(words, ' || ')
    raise InvalidConcomExpression, parmstring
Example #46
	def _readChunks(self, filename):
		#loads a doubletalk file. private utility, do not call
		# loads whole file
		f = open(filename,'r')
		bigstring = f.read() 
		f.close()
		chunks = string.splitfields(bigstring,'\n\n')
		if chunks[-1] == '':
			chunks = chunks[:-1]
		return chunks	
Example #47
 def setup(self):
     self.__dict__['_name'] = AttributeValueTemplate.AttributeValueTemplate(
         self.getAttributeNS(EMPTY_NAMESPACE, 'name'))
     self.__dict__[
         '_namespace'] = AttributeValueTemplate.AttributeValueTemplate(
             self.getAttributeNS(EMPTY_NAMESPACE, 'namespace'))
     self.__dict__['_useAttributeSets'] = string.splitfields(
         self.getAttributeNS(EMPTY_NAMESPACE, 'use-attribute-sets'))
     self.__dict__['_nss'] = xml.dom.ext.GetAllNs(self)
     return
Example #48
 def concomExpression(self, parmstring):
     match = self.eqRE.match(parmstring)
     if match:
         return 'defined (%s)' % match.group(2)
     match = self.inRE.match(parmstring)
     if match:
         words = splitfields(match.group(2), ',')
         words = map(lambda word: 'defined (%s)' % strip(word), words)
         return joinfields(words, ' || ')
     raise 'Invalid Concom expression', parmstring
Example #49
 def do_update(self):
     format = self.format_list[self.format.get()][1]
     view = self.view_list[self.view.get()][1]
     s = commands.getoutput('ps %s %s' % (view, format))
     list = splitfields(s, '\n')
     self.header.set(list[0] + '          ')
     del list[0]
     self.frame.list.delete(0, AtEnd())
     for line in list:
         self.frame.list.insert(0, line)
Example #50
    def close_inforef(self):
	text = self.collectsavings()
	args = string.splitfields(text, ',')
	n = len(args)
	for i in range(n):
	    args[i] = string.strip(args[i])
	while len(args) < 3: args.append('')
	node = args[0]
	file = args[2]
	self.write('`', file, '\', node `', node, '\'')
Example #51
def shrubOptions(options):
    if options.config:
        options.includeDict = readConfig(options.config, 'includes')
    else:
        options.includeDict = {}
    optIncludes = options.include
    includes = []
    for include in optIncludes:
        includes = includes + splitfields(include, ',')
    options.include = includes
Example #52
 def handle_literal(self, text):
     lines = string.splitfields(text, '\n')
     for i in range(1, len(lines)):
         lines[i] = string.expandtabs(lines[i], 8)
     for line in lines[:-1]:
         self.fmt.addword(line, 0)
         self.fmt.flush()
         self.fmt.nospace = 0
     for line in lines[-1:]:
         self.fmt.addword(line, 0)
Example #53
 def _checkIP(self, ip):
     try:
         [a, b, c, d] = map(string.atoi, string.splitfields(ip, '.'))
         x = range(0, 256)
         for i in (a, b, c, d):
             if i not in x:
                 return False
         return True
     except:
         return False
Example #54
 def send_literal_data(self, data):
     if not data:
         return
     lines = string.splitfields(data, '\n')
     text = string.expandtabs(lines[0])
     for l in lines[1:]:
         self.OutputLine(text, 1)
         text = string.expandtabs(l)
     self.OutputLine(text, 0)
     self.atbreak = 0
Example #55
 def push_string(self, data):
     lines = string.splitfields(data, '\n')
     linecnt = len(lines) - 1
     for line in lines:
         # do flowing text
         self.push_string_flowing(line)
         # for every line but the last, put a hard newline after it
         if linecnt:
             self.push_hard_newline()
         linecnt = linecnt - 1
Example #56
 def SetApplicationPaths(self):
     # Load the users/application paths
     new_path = []
     apppath = string.splitfields(
         win32ui.GetProfileVal('Python', 'Application Path', ''), ';')
     for path in apppath:
         if len(path) > 0:
             new_path.append(win32ui.FullPath(path))
     for extra_num in range(1, 11):
         apppath = string.splitfields(
             win32ui.GetProfileVal('Python',
                                   'Application Path %d' % extra_num, ''),
             ';')
         if len(apppath) == 0:
             break
         for path in apppath:
             if len(path) > 0:
                 new_path.append(win32ui.FullPath(path))
     sys.path = new_path + sys.path
Example #57
 def __repr__(self):
     s = '(\n'
     for item in self.list:
         item = str(item)
         if '\n' in item:
             lines = string.splitfields(item, '\n')
             item = string.joinfields(lines, '\n  ')
         s = s + '  ' + item + '\n'
     s = s + ')'
     return s
Example #58
def getUserName():
    try:
	import os, pwd, string
    except ImportError:
	return 'unknown user'
    pwd_entry = pwd.getpwuid(os.getuid())
    name = string.strip(string.splitfields(pwd_entry[4], ',')[0])
    if name == '':
	name = pwd_entry[0]
    return name
Example #59
 def extract_android_module(self):
     list_files = os.walk(self.jad_dir)
     self.android_modules = dict()
     for root, dirs, files in list_files:
         for f in files:
             for line in open(os.path.join(root, f)):
                 line = line.strip('\n')
                 head = string.strip(line, ' ')[0:6]
                 if head == 'import':
                     package = string.splitfields(line.strip(';'), ' ')[1]
                     if 'android' in package:
                         module = string.splitfields(package, '.')[-1]
                         if module != '*' and module not in self.ignore_list:
                             file_key = os.path.join(root, f)
                             module_list = self.android_modules.get(
                                 file_key) or []
                             module_list.append(module)
                             self.android_modules[file_key] = module_list
     return self.android_modules
Example #60
 def __init__(self, typenum, fp):
     # start of packet and type have already been read
     data = fp.read(DATASIZE * 2)
     self.pktlen, self.timestamp = struct.unpack('2l', data)
     self.pktlen = ulong(self.pktlen)
     self.timestamp = ulong(self.timestamp)
     self.msgtype = typenum
     self.pkthandler = string.joinfields(
         map(string.capitalize,
             string.splitfields(Types[typenum], '_')[1:]), '')