Example #1
  def constructmenuitemstring(self,items,keybindingsdict):
    def emptystring(num):
      if num<=0:
        return ""
      return " "+emptystring(num-1)
    
    if len(items)==1:
      return items[0]

    keykey=string.lstrip(string.rstrip(items[1]))
    if keykey not in keybindingsdict:
      ret = string.rstrip(items[0])
      commands[string.lstrip(ret)] = string.lstrip(items[1])
      return ret
    
    key=keybindingsdict[keykey]
    qualifier=""
    
    for item in key[1]:
      qualifier+=get_key_name(item)+" + "

      #print "items[0]",items[0],len(items[0]),qualifier
    stripped0 = string.rstrip(items[0])
    ret = stripped0 + emptystring(41-len(stripped0)) + qualifier + get_key_name(key[0][0])
    commands[string.lstrip(ret)] = string.lstrip(items[1])
    return ret
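The recursive emptystring() helper above only pads the stripped label out to column 41 before the key-binding text is appended. A minimal sketch of the same alignment using the str.rstrip and str.ljust methods (construct_menu_item and its arguments are illustrative, not part of the original class):

def construct_menu_item(label, binding_text, width=41):
    # Pad the trimmed label to a fixed column, then append the binding text,
    # mirroring stripped0 + emptystring(41 - len(stripped0)) above.
    return label.rstrip().ljust(width) + binding_text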
Example #2
    def getLabel(self, partition):
        label = None
        fd = self.openPartition(partition)

        # valid block sizes in reiserfs are 512 - 8192, powers of 2
        # we put 4096 first, since it's the default
        # reiserfs superblock occupies either the 2nd or 16th block
        for blksize in (4096, 512, 1024, 2048, 8192):
            for start in (blksize, (blksize*16)):
                try:
                    os.lseek(fd, start, 0)
                    # read 120 bytes to get s_magic and s_label
                    buf = os.read(fd, 120)

                    # see if this block is the superblock
                    # this reads reiserfs_super_block_v1.s_magic as defined
                    # in include/reiserfs_fs.h in the reiserfsprogs source
                    m = string.rstrip(buf[52:61], "\x00")
                    if m == "ReIsErFs" or m == "ReIsEr2Fs" or m == "ReIsEr3Fs":
                        # this reads reiserfs_super_block.s_label as
                        # defined in include/reiserfs_fs.h
                        label = string.rstrip(buf[100:116], "\x00")
                        os.close(fd)
                        return label
                except OSError, e:
                    # [Error 22] probably means we're trying to read an
                    # extended partition. 
                    e = "error reading reiserfs label on %s: %s" %(partition.getPath(), e)
                    raise YaliException, e
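The probe above looks for the reiserfs magic at offset 52 and the volume label at offset 100 of each candidate superblock, trimming the NUL padding from both. A minimal modern-Python sketch of the same idea, assuming path names a readable block device (read_reiserfs_label is illustrative, not the yali API):

def read_reiserfs_label(path):
    # Check each candidate superblock offset; s_magic sits at offset 52
    # and s_label at offset 100, both padded with NUL bytes.
    with open(path, "rb") as dev:
        for blksize in (4096, 512, 1024, 2048, 8192):
            for start in (blksize, blksize * 16):
                dev.seek(start)
                buf = dev.read(120)
                if buf[52:61].rstrip(b"\x00") in (b"ReIsErFs", b"ReIsEr2Fs", b"ReIsEr3Fs"):
                    return buf[100:116].rstrip(b"\x00").decode("latin-1")
    return None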
Example #3
def read(category, disc_id, server_url=default_server, 
         user=default_user, host=hostname, client_name=name,
         client_version=version):

    url = "%s?cmd=cddb+read+%s+%s&hello=%s+%s+%s+%s&proto=%i" % \
          (server_url, category, disc_id, user, host, client_name,
           client_version, proto)

    response = urllib.urlopen(url)
    
    header = string.split(string.rstrip(response.readline()), ' ', 3)

    header[0] = string.atoi(header[0])
    if header[0] == 210 or header[0] == 417: # success or access denied
        reply = []

        for line in response.readlines():
            line = string.rstrip(line)

            if line == '.':
                break;

            line = string.replace(line, r'\t', "\t")
            line = string.replace(line, r'\n', "\n")
            line = string.replace(line, r'\\', "\\")

            reply.append(line)

        if header[0] == 210:                # success, parse the reply
            return [ header[0], parse_read_reply(reply) ]
        else:                                # access denied. :(
            return [ header[0], reply ]
    else:
        return [ header[0], None ]
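The loop above trims each reply line and reverses the CDDB protocol's \t, \n and \\ escapes until the "." terminator is reached. A minimal sketch of just that unescaping step with str methods (unescape_cddb_lines and reply_lines are illustrative):

def unescape_cddb_lines(reply_lines):
    # Stop at the "." terminator and undo the protocol escapes, in the
    # same order as the replace() calls above.
    out = []
    for line in reply_lines:
        line = line.rstrip()
        if line == ".":
            break
        out.append(line.replace(r"\t", "\t").replace(r"\n", "\n").replace(r"\\", "\\"))
    return out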
Example #4
def main():
    addr = {}

    if len(sys.argv) != 2:
        print "Usage: {0} ipaddress".format(sys.argv[0])
        sys.exit(1)

    ctx = getdns.context_create()
    addr["address_data"] = sys.argv[1]
    if string.find(sys.argv[1], ":") != -1:
        addr["address_type"] = "IPv6"
    elif string.find(sys.argv[1], ".") != -1:
        addr["address_type"] = "IPv4"
    else:
        print "{0}: undiscernable address type".format(sys.argv[1])
        sys.exit(1)
    results = getdns.hostname(ctx, address=addr)
    if results["status"] == getdns.GETDNS_RESPSTATUS_GOOD:
        print "Hostnames:"
        for responses in results["replies_tree"]:
            for ans in responses["answer"]:
                print string.rstrip(ans["rdata"]["ptrdname"], ".")

    if results["status"] == getdns.GETDNS_RESPSTATUS_NO_NAME:
        print "{0} not found".format(sys.argv[1])
Example #5
 def read( self, faFileHandler ):
     line = faFileHandler.readline()
     if line == "":
         self.header = None
         self.sequence = None
         return
     while line == "\n":
         line = faFileHandler.readline()
     if line[0] == '>':
         self.header = string.rstrip(line[1:])
     else:
         print "error, line is",string.rstrip(line)
         return
     line = " "
     seq = cStringIO.StringIO()
     while line:
         prev_pos = faFileHandler.tell()
         line = faFileHandler.readline()
         if line == "":
             break
         if line[0] == '>':
             faFileHandler.seek( prev_pos )
             break
         seq.write( string.rstrip(line) )
     self.sequence = seq.getvalue()
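The reader above collects sequence lines until the next '>' header, then uses tell()/seek() to rewind so the following record starts cleanly. A minimal standalone sketch of the same pattern (read_fasta_record is illustrative, not the original class method):

def read_fasta_record(handle):
    # Return (header, sequence) for the next FASTA record, rewinding to
    # the start of the following ">" line as the method above does.
    line = handle.readline()
    while line == "\n":
        line = handle.readline()
    if not line or not line.startswith(">"):
        return None, None
    header = line[1:].rstrip()
    chunks = []
    pos = handle.tell()
    line = handle.readline()
    while line and not line.startswith(">"):
        chunks.append(line.rstrip())
        pos = handle.tell()
        line = handle.readline()
    handle.seek(pos)
    return header, "".join(chunks)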
Example #6
def colorize(line):
	orig_line = line
	try:
		time = get_first_bracket(line)
		line = remove_first_bracket(line)

		found = 0
		for log in log_color.keys():
			if string.find(line, log) >= 0:
				found = 1
				line = remove_first_bracket(line)
				line = string.strip(line)
				line = string.split(line, ' ',2) ## XXX: magic number 2
				line[filename] = GREEN + line[filename] + CLEAR
				line[message] = log_color[log] + line[message] + CLEAR
				break

		if not found:
			return string.rstrip(orig_line)

		if show_time:
			line.insert(0,time)
	except:
		return string.rstrip(orig_line)

	return string.join(line, ' ')
Example #7
	def include(self, domain, initialstate, goalstate, plan, success, shouldIWrite=True):
		domain = string.rstrip(domain, '\n')
		initialstate = string.rstrip(initialstate, '\n')
		goalstate = string.rstrip(goalstate, '\n')
		#try:
		if shouldIWrite:
			try:
				#print 'PLAN: ', plan
				D = open('cache_D'+str(self.availableId)+'.py', 'w')
				D.write(domain)
				D.close()
				I = open('cache_I'+str(self.availableId)+'.xml', 'w')
				I.write(initialstate)
				I.close()
				G = open('cache_G'+str(self.availableId)+'.py', 'w')
				G.write(goalstate)
				G.close()
				P = open('cache_plan'+str(self.availableId)+'.agglp', 'w')
				if success: P.write(plan)
				else: P.write('fail')
				P.close()
			except:
				print 'Can\'t write cache'
				traceback.print_exc()
				sys.exit(-1)
		self.availableId += 1
		md = md5.new()
		md.update(domain+initialstate+goalstate)
		checksum = md.hexdigest()
		print 'Including planning context with checksum', checksum
		try:
			self.data[checksum].append((domain, initialstate, goalstate, plan, success))
		except:
			self.data[checksum] = []
			self.data[checksum].append((domain, initialstate, goalstate, plan, success))
Example #8
 def create_session(self, request, email, ssid, expiration):
   """
   Encrypt parameters, set valid in DB, set cookie on client
   """
   account = memcache_db.get_entity(email, "Accounts")
   if account != None:
     update_fields = {"cookieKey" : ssid}
     memcache_db.update_fields(email, "Accounts", update_fields)
   
     email_enc = encryption.des_encrypt_str(email)
     ssid_enc = encryption.des_encrypt_str(ssid)
     exp_enc = encryption.des_encrypt_str(expiration)
     
     import base64
     import string
     email_encoded = string.rstrip(base64.encodestring(email_enc), "\n")
     ssid_encoded = string.rstrip(base64.encodestring(ssid_enc), "\n")
     exp_encoded = string.rstrip(base64.encodestring(exp_enc), "\n")
 
     # the email will be set as the key so we can use it to look up in the DB
     request.response.headers.add_header("Set-Cookie", WEB_ADMIN_PARAMS.COOKIE_EMAIL_PARAM + "=" + email_encoded)
     request.response.headers.add_header("Set-Cookie", WEB_ADMIN_PARAMS.COOKIE_KEY_PARAM + "=" + ssid_encoded)
     request.response.headers.add_header("Set-Cookie", WEB_ADMIN_PARAMS.COOKIE_EXPIRATION + "=" + exp_encoded)
     
     """ Create a new session object and return it """
     self.email = email
     self.ssid = ssid
     self.expiration = expiration
     self.account = account
   
     return self
   else:
     return None
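The rstrip(..., "\n") calls above are needed because base64.encodestring() appends a trailing newline to its output. A minimal sketch of the same encoding step in current Python, where base64.b64encode() emits no newline (encrypt_bytes stands in for the DES step and is purely illustrative):

import base64

def encode_cookie_value(encrypt_bytes, value):
    # b64encode() appends no trailing newline, unlike the legacy
    # encodestring() used above, so no rstrip is needed here.
    return base64.b64encode(encrypt_bytes(value)).decode("ascii")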
Example #9
	def get_sender(self, ignore_header=""):
		"""
		Returns a tuple in the format (sender_name, sender_email). Some processing is
		done to treat X-Primary-Address/Resent-From/Reply-To/From. Headers in "ignore_header"
		are not checked.
		"""

		headers = ["X-Primary-Address", "Resent-From", "Reply-To", "From"];

		## Remove unwanted headers
		def fn_remove(x, y = ignore_header): return(x != y)
		headers = filter(fn_remove, headers)

		sender_name = ''
		sender_mail = ''

		for hdr in headers:
			(sender_name, sender_mail) = self.msg.getaddr(hdr)

			if sender_name == None: sender_name = ''
			if sender_mail == None: sender_mail = ''

			## Decode sender name and return if found
			if sender_mail:
				if sender_name:
					sender_name = decode_header(sender_name)
					sender_name = sender_name[0][0]
				break

		return(string.rstrip(sender_name), string.rstrip(sender_mail))
Example #10
def getallsubs(content, language, search_string, season, episode):
    i = 0
    for matches in re.finditer(subtitle_pattern, content, re.IGNORECASE | re.DOTALL):
        i = i + 1
        title_found = unescape(string.rstrip(matches.group(2)))
        log( __name__ , title_found )
        log( __name__ , search_string )
        if string.find(string.lower(title_found),string.lower(search_string)) > -1:
            subtitle_id    = matches.group(1)
            year_found     = matches.group(3)
            season_episode_found = string.rstrip(matches.group(4))
            filename = title_found
            languageshort = toOpenSubtitles_two(language)
            match = re.match(subinfo_pattern, matches.group(5), re.IGNORECASE | re.DOTALL)
            if match:
                description = match.group(1)
                filename = filename + ' ' + description
            if len(season) > 0:
                season_episode = 'Sezona' + season + ' Epizoda' + episode
                if season_episode == season_episode_found:
                    subtitles_list.append({'rating': '0', 'sync': False, 'filename': filename, 'subtitle_id': subtitle_id, 'language_flag': 'flags/' + languageshort+ '.gif', 'language_name': language})
                    log( __name__ ,"%s Subtitles found: %s (id = %s)" % (debug_pretext, filename, subtitle_id))
            else:
                if len(season_episode_found) == 0:
                    subtitles_list.append({'rating': '0', 'sync': False, 'filename': filename, 'subtitle_id': subtitle_id, 'language_flag': 'flags/' + languageshort+ '.gif', 'language_name': language})
                    log( __name__ ,"%s Subtitles found: %s (id = %s)" % (debug_pretext, filename, subtitle_id))
Example #11
def build_para(doc, parent, start, i):
    children = parent.childNodes
    after = start + 1
    have_last = 0
    BREAK_ELEMENTS = PARA_LEVEL_ELEMENTS + RECURSE_INTO_PARA_CONTAINERS
    # Collect all children until \n\n+ is found in a text node or a
    # member of BREAK_ELEMENTS is found.
    for j in range(start, i):
        after = j + 1
        child = children[j]
        nodeType = child.nodeType
        if nodeType == ELEMENT:
            if child.get_tagName() in BREAK_ELEMENTS:
                after = j
                break
        elif nodeType == TEXT:
            pos = string.find(child.data, "\n\n")
            if pos == 0:
                after = j
                break
            if pos >= 1:
                child.splitText(pos)
                break
    else:
        have_last = 1
    if (start + 1) > after:
        raise ConversionError(
            "build_para() could not identify content to turn into a paragraph")
    if children[after - 1].nodeType == TEXT:
        # we may need to split off trailing white space:
        child = children[after - 1]
        data = child.data
        if string.rstrip(data) != data:
            have_last = 0
            child.splitText(len(string.rstrip(data)))
    para = doc.createElement(PARA_ELEMENT)
    prev = None
    indexes = range(start, after)
    indexes.reverse()
    for j in indexes:
        node = parent.childNodes[j]
        parent.removeChild(node)
        para.insertBefore(node, prev)
        prev = node
    if have_last:
        parent.appendChild(para)
        parent.appendChild(doc.createTextNode("\n\n"))
        return len(parent.childNodes)
    else:
        nextnode = parent.childNodes[start]
        if nextnode.nodeType == TEXT:
            if nextnode.data and nextnode.data[0] != "\n":
                nextnode.data = "\n" + nextnode.data
        else:
            newnode = doc.createTextNode("\n")
            parent.insertBefore(newnode, nextnode)
            nextnode = newnode
            start = start + 1
        parent.insertBefore(para, nextnode)
        return start + 1
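The splitText(len(string.rstrip(data))) call above uses the length of the stripped string as the cut point, so the trailing whitespace ends up in its own text node. A minimal sketch of that split on a plain string (split_trailing_whitespace is illustrative):

def split_trailing_whitespace(data):
    # Cut at len(data.rstrip()) so the trailing whitespace becomes a
    # separate piece, e.g. "foo  \n\n" -> ("foo", "  \n\n").
    cut = len(data.rstrip())
    return data[:cut], data[cut:]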
Example #12
    def on_afMoleculeAddPB_clicked(self):

        molecule = str(self.afMoleculeLE.text())
        molecule = string.rstrip(molecule)
        rows = self.afTable.rowCount()
        if molecule == "":
            return

        # check if molecule with this name already exist               
        moleculeAlreadyExists = False
        for rowId in range(rows):
            name = str(self.afTable.item(rowId, 0).text())
            name = string.rstrip(name)
            if name == molecule:
                moleculeAlreadyExists = True
                break

        if moleculeAlreadyExists:
            QMessageBox.warning(self, "Molecule Name Already Exists",
                                "Molecule name already exist. Please choose different name", QMessageBox.Ok)
            return

        self.afTable.insertRow(rows)
        moleculeItem = QTableWidgetItem(molecule)
        self.afTable.setItem(rows, 0, moleculeItem)

        # reset molecule entry line
        self.afMoleculeLE.setText("")
        return
Example #13
def reformat_paragraph(data, limit=70):
    lines = string.split(data, "\n")
    i = 0
    n = len(lines)
    while i < n and is_all_white(lines[i]):
        i = i+1
    if i >= n:
        return data
    indent1 = get_indent(lines[i])
    if i+1 < n and not is_all_white(lines[i+1]):
        indent2 = get_indent(lines[i+1])
    else:
        indent2 = indent1
    new = lines[:i]
    partial = indent1
    while i < n and not is_all_white(lines[i]):
        # XXX Should take double space after period (etc.) into account
        words = re.split("(\s+)", lines[i])
        for j in range(0, len(words), 2):
            word = words[j]
            if not word:
                continue # Can happen when line ends in whitespace
            if len(string.expandtabs(partial + word)) > limit and \
               partial != indent1:
                new.append(string.rstrip(partial))
                partial = indent2
            partial = partial + word + " "
            if j+1 < len(words) and words[j+1] != " ":
                partial = partial + " "
        i = i+1
    new.append(string.rstrip(partial))
    # XXX Should reformat remaining paragraphs as well
    new.extend(lines[i:])
    return string.join(new, "\n")
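The reflow above is a greedy fill: words are appended to a partial line until its tab-expanded length would exceed the limit, at which point the partial line is rstripped and a new one started at the continuation indent. A minimal sketch of just that fill step with str methods (wrap_line and its arguments are illustrative):

def wrap_line(words, indent, limit=70):
    # Start a new output line when adding the next word would push the
    # expanded length past the limit, as in the loop above.
    lines, partial = [], indent
    for word in words:
        if len((partial + word).expandtabs()) > limit and partial != indent:
            lines.append(partial.rstrip())
            partial = indent
        partial += word + " "
    lines.append(partial.rstrip())
    return lines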
Example #14
def get_url_contents(url):
    global lns
    lns = []
    url_comps = urlparse(url)
    if (url_comps[0] == "file"):
	f = open(url_comps[2])
	ln = f.readline()
	while ln:
	    lns.append(string.rstrip(ln))
	    ln = f.readline()
	f.close()
    elif (url_comps[0] == "ftp"):
	def ftp_line(ln):
	    lns.append(ln)
	h = ftplib.FTP(url_comps[1])
	h.login()
	i = string.rfind(url_comps[2], '/')
	if (i >= 0):
	    h.cwd(url_comps[2][:i])
	    h.retrlines("RETR "+url_comps[2][i+1:], ftp_line)
	else:
	    h.retrlines("RETR "+url_comps[2], ftp_line)
	h.close()
    elif (url_comps[0] == "http"):
	h = httplib.HTTP(url_comps[1])
	h.putrequest('GET', url_comps[2])
	h.putheader('Accept', 'text/html')
	h.putheader('Accept', 'text/plain')
	h.endheaders()
	errcode, errmsg, headers = h.getreply()
	# HTTP/1.1 replies seem to generate an errorcode of -1, so try
	# to handle this case.  This may simply be a manifestation of
	# a broken Python 1.4 httplib module.  This bug has been fixed
	# with Python version 1.5.
	version = sys.version[0:3]
	if ((version < "1.5") and (errcode == -1)):
	    try:
		real_errcode = string.atoi(string.split(errmsg)[1])
	    except ValueError:
		real_errcode = -1 # yes, it really is bogus :-/
	    sys.stderr.write("%d" % (real_errcode)) # Should be 200
	else:
	    sys.stderr.write("%d" % (errcode)) # Should be 200
            if (errcode == 200):
                f = h.getfile()
                ln = f.readline()
                # once again, try to compensate for broken behavior on HTTP/1.1
                # by eating the header lines which would otherwise show up in
                # the data.  This bug has been fixed with Python version 1.5.
                if ((version < "1.5") and (errcode == -1) and (real_errcode <> -1)):
                    while ((ln) and
                           ((len(ln) > 2) or
                            (ln[0] <> "\r") or (ln[-1] <> "\n"))):
                        ln = f.readline()
                while ln:
                    lns.append(string.rstrip(ln)) # Get the raw HTML
                    ln = f.readline()
                f.close()
    return lns
Example #15
def toHexString(bytes=[], format=0):
    """Returns an hex string representing bytes

        bytes:  a list of bytes to stringify,
                    e.g. [59, 22, 148, 32, 2, 1, 0, 0, 13]
        format: a logical OR of
                COMMA: add a comma between bytes
                HEX: add the 0x chars before bytes
                UPPERCASE: use 0X before bytes (need HEX)
                PACK: remove blanks

        example:
        bytes = [ 0x3B, 0x65, 0x00, 0x00, 0x9C, 0x11, 0x01, 0x01, 0x03 ]

        toHexString(bytes) returns  3B 65 00 00 9C 11 01 01 03

        toHexString(bytes, COMMA) returns  3B, 65, 00, 00, 9C, 11, 01, 01, 03

        toHexString(bytes, HEX) returns
            0x3B 0x65 0x00 0x00 0x9C 0x11 0x01 0x01 0x03

        toHexString(bytes, HEX | COMMA) returns
            0x3B, 0x65, 0x00, 0x00, 0x9C, 0x11, 0x01, 0x01, 0x03

        toHexString(bytes, PACK) returns  3B6500009C11010103

        toHexString(bytes, HEX | UPPERCASE) returns
            0X3B 0X65 0X00 0X00 0X9C 0X11 0X01 0X01 0X03

        toHexString(bytes, HEX | UPPERCASE | COMMA) returns
            0X3B, 0X65, 0X00, 0X00, 0X9C, 0X11, 0X01, 0X01, 0X03
    """

    from string import rstrip

    for byte in tuple(bytes):
        pass

    if type(bytes) is not list:
        raise TypeError('not a list of bytes')

    if bytes == None or bytes == []:
        return ""
    else:
        pformat = "%-0.2X"
        if COMMA & format:
            pformat = pformat + ","
        pformat = pformat + " "
        if PACK & format:
            pformat = rstrip(pformat)
        if HEX & format:
            if UPPERCASE & format:
                pformat = "0X" + pformat
            else:
                pformat = "0x" + pformat
        return rstrip(rstrip(
                reduce(lambda a, b: a + pformat % ((b + 256) % 256),
                        [""] + bytes)), ',')
Example #16
def query(track_info, server_url=default_server,
	  user=default_user, host=hostname, client_name=name,
          client_version=version, trying=0):

    disc_id = track_info[0]
    num_tracks = track_info[1]

    query_str = (('%08lx %d ') % (long(disc_id), num_tracks))

    for i in track_info[2:]:
	query_str = query_str + ('%d ' % i)
	
    query_str = urllib.quote_plus(string.rstrip(query_str))

    url = "%s?cmd=cddb+query+%s&hello=%s+%s+%s+%s&proto=%i" % \
	  (server_url, query_str, user, host, client_name,
           client_version, proto)

    response = urllib.urlopen(url)
    
    # Four elements in header: status, category, disc-id, title
    header = string.split(string.rstrip(response.readline()), ' ', 3)

    try:
        header[0] = string.atoi(header[0])
    except:
        if trying > 10:
            return [ 900, None ]
        return query(track_info, default_server,
                     default_user, hostname, name, version, trying+1)

    if header[0] == 200:		# OK
	result = { 'category': header[1], 'disc_id': header[2], 'title':
		   header[3] }

	return [ header[0], result ]

    elif header[0] == 211 or header[0] == 210: # multiple matches
	result = []

	for line in response.readlines():
	    line = string.rstrip(line)

	    if line == '.':		# end of matches
		break
					# otherwise:
					# split into 3 pieces, not 4
					# (thanks to bgp for the fix!)
	    match = string.split(line, ' ', 2)

	    result.append({ 'category': match[0], 'disc_id': match[1], 'title':
			    match[2] })

	return [ header[0], result ]

    else:
	return [ header[0], None ]
Example #17
	def setName(self, fileName):
		self.name = fileName
		if (string.count(self.name,"gdc")):
			fileName = string.lstrip(fileName,"gdc")
		if (string.count(self.name,"dat")):
			fileName = string.rstrip(fileName,".dat")
		if (string.count(self.name,".000")):
			fileName = string.rstrip(fileName,".000")
		self.runNumber = int(fileName)
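Note that string.rstrip(fileName, ".dat"), like the str.rstrip method, removes any trailing run of the characters '.', 'd', 'a' and 't', not the literal suffix; it works here because run numbers are digits, but a name such as "data.dat" would be stripped down to nothing. A minimal sketch of explicit suffix removal (strip_suffix is illustrative):

def strip_suffix(name, suffix):
    # Remove a literal suffix; rstrip(name, suffix) treats the suffix as
    # a set of characters and keeps removing them from the end.
    if suffix and name.endswith(suffix):
        return name[:-len(suffix)]
    return name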
Example #18
def BrowseDefaultDir(root_window,calcon_in,DirNameVar,):
    startdir = string.rstrip(calcon_in['target_dir'])
    startdir = string.rstrip(startdir,"\\") #          Strip off the backslash
    DirName=tkFileDialog.askdirectory(parent=root_window,initialdir=startdir, \
        title = 'Select Default Directory')
#    endloc=string.rfind(DirName,'/')+1   # 
    DirName = string.replace(DirName, "/", "\\")
    DirNameVar.set(DirName)
    calcon_in['target_dir'] = DirName+"\\"            # Restore the backslash	
Example #19
def editLine(filename, username, increment):

    setFlag = 0

    lineCount = getLineCount(filename)
    print(lineCount, end="")

    dataFile = open(filename, "r")
    for line in dataFile:
        lineScan = string.rstrip(line)
        lineScan = string.split(lineScan)
        if lineScan[0] == username:
            setFlag = 1
    dataFile.close()

    if setFlag == 1:
        """Search for the line in question"""
        for line in fileinput.input("witch", inplace=1):

            """Format the line so the whole username is line[0]"""
            lineTemp = string.rstrip(line)
            lineTemp = string.split(lineTemp)

            if username == lineTemp[0]:
                newVal = int(lineTemp[1]) + increment
                if newVal < 0:
                    newVal = 0

                if fileinput.filelineno() == lineCount:
                    newLine = (
                        lineTemp[0]
                        + " "
                        + str(newVal)
                        + " "
                        + datetime.strftime(getCurrentDateTime(), "%d/%m/%Y/%H:%M:%S")
                    )
                else:
                    newLine = (
                        lineTemp[0]
                        + " "
                        + str(newVal)
                        + " "
                        + datetime.strftime(getCurrentDateTime(), "%d/%m/%Y/%H:%M:%S")
                        + "\n"
                    )
                line = line.replace(line, newLine)
                print(line, end="")
            else:
                print(line, end="")

    else:
        createdLine = (
            "\n" + username + " " + str(increment) + " " + datetime.strftime(getCurrentDateTime(), "%d/%m/%Y/%H:%M:%S")
        )
        dataFile = open(filename, "a")
        dataFile.write(createdLine)
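The rstrip-then-split pattern above turns each "username value timestamp" line into fields before comparing the username. A minimal sketch of that parsing step on its own (parse_record is illustrative):

def parse_record(line):
    # Trim the trailing newline, then split the whitespace-separated
    # username, value and timestamp fields used above.
    fields = line.rstrip().split()
    return fields[0], int(fields[1]), fields[2] if len(fields) > 2 else None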
Example #20
    def closeMeeting(self, meetingID):

        # JSON Response BEGIN (error message)
        jsonError = jsonObject()
        jsonError.clearResponse()
        jsonError.add('result','false')
        jsonError.add('method','closeMeeting')
        jsonError.add('meetingID',meetingID)
        jsonError.add('error','7404')
        #JSON Response END

        # Do meetingID-related input verification
        if (meetingID == ''):
            return jsonError.jsonResponse()

        if sys.platform.startswith('win'):
            archiveDirectory = string.rstrip(osconfig.slidedeckArchive(), '\\')
        else:
            archiveDirectory = string.rstrip(osconfig.slidedeckArchive(), '/')

        localMeetingDir = os.path.join(archiveDirectory, meetingID)

        if not os.path.isdir(localMeetingDir):
            return jsonError.jsonResponse()

        docList = os.listdir(localMeetingDir)

        if len(docList) == 0:
            return jsonError.jsonResponse()

        for i in range (0, len(docList)):
            if docList[i] == 'Preloaded': # we don't delete preloaded directories
                continue
            docPath = os.path.join(localMeetingDir, docList[i])
            try:
                shutil.rmtree(docPath)
            except:
                pass

        docList = os.listdir(localMeetingDir)
        if len(docList) == 0:
            try:
                os.rmdir(localMeetingDir)
            except:
                pass

        # JSON Response BEGIN
        jsonSuccess = jsonObject()
        jsonSuccess.clearResponse()
        jsonSuccess.add('result','true')
        jsonSuccess.add('method','closeMeeting')
        jsonSuccess.add('meetingID',meetingID)
        jsonSuccess.add('error', '7200')
        #JSON Response END

        return jsonSuccess.jsonResponse()
Example #21
File: plugin.py Project: dag/makfa
 def jbofihe(self, irc, msg, args, text):
     """<text>
     
     Analyze text with jbofihe, falls back on cmafihe for
     ungrammatical text.
     """
     pipe = Popen(['jbofihe', '-ie', '-x'], stdin=PIPE, stdout=PIPE)
     result = rstrip(pipe.communicate(text)[0])
     if pipe.returncode > 0:
         pipe = Popen('cmafihe', stdin=PIPE, stdout=PIPE)
         result = "not grammatical: " + rstrip(pipe.communicate(text)[0])
     irc.reply(result)
Example #22
def food(type, page):

	tree = html.fromstring(page.content)	
	counter = 1
	typearr = []

	while True:
		path = '//div[@class="mensa_day mensa_day_speise {}"]//tr[@class="mensa_day_speise_row"][{}]//td[@class="mensa_day_speise_'
		path = path.format(type, str(counter))
		
		pathyfy = lambda x, y : tree.xpath(path+x+'"]'+y)

		name = pathyfy('name', '/text()')

		if name == []:
			break

		#init food dict
		food = {'Name': None, 'Type': type, 'Price': None, 'Ampel': None, 'Veg': None, 'Bio': None, 'Klimaessen': None}

		#get name
		if name[1] == '        ':
			food['Name'] = string.rstrip(name[2], None)
		else:
			food['Name'] = string.strip(name[1], None)


		price = pathyfy('preis', '/text()')
		
		food['Price'] = string.rstrip(price[0], None)

		#get Ampel
		food['Ampel'] = pathyfy('name', '//a/@href')[0]

		#Vegetarian/Vegan option
		if pathyfy('name', '//img[@alt="Vegan"]') != []:
			food['Veg'] = 'vegan'
		elif pathyfy('name', '//img[@alt="Vegetarisch"]') != []:
			food['Veg'] = 'vegetarisch'

		#Bio option
		if pathyfy('name', '//img[@alt="Bio"]') != []:
			food['Bio'] = True

		#Klimaessen
		if pathyfy('name', '//img[@alt="Klimaessen"]') != []:
			food['Klimaessen'] = True

		typearr.append(food)
		counter += 1	

	return typearr
Example #23
    def GetFileName(type, filename):
        if type == "open":
            fn = qt.QFileDialog.getOpenFileName(qt.QString.null, qt.QString.null, qt.QMainWindow())
        else:
            fn = qt.QFileDialog.getSaveFileName(qt.QString.null, qt.QString.null, qt.QMainWindow())

        file = open(filename, "w")
        if not fn.isNull():
            print filename
            print fn.ascii()
            print string.rstrip(fn.ascii())
            file.write(string.rstrip(fn.ascii()))
        file.close()
Example #24
def main(argv):
    "Parse command line and do it..."
    import os, getopt, string
    
    queries = []
    url = os.getenv("SQLCLURL",default_url)
    fmt = default_fmt
    writefirst = 1
    verbose = 0
    
    # Parse command line
    try:
        optlist, args = getopt.getopt(argv[1:],'s:f:q:vlh?')
    except getopt.error as e:
        usage(1,e)
        
    for o,a in optlist:
        if     o=='-s': url = a
        elif o=='-f': fmt = a
        elif o=='-q': queries.append(a)
        elif o=='-l': writefirst = 0
        elif o=='-v': verbose += 1
        else: usage(0)
        
    if fmt not in formats:
        usage(1,'Wrong format!')

    # Enqueue queries in files
    for fname in args:
        try:
            queries.append(open(fname).read())
        except IOError as e:
            usage(1,e)

    # Run all queries sequentially
    for qry in queries:
        ofp = sys.stdout
        if verbose:
            write_header(ofp,'#',url,qry)
        file = query(qry,url,fmt)
        # Output line by line (in case it's big)
        line = file.readline()
        if line.startswith("ERROR"): # SQL Statement Error -> stderr
            ofp = sys.stderr
        if writefirst:
            ofp.write(string.rstrip(line)+os.linesep)
        line = file.readline()
        while line:
            ofp.write(string.rstrip(line)+os.linesep)
            line = file.readline()
Example #25
 def GetFileName(type,filename):
   if type=="open":
     fn=QtGui.QFileDialog.getOpenFileName() #None, QtCore.QString(), QtCore.QString(), QtGui.QMainWindow() )
   else:
     fn=QtGui.QFileDialog.getSaveFileName() #None, QtCore.QString(), QtCore.QString(), QtGui.QMainWindow() )
     
   file=open(filename,'w')
   if not fn.isNull():
       s = str(fn)
       print "filename",filename
       print "fn",s
       print string.rstrip(s)
       file.write(string.rstrip(s))
       file.close()
Example #26
def read(category, disc_id, server_url=default_server,
	 user=default_user, host=hostname, client_name=name,
         client_version=version):

    url = "%s?cmd=cddb+read+%s+%s&hello=%s+%s+%s+%s&proto=%i" % \
	  (server_url, category, disc_id, user, host, client_name,
           client_version, proto)

    response = urllib.urlopen(url)
    
    # kaa.metadata unicode fix
    encoding = ''
    if response.headers.has_key('content-type'):
        for value in response.headers['content-type'].split(';'):
            if value.strip().lower().startswith('charset='):
                encoding = value[value.find('=')+1:].strip().lower()

    header = string.split(string.rstrip(response.readline()), ' ', 3)

    header[0] = string.atoi(header[0])
    if header[0] == 210 or header[0] == 417: # success or access denied
	reply = []

	for line in response.readlines():
	    line = string.rstrip(line)

	    if line == '.':
		break;

	    line = string.replace(line, r'\t', "\t")
	    line = string.replace(line, r'\n', "\n")
	    line = string.replace(line, r'\\', "\\")

	    reply.append(line)

	if header[0] == 210:		# success, parse the reply
            # kaa.metadata unicode fix
            reply = parse_read_reply(reply)
            for key, value in reply.items():
                if isinstance(value, str):
                    try:
                        reply[key] = unicode(reply[key], encoding)
                    except UnicodeDecodeError:
                        reply[key] = unicode(reply[key], errors='replace')
	    return [ header[0], reply ]
	else:				# access denied. :(
	    return [ header[0], reply ]
    else:
	return [ header[0], None ]
Example #27
 def dbHeaders( inFile, verbose=0 ):
     lHeaders = []
     
     inFileHandler = open( inFile, "r" )
     while True:
         line = inFileHandler.readline()
         if line == "":
             break
         if line[0] == ">":
             lHeaders.append( string.rstrip(line[1:]) )
             if verbose > 0:
                 print string.rstrip(line[1:])
     inFileHandler.close()
     
     return lHeaders
Example #28
    def update(self, entry):
        if self.buff:
            text = self.buff.get_text(self.buff.get_start_iter(), self.buff.get_end_iter())
            text = string.rstrip(text).encode("latin-1")
        else:
            text = string.rstrip(self.edit.get_chars(0, -1)).encode("latin-1")

        if text == self.string:
            return 0

        if text == "":
            del entry[self.field]
            return 1

        return self.update_content(entry, text)
Example #29
 def status (self):
     """ """
     # reads speed
     f=file("/proc/sys/cpu/"+str(self.cpu)+"/speed","r")
     self.speed=string.rstrip(f.readline())
     f.close()
     # reads speed min
     f=file("/proc/sys/cpu/"+str(self.cpu)+"/speed-min","r")
     self.speed_min=string.rstrip(f.readline())
     f.close()
     # reads speed_max
     f=file("/proc/sys/cpu/"+str(self.cpu)+"/speed-max","r")
     self.speed_max=string.rstrip(f.readline())
     f.close()
     return True
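A minimal sketch of the per-file read above using a context manager, which also guarantees the descriptor is closed (read_proc_value is illustrative; the /proc/sys/cpu paths are as in the example):

def read_proc_value(path):
    # Read a one-line /proc entry and trim the trailing newline.
    with open(path) as f:
        return f.readline().rstrip()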
Example #30
    def _dbFieldsRead(self, data):
        """Reads the valid key, value pairs.

        Takes a string that is expected to consist of database field
        names separated by newlines.

        Returns a tuple of field names.

        *For internal use only.*"""
        li = []
        for line in string.split(data, os.linesep):
            if self.debug:
                print string.rstrip(line)
            li.append(string.rstrip(line))
        return tuple(li)
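A minimal sketch of the same parsing with splitlines(), which does not depend on os.linesep matching the data's actual line endings (db_fields_read is illustrative, not the original method):

def db_fields_read(data):
    # splitlines() handles \n and \r\n alike; rstrip() then only trims
    # any remaining padding on each field name.
    return tuple(line.rstrip() for line in data.splitlines())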