Пример #1
0
 def create_session(self, request, email, ssid, expiration):
     """Create a session: persist the cookie key, set encrypted cookies
     on the client, and populate this session object.

     Args:
         request: web request whose response receives the Set-Cookie headers.
         email: account key used to look the account up in the datastore.
         ssid: session id stored as the account's "cookieKey" field.
         expiration: expiration value echoed to the client as a cookie.

     Returns:
         self (populated session) on success, or None when no account
         exists for *email*.
     """
     account = memcache_db.get_entity(email, "Accounts")
     if account is None:
         return None

     # Persist the session id so later requests can validate the cookie.
     memcache_db.update_fields(email, "Accounts", {"cookieKey": ssid})

     import base64

     def _encode(value):
         # DES-encrypt, then base64-encode without the trailing newline
         # that encodestring appends, so the value is cookie-safe.
         return base64.encodestring(encryption.des_encrypt_str(value)).rstrip("\n")

     # The email is set as the cookie key so we can use it to look up in the DB.
     cookies = [
         (WEB_ADMIN_PARAMS.COOKIE_EMAIL_PARAM, _encode(email)),
         (WEB_ADMIN_PARAMS.COOKIE_KEY_PARAM, _encode(ssid)),
         (WEB_ADMIN_PARAMS.COOKIE_EXPIRATION, _encode(expiration)),
     ]
     for cookie_name, cookie_value in cookies:
         request.response.headers.add_header("Set-Cookie", cookie_name + "=" + cookie_value)

     # Populate and return this session object.
     self.email = email
     self.ssid = ssid
     self.expiration = expiration
     self.account = account
     return self
Пример #2
0
    def on_afMoleculeAddPB_clicked(self):
        """Add the molecule typed in the line edit to the affinity table.

        Ignores empty input; warns (without adding) when a molecule of
        the same name already exists. Clears the line edit on success.
        """
        molecule = str(self.afMoleculeLE.text()).rstrip()
        if molecule == "":
            return

        rows = self.afTable.rowCount()

        # check if molecule with this name already exist
        for rowId in range(rows):
            name = str(self.afTable.item(rowId, 0).text()).rstrip()
            if name == molecule:
                QMessageBox.warning(self, "Molecule Name Already Exists",
                                    "Molecule name already exist. Please choose different name", QMessageBox.Ok)
                return

        self.afTable.insertRow(rows)
        moleculeItem = QTableWidgetItem(molecule)
        self.afTable.setItem(rows, 0, moleculeItem)

        # reset molecule entry line
        self.afMoleculeLE.setText("")
Пример #3
0
  def constructmenuitemstring(self,items,keybindingsdict):
    """Build a menu-item label, right-padding the menu text so the key
    binding (qualifiers plus key name) lines up in a column.

    items[0] is the menu text, items[1] the key-binding lookup key.
    Side effect: records the stripped label -> command mapping in the
    module-level ``commands`` dict. Returns the composed label string.
    """
    if len(items) == 1:
      return items[0]

    keykey = items[1].strip()
    if keykey not in keybindingsdict:
      # No binding known for this key: store the mapping and return
      # the bare (right-stripped) menu text.
      ret = items[0].rstrip()
      commands[ret.lstrip()] = items[1].lstrip()
      return ret

    key = keybindingsdict[keykey]

    # Qualifier (modifier) names are joined with " + " separators.
    qualifier = ""
    for item in key[1]:
      qualifier += get_key_name(item) + " + "

    stripped0 = items[0].rstrip()
    # Pad the menu text out to column 41 before appending the binding.
    padding = " " * max(0, 41 - len(stripped0))
    ret = stripped0 + padding + qualifier + get_key_name(key[0][0])
    commands[ret.lstrip()] = items[1].lstrip()
    return ret
Пример #4
0
	def include(self, domain, initialstate, goalstate, plan, success, shouldIWrite=True):
		"""Add a planning context to the in-memory cache, optionally
		persisting it to per-id cache files.

		Strips trailing newlines from domain/initialstate/goalstate,
		writes them (plus the plan, or 'fail' on failure) to
		cache_* files when shouldIWrite is true, then indexes the
		tuple in self.data under the md5 checksum of the three texts.
		Exits the process when the cache files cannot be written.
		"""
		domain = string.rstrip(domain, '\n')
		initialstate = string.rstrip(initialstate, '\n')
		goalstate = string.rstrip(goalstate, '\n')
		#try:
		if shouldIWrite:
			try:
				#print 'PLAN: ', plan
				D = open('cache_D'+str(self.availableId)+'.py', 'w')
				D.write(domain)
				D.close()
				I = open('cache_I'+str(self.availableId)+'.xml', 'w')
				I.write(initialstate)
				I.close()
				G = open('cache_G'+str(self.availableId)+'.py', 'w')
				G.write(goalstate)
				G.close()
				P = open('cache_plan'+str(self.availableId)+'.agglp', 'w')
				if success: P.write(plan)
				else: P.write('fail')
				P.close()
			except:
				print 'Can\'t write cache'
				traceback.print_exc()
				sys.exit(-1)
		self.availableId += 1
		# The checksum identifies identical planning contexts across runs.
		md = md5.new()
		md.update(domain+initialstate+goalstate)
		checksum = md.hexdigest()
		print 'Including planning context with checksum', checksum
		# EAFP: the first context for this checksum creates the list.
		try:
			self.data[checksum].append((domain, initialstate, goalstate, plan, success))
		except:
			self.data[checksum] = []
			self.data[checksum].append((domain, initialstate, goalstate, plan, success))
Пример #5
0
 def read( self, faFileHandler ):
     """Read the next FASTA record from *faFileHandler* into this object.

     Sets self.header (text after '>', trailing whitespace stripped) and
     self.sequence (sequence lines concatenated). On EOF both are set to
     None. Leaves the file positioned at the start of the next record.
     """
     line = faFileHandler.readline()
     if line == "":
         # EOF: no record available.
         self.header = None
         self.sequence = None
         return
     # Skip blank lines before the header.
     while line == "\n":
         line = faFileHandler.readline()
     if line[0] == '>':
         self.header = string.rstrip(line[1:])
     else:
         print "error, line is",string.rstrip(line)
         return
     line = " "
     seq = cStringIO.StringIO()
     while line:
         # Remember the position so a following '>' header can be pushed back.
         prev_pos = faFileHandler.tell()
         line = faFileHandler.readline()
         if line == "":
             break
         if line[0] == '>':
             # Start of the next record: rewind so the caller reads it next.
             faFileHandler.seek( prev_pos )
             break
         seq.write( string.rstrip(line) )
     self.sequence = seq.getvalue()
Пример #6
0
	def get_sender(self, ignore_header=""):
		"""
		Returns a tuple in the format (sender_name, sender_email). Some processing is
		done to treat X-Primary-Address/Resent-From/Reply-To/From. Headers in "ignore_header"
		are not checked.
		"""
		headers = ["X-Primary-Address", "Resent-From", "Reply-To", "From"]

		## Remove unwanted headers
		headers = [h for h in headers if h != ignore_header]

		sender_name = ''
		sender_mail = ''

		for hdr in headers:
			(sender_name, sender_mail) = self.msg.getaddr(hdr)

			if sender_name is None: sender_name = ''
			if sender_mail is None: sender_mail = ''

			## Decode sender name and stop at the first header with an address
			if sender_mail:
				if sender_name:
					sender_name = decode_header(sender_name)
					sender_name = sender_name[0][0]
				break

		return (sender_name.rstrip(), sender_mail.rstrip())
Пример #7
0
def build_para(doc, parent, start, i):
    """Collect parent.childNodes[start:...] into a new paragraph element.

    Scans children from *start* up to *i*, stopping early at a
    paragraph-level/container element or at a text node containing a
    blank line ("\\n\\n"). The collected nodes are moved into a new
    PARA_ELEMENT which is re-inserted at the same position. Returns the
    index just past the inserted paragraph (or the new child count when
    the paragraph was appended at the end).
    """
    children = parent.childNodes
    after = start + 1
    have_last = 0
    BREAK_ELEMENTS = PARA_LEVEL_ELEMENTS + RECURSE_INTO_PARA_CONTAINERS
    # Collect all children until \n\n+ is found in a text node or a
    # member of BREAK_ELEMENTS is found.
    for j in range(start, i):
        after = j + 1
        child = children[j]
        nodeType = child.nodeType
        if nodeType == ELEMENT:
            if child.get_tagName() in BREAK_ELEMENTS:
                after = j
                break
        elif nodeType == TEXT:
            pos = string.find(child.data, "\n\n")
            if pos == 0:
                after = j
                break
            if pos >= 1:
                # Split the text node so the paragraph ends at the blank line.
                child.splitText(pos)
                break
    else:
        # Loop ran to completion: paragraph reaches the end of the range.
        have_last = 1
    if (start + 1) > after:
        raise ConversionError(
            "build_para() could not identify content to turn into a paragraph")
    if children[after - 1].nodeType == TEXT:
        # we may need to split off trailing white space:
        child = children[after - 1]
        data = child.data
        if string.rstrip(data) != data:
            have_last = 0
            child.splitText(len(string.rstrip(data)))
    para = doc.createElement(PARA_ELEMENT)
    prev = None
    # Move the collected children into the paragraph back-to-front so the
    # remaining indexes stay valid while removing.
    indexes = range(start, after)
    indexes.reverse()
    for j in indexes:
        node = parent.childNodes[j]
        parent.removeChild(node)
        para.insertBefore(node, prev)
        prev = node
    if have_last:
        parent.appendChild(para)
        parent.appendChild(doc.createTextNode("\n\n"))
        return len(parent.childNodes)
    else:
        # Re-insert the paragraph before whatever now occupies *start*,
        # making sure it is followed by a newline text node.
        nextnode = parent.childNodes[start]
        if nextnode.nodeType == TEXT:
            if nextnode.data and nextnode.data[0] != "\n":
                nextnode.data = "\n" + nextnode.data
        else:
            newnode = doc.createTextNode("\n")
            parent.insertBefore(newnode, nextnode)
            nextnode = newnode
            start = start + 1
        parent.insertBefore(para, nextnode)
        return start + 1
Пример #8
0
def colorize(line):
	"""Colorize one log line.

	Strips the leading [time] bracket, colors the filename green and the
	message with the color configured for its log level in ``log_color``.
	Unrecognized or malformed lines are returned unchanged, right-stripped.
	"""
	orig_line = line
	try:
		time = get_first_bracket(line)
		line = remove_first_bracket(line)

		for log in log_color:
			if line.find(log) >= 0:
				line = remove_first_bracket(line)
				line = line.strip()
				line = line.split(' ', 2) ## XXX: magic number 2
				line[filename] = GREEN + line[filename] + CLEAR
				line[message] = log_color[log] + line[message] + CLEAR
				break
		else:
			# No known log level found in the line: pass it through.
			return orig_line.rstrip()

		if show_time:
			line.insert(0, time)
	except Exception:
		# Malformed line (missing brackets / too few fields): pass through.
		return orig_line.rstrip()

	return ' '.join(line)
Пример #9
0
def getallsubs(content, language, search_string, season, episode):
    """Scan *content* for subtitle entries matching *search_string* and
    append the hits to the module-level ``subtitles_list``.

    When *season* is non-empty only entries tagged with that
    season/episode are kept; otherwise only untagged entries are kept.
    """
    # Note: the original kept an unused loop counter and an unused
    # year_found (match group 3); both removed.
    for matches in re.finditer(subtitle_pattern, content, re.IGNORECASE | re.DOTALL):
        title_found = unescape(matches.group(2).rstrip())
        log( __name__ , title_found )
        log( __name__ , search_string )
        if title_found.lower().find(search_string.lower()) > -1:
            subtitle_id    = matches.group(1)
            season_episode_found = matches.group(4).rstrip()
            filename = title_found
            languageshort = toOpenSubtitles_two(language)
            # Optional extra info appended to the display name.
            match = re.match(subinfo_pattern, matches.group(5), re.IGNORECASE | re.DOTALL)
            if match:
                description = match.group(1)
                filename = filename + ' ' + description
            if len(season) > 0:
                season_episode = 'Sezona' + season + ' Epizoda' + episode
                if season_episode == season_episode_found:
                    subtitles_list.append({'rating': '0', 'sync': False, 'filename': filename, 'subtitle_id': subtitle_id, 'language_flag': 'flags/' + languageshort+ '.gif', 'language_name': language})
                    log( __name__ ,"%s Subtitles found: %s (id = %s)" % (debug_pretext, filename, subtitle_id))
            else:
                if len(season_episode_found) == 0:
                    subtitles_list.append({'rating': '0', 'sync': False, 'filename': filename, 'subtitle_id': subtitle_id, 'language_flag': 'flags/' + languageshort+ '.gif', 'language_name': language})
                    log( __name__ ,"%s Subtitles found: %s (id = %s)" % (debug_pretext, filename, subtitle_id))
def main():
    """Reverse-resolve the IP address given as argv[1] using getdns.

    Prints the PTR hostnames (trailing dot removed) on success, a
    not-found message otherwise; exits with status 1 on bad usage or an
    unrecognizable address.
    """
    addr = {}

    if len(sys.argv) != 2:
        print("Usage: {0} ipaddress".format(sys.argv[0]))
        sys.exit(1)

    ctx = getdns.context_create()
    addr["address_data"] = sys.argv[1]
    if sys.argv[1].find(":") != -1:
        addr["address_type"] = "IPv6"
    # BUG FIX: the original compared find(".") against 1 instead of -1,
    # so any IPv4 address whose first '.' was not at index 1 (e.g.
    # "192.0.2.1") fell through to the "undiscernable" branch.
    elif sys.argv[1].find(".") != -1:
        addr["address_type"] = "IPv4"
    else:
        print("{0}: undiscernable address type".format(sys.argv[1]))
        sys.exit(1)

    results = getdns.hostname(ctx, address=addr)
    if results["status"] == getdns.GETDNS_RESPSTATUS_GOOD:
        print("Hostnames:")
        for responses in results["replies_tree"]:
            for ans in responses["answer"]:
                print(ans["rdata"]["ptrdname"].rstrip("."))

    if results["status"] == getdns.GETDNS_RESPSTATUS_NO_NAME:
        print("{0} not found".format(sys.argv[1]))
Пример #11
0
    def getLabel(self, partition):
        """Return the volume label of a reiserfs partition.

        Probes the candidate superblock locations for each valid block
        size, checks the magic string, and extracts the label field.
        Raises YaliException when the device cannot be read.
        """
        label = None
        fd = self.openPartition(partition)

        # valid block sizes in reiserfs are 512 - 8192, powers of 2
        # we put 4096 first, since it's the default
        # reiserfs superblock occupies either the 2nd or 16th block
        for blksize in (4096, 512, 1024, 2048, 8192):
            for start in (blksize, (blksize*16)):
                try:
                    os.lseek(fd, start, 0)
                    # read 120 bytes to get s_magic and s_label
                    buf = os.read(fd, 120)

                    # see if this block is the superblock
                    # this reads reiserfs_super_block_v1.s_magic as defined
                    # in include/reiserfs_fs.h in the reiserfsprogs source
                    # NOTE(review): "\0x00" is NUL followed by literal "x00",
                    # so the stripped character set is {NUL, 'x', '0'} --
                    # "\x00" was probably intended. Same for the label below.
                    m = string.rstrip(buf[52:61], "\0x00")
                    if m == "ReIsErFs" or m == "ReIsEr2Fs" or m == "ReIsEr3Fs":
                        # this reads reiserfs_super_block.s_label as
                        # defined in include/reiserfs_fs.h
                        label = string.rstrip(buf[100:116], "\0x00")
                        os.close(fd)
                        return label
                except OSError, e:
                    # [Error 22] probably means we're trying to read an
                    # extended partition. 
                    e = "error reading reiserfs label on %s: %s" %(partition.getPath(), e)
                    raise YaliException, e
        # NOTE(review): if no superblock matches, fd is never closed and
        # the method implicitly returns None.
Пример #12
0
def reformat_paragraph(data, limit=70):
    """Rewrap the first paragraph of *data* so its lines fit in *limit*
    columns (tab-expanded), preserving the first and continuation
    indents. Returns *data* unchanged when it contains only blank lines;
    paragraphs after the first are passed through untouched.
    """
    lines = data.split("\n")
    i = 0
    n = len(lines)
    # Skip leading blank lines.
    while i < n and is_all_white(lines[i]):
        i = i + 1
    if i >= n:
        return data
    indent1 = get_indent(lines[i])
    # Continuation indent comes from the second line when present.
    if i + 1 < n and not is_all_white(lines[i + 1]):
        indent2 = get_indent(lines[i + 1])
    else:
        indent2 = indent1
    new = lines[:i]
    partial = indent1
    while i < n and not is_all_white(lines[i]):
        # XXX Should take double space after period (etc.) into account
        words = re.split(r"(\s+)", lines[i])
        for j in range(0, len(words), 2):
            word = words[j]
            if not word:
                continue  # Can happen when line ends in whitespace
            if len((partial + word).expandtabs()) > limit and \
               partial != indent1:
                new.append(partial.rstrip())
                partial = indent2
            partial = partial + word + " "
            # Preserve extra (non-single-space) separators after a word.
            if j + 1 < len(words) and words[j + 1] != " ":
                partial = partial + " "
        i = i + 1
    new.append(partial.rstrip())
    # XXX Should reformat remaining paragraphs as well
    new.extend(lines[i:])
    return "\n".join(new)
Пример #13
0
def read(category, disc_id, server_url=default_server, 
         user=default_user, host=hostname, client_name=name,
         client_version=version):
    """Issue a CDDB 'read' command for a disc.

    Returns [status, payload]: on 210 the parsed disc data, on 417 the
    raw reply lines, otherwise None.
    """
    url = "%s?cmd=cddb+read+%s+%s&hello=%s+%s+%s+%s&proto=%i" % \
          (server_url, category, disc_id, user, host, client_name,
           client_version, proto)

    response = urllib.urlopen(url)

    # Status line: "<code> <category> <disc_id> <title>".
    header = response.readline().rstrip().split(' ', 3)

    header[0] = int(header[0])
    if header[0] == 210 or header[0] == 417: # success or access denied
        reply = []

        for line in response.readlines():
            line = line.rstrip()

            if line == '.':             # end-of-data terminator
                break

            # Unescape the CDDB escape sequences.
            line = line.replace(r'\t', "\t")
            line = line.replace(r'\n', "\n")
            line = line.replace(r'\\', "\\")

            reply.append(line)

        if header[0] == 210:                # success, parse the reply
            return [ header[0], parse_read_reply(reply) ]
        else:                                # access denied. :(
            return [ header[0], reply ]
    else:
        return [ header[0], None ]
Пример #14
0
def get_url_contents(url):
    """Fetch *url* (file://, ftp:// or http://) and return its lines.

    Lines are right-stripped (file/http) and collected into the
    module-global ``lns``, which is also returned. Unknown schemes
    yield an empty list.
    """
    global lns
    lns = []
    url_comps = urlparse(url)
    if url_comps[0] == "file":
        f = open(url_comps[2])
        ln = f.readline()
        while ln:
            lns.append(ln.rstrip())
            ln = f.readline()
        f.close()
    elif url_comps[0] == "ftp":
        def ftp_line(ln):
            lns.append(ln)
        h = ftplib.FTP(url_comps[1])
        h.login()
        i = url_comps[2].rfind('/')
        if i >= 0:
            h.cwd(url_comps[2][:i])
            h.retrlines("RETR " + url_comps[2][i+1:], ftp_line)
        else:
            h.retrlines("RETR " + url_comps[2], ftp_line)
        h.close()
    elif url_comps[0] == "http":
        h = httplib.HTTP(url_comps[1])
        h.putrequest('GET', url_comps[2])
        h.putheader('Accept', 'text/html')
        h.putheader('Accept', 'text/plain')
        h.endheaders()
        errcode, errmsg, headers = h.getreply()
        # HTTP/1.1 replies seem to generate an errorcode of -1, so try
        # to handle this case.  This may simply be a manifestation of
        # a broken Python 1.4 httplib module.  This bug has been fixed
        # with Python version 1.5.
        version = sys.version[0:3]
        if (version < "1.5") and (errcode == -1):
            try:
                real_errcode = int(errmsg.split()[1])
            except ValueError:
                real_errcode = -1 # yes, it really is bogus :-/
            sys.stderr.write("%d" % (real_errcode)) # Should be 200
        else:
            sys.stderr.write("%d" % (errcode)) # Should be 200
        # NOTE(review): the original indented this block (with spaces in a
        # tab-indented file) under the else: branch; since it is guarded by
        # errcode == 200 the behavior is identical at this level, and the
        # consistent indentation matches the pre-1.5 compensation below.
        if errcode == 200:
            f = h.getfile()
            ln = f.readline()
            # once again, try to compensate for broken behavior on HTTP/1.1
            # by eating the header lines which would otherwise show up in
            # the data.  This bug has been fixed with Python version 1.5.
            if (version < "1.5") and (errcode == -1) and (real_errcode != -1):
                while ((ln) and
                       ((len(ln) > 2) or
                        (ln[0] != "\r") or (ln[-1] != "\n"))):
                    ln = f.readline()
            while ln:
                lns.append(ln.rstrip()) # Get the raw HTML
                ln = f.readline()
            f.close()
    return lns
Пример #15
0
def toHexString(bytes=[], format=0):
    """Returns an hex string representing bytes

        bytes:  a list of bytes to stringify,
                    e.g. [59, 22, 148, 32, 2, 1, 0, 0, 13]
        format: a logical OR of
                COMMA: add a comma between bytes
                HEX: add the 0x chars before bytes
                UPPERCASE: use 0X before bytes (need HEX)
                PACK: remove blanks

        example:
        bytes = [ 0x3B, 0x65, 0x00, 0x00, 0x9C, 0x11, 0x01, 0x01, 0x03 ]

        toHexString(bytes) returns  3B 65 00 00 9C 11 01 01 03
        toHexString(bytes, COMMA) returns  3B, 65, 00, 00, 9C, 11, 01, 01, 03
        toHexString(bytes, HEX | COMMA) returns
            0x3B, 0x65, 0x00, 0x00, 0x9C, 0x11, 0x01, 0x01, 0x03
        toHexString(bytes, PACK) returns  3B6500009C11010103
        toHexString(bytes, HEX | UPPERCASE | COMMA) returns
            0X3B, 0X65, 0X00, 0X00, 0X9C, 0X11, 0X01, 0X01, 0X03
    """
    # Raises TypeError for non-list input (the original also looped over
    # tuple(bytes) first, which served no purpose and was removed).
    if type(bytes) is not list:
        raise TypeError('not a list of bytes')

    if not bytes:
        return ""

    # Build the per-byte format: two uppercase hex digits, optionally
    # "0x"/"0X"-prefixed and comma-separated; PACK removes the blank.
    pformat = "%-0.2X"
    if COMMA & format:
        pformat = pformat + ","
    pformat = pformat + " "
    if PACK & format:
        pformat = pformat.rstrip()
    if HEX & format:
        if UPPERCASE & format:
            pformat = "0X" + pformat
        else:
            pformat = "0x" + pformat
    # Normalize each byte to 0..255, then strip the trailing separator.
    return "".join([pformat % ((b + 256) % 256) for b in bytes]).rstrip().rstrip(',')
Пример #16
0
def query(track_info, server_url=default_server,
          user=default_user, host=hostname, client_name=name,
          client_version=version, trying=0):
    """Issue a CDDB 'query' command for a disc.

    track_info is [disc_id, num_tracks, offsets..., seconds].
    Returns [status, result]: a dict for an exact match (200), a list of
    dicts for multiple matches (210/211), or None otherwise. Retries
    against the default server (up to 10 times) when the status line
    cannot be parsed, then gives up with [900, None].
    """
    disc_id = track_info[0]
    num_tracks = track_info[1]

    query_str = (('%08lx %d ') % (long(disc_id), num_tracks))
    for i in track_info[2:]:
        query_str = query_str + ('%d ' % i)

    query_str = urllib.quote_plus(query_str.rstrip())

    url = "%s?cmd=cddb+query+%s&hello=%s+%s+%s+%s&proto=%i" % \
          (server_url, query_str, user, host, client_name,
           client_version, proto)

    response = urllib.urlopen(url)

    # Four elements in header: status, category, disc-id, title
    header = response.readline().rstrip().split(' ', 3)

    try:
        header[0] = int(header[0])
    except (ValueError, IndexError):
        # Malformed status line: retry against the default server.
        if trying > 10:
            return [ 900, None ]
        return query(track_info, default_server,
                     default_user, hostname, name, version, trying+1)

    if header[0] == 200:                # OK
        result = { 'category': header[1], 'disc_id': header[2], 'title':
                   header[3] }
        return [ header[0], result ]

    elif header[0] == 211 or header[0] == 210: # multiple matches
        result = []

        for line in response.readlines():
            line = line.rstrip()

            if line == '.':             # end of matches
                break
                                        # otherwise:
                                        # split into 3 pieces, not 4
                                        # (thanks to bgp for the fix!)
            match = line.split(' ', 2)

            result.append({ 'category': match[0], 'disc_id': match[1], 'title':
                            match[2] })

        return [ header[0], result ]

    else:
        return [ header[0], None ]
Пример #17
0
	def setName(self, fileName):
		"""Record the file name and derive the integer run number from it.

		Strips a leading "gdc" and a trailing ".dat" or ".000" before
		converting the remainder with int().
		"""
		self.name = fileName
		# BUG FIX: the original used string.lstrip/rstrip with
		# multi-character arguments, which strip *sets of characters*,
		# not prefixes/suffixes -- e.g. rstrip(".000") also ate trailing
		# zeros belonging to the run number ("gdc12300.000" -> 123
		# instead of 12300). Use explicit prefix/suffix removal.
		if fileName.startswith("gdc"):
			fileName = fileName[len("gdc"):]
		if fileName.endswith(".dat"):
			fileName = fileName[:-len(".dat")]
		if fileName.endswith(".000"):
			fileName = fileName[:-len(".000")]
		self.runNumber = int(fileName)
Пример #18
0
def BrowseDefaultDir(root_window,calcon_in,DirNameVar,):
    """Let the user pick the default directory via a folder dialog.

    Starts in calcon_in['target_dir'] (trailing backslash trimmed),
    stores the chosen path (forward slashes converted to backslashes,
    trailing backslash restored) into DirNameVar and back into
    calcon_in['target_dir'].
    """
    startdir = calcon_in['target_dir'].rstrip()
    startdir = startdir.rstrip("\\") #          Strip off the backslash
    DirName = tkFileDialog.askdirectory(parent=root_window, initialdir=startdir, \
        title = 'Select Default Directory')
    DirName = DirName.replace("/", "\\")
    DirNameVar.set(DirName)
    calcon_in['target_dir'] = DirName+"\\"            # Restore the backslash
Пример #19
0
    def closeMeeting(self, meetingID):
        """Delete a meeting's uploaded documents (and its directory, once
        emptied) from the slide-deck archive.

        Returns a JSON response string: result 'true'/error 7200 on
        success, result 'false'/error 7404 on any validation failure.
        """
        # JSON Response BEGIN (error message)
        jsonError = jsonObject()
        jsonError.clearResponse()
        jsonError.add('result','false')
        jsonError.add('method','closeMeeting')
        jsonError.add('meetingID',meetingID)
        jsonError.add('error','7404')
        #JSON Response END

        # Do meetingID-related input verification
        if meetingID == '':
            return jsonError.jsonResponse()

        # Trim the platform-specific trailing path separator.
        if sys.platform.startswith('win'):
            archiveDirectory = osconfig.slidedeckArchive().rstrip('\\')
        else:
            archiveDirectory = osconfig.slidedeckArchive().rstrip('/')

        localMeetingDir = os.path.join(archiveDirectory, meetingID)

        if not os.path.isdir(localMeetingDir):
            return jsonError.jsonResponse()

        docList = os.listdir(localMeetingDir)

        if len(docList) == 0:
            return jsonError.jsonResponse()

        for doc in docList:
            if doc == 'Preloaded': # we don't delete preloaded directories
                continue
            docPath = os.path.join(localMeetingDir, doc)
            try:
                shutil.rmtree(docPath)
            except OSError:
                # Best effort: a busy or vanished document must not abort
                # the close.
                pass

        # Remove the meeting directory itself once it is empty.
        if len(os.listdir(localMeetingDir)) == 0:
            try:
                os.rmdir(localMeetingDir)
            except OSError:
                pass

        # JSON Response BEGIN
        jsonSuccess = jsonObject()
        jsonSuccess.clearResponse()
        jsonSuccess.add('result','true')
        jsonSuccess.add('method','closeMeeting')
        jsonSuccess.add('meetingID',meetingID)
        jsonSuccess.add('error', '7200')
        #JSON Response END

        return jsonSuccess.jsonResponse()
Пример #20
0
def editLine(filename, username, increment):
    """Add *increment* to *username*'s counter in *filename*, a
    whitespace-separated "user value timestamp" file.

    If the user already has a line it is rewritten in place (value
    clamped at zero, timestamp refreshed); otherwise a new line is
    appended.
    """
    setFlag = 0

    lineCount = getLineCount(filename)
    print(lineCount, end="")

    # First pass: does the user already have a line?
    with open(filename, "r") as dataFile:
        for line in dataFile:
            lineScan = line.rstrip().split()
            # Guard against blank lines (the original raised IndexError).
            if lineScan and lineScan[0] == username:
                setFlag = 1

    if setFlag == 1:
        # Search for the line in question and rewrite it in place.
        # NOTE(review): the original edits the hard-coded file "witch"
        # here rather than *filename* -- preserved, but verify intent.
        for line in fileinput.input("witch", inplace=1):
            # Format the line so the whole username is lineTemp[0].
            lineTemp = line.rstrip().split()

            if lineTemp and username == lineTemp[0]:
                newVal = max(0, int(lineTemp[1]) + increment)
                stamp = datetime.strftime(getCurrentDateTime(), "%d/%m/%Y/%H:%M:%S")
                newLine = lineTemp[0] + " " + str(newVal) + " " + stamp
                # Keep the trailing newline except on the file's last line.
                if fileinput.filelineno() != lineCount:
                    newLine = newLine + "\n"
                print(newLine, end="")
            else:
                print(line, end="")

    else:
        createdLine = (
            "\n" + username + " " + str(increment) + " " + datetime.strftime(getCurrentDateTime(), "%d/%m/%Y/%H:%M:%S")
        )
        with open(filename, "a") as dataFile:
            dataFile.write(createdLine)
Пример #21
0
 def jbofihe(self, irc, msg, args, text):
     """<text>
     
     Analyze text with jbofihe, falls back on cmafihe for
     ungrammatical text.
     """
     pipe = Popen(['jbofihe', '-ie', '-x'], stdin=PIPE, stdout=PIPE)
     result = pipe.communicate(text)[0].rstrip()
     if pipe.returncode > 0:
         # jbofihe rejected the text: fall back to the lenient cmafihe.
         pipe = Popen('cmafihe', stdin=PIPE, stdout=PIPE)
         result = "not grammatical: " + pipe.communicate(text)[0].rstrip()
     irc.reply(result)
Пример #22
0
def food(type, page):
	"""Scrape one meal category from a mensa day page.

	*type* is the CSS class of the category, *page* a response object
	with a .content attribute. Returns a list of dicts with Name, Type,
	Price, Ampel, Veg, Bio and Klimaessen keys.
	"""
	tree = html.fromstring(page.content)
	counter = 1
	typearr = []

	while True:
		path = '//div[@class="mensa_day mensa_day_speise {}"]//tr[@class="mensa_day_speise_row"][{}]//td[@class="mensa_day_speise_'
		path = path.format(type, str(counter))

		pathyfy = lambda x, y : tree.xpath(path+x+'"]'+y)

		name = pathyfy('name', '/text()')

		if name == []:
			# No more rows in this category.
			break

		#init food dict
		food = {'Name': None, 'Type': type, 'Price': None, 'Ampel': None, 'Veg': None, 'Bio': None, 'Klimaessen': None}

		#get name (the first text node may be an 8-space padding blob)
		if name[1] == '        ':
			food['Name'] = name[2].rstrip()
		else:
			food['Name'] = name[1].strip()

		price = pathyfy('preis', '/text()')
		food['Price'] = price[0].rstrip()

		#get Ampel
		food['Ampel'] = pathyfy('name', '//a/@href')[0]

		#Vegetarian/Vegan option
		if pathyfy('name', '//img[@alt="Vegan"]') != []:
			food['Veg'] = 'vegan'
		elif pathyfy('name', '//img[@alt="Vegetarisch"]') != []:
			food['Veg'] = 'vegetarisch'

		#Bio option
		if pathyfy('name', '//img[@alt="Bio"]') != []:
			food['Bio'] = True

		#Klimaessen
		if pathyfy('name', '//img[@alt="Klimaessen"]') != []:
			food['Klimaessen'] = True

		typearr.append(food)
		counter += 1

	return typearr
Пример #23
0
    def GetFileName(type, filename):
        """Show a Qt open/save dialog and write the chosen path to *filename*.

        type: "open" selects an open dialog, anything else a save dialog.
        The chosen path (trailing whitespace stripped) is written into
        *filename*; note the file is truncated/created even when the
        dialog is cancelled.
        """
        if type == "open":
            fn = qt.QFileDialog.getOpenFileName(qt.QString.null, qt.QString.null, qt.QMainWindow())
        else:
            fn = qt.QFileDialog.getSaveFileName(qt.QString.null, qt.QString.null, qt.QMainWindow())

        file = open(filename, "w")
        if not fn.isNull():
            print filename
            print fn.ascii()
            print string.rstrip(fn.ascii())
            file.write(string.rstrip(fn.ascii()))
        file.close()
Пример #24
0
def main(argv):
    """Parse the command line and run the queries.

    Options: -s server url, -f output format, -q inline query,
    -l suppress first line, -v verbose; remaining args are files whose
    contents are enqueued as queries. Results go to stdout, SQL errors
    to stderr.
    """
    import os, getopt

    queries = []
    url = os.getenv("SQLCLURL", default_url)
    fmt = default_fmt
    writefirst = 1
    verbose = 0

    # Parse command line
    try:
        optlist, args = getopt.getopt(argv[1:], 's:f:q:vlh?')
    except getopt.error as e:
        usage(1, e)

    for o, a in optlist:
        if o == '-s': url = a
        elif o == '-f': fmt = a
        elif o == '-q': queries.append(a)
        elif o == '-l': writefirst = 0
        elif o == '-v': verbose += 1
        else: usage(0)

    if fmt not in formats:
        usage(1, 'Wrong format!')

    # Enqueue queries in files
    for fname in args:
        try:
            queries.append(open(fname).read())
        except IOError as e:
            usage(1, e)

    # Run all queries sequentially
    for qry in queries:
        ofp = sys.stdout
        if verbose:
            write_header(ofp, '#', url, qry)
        result = query(qry, url, fmt)
        # Output line by line (in case it's big)
        line = result.readline()
        if line.startswith("ERROR"):  # SQL Statement Error -> stderr
            ofp = sys.stderr
        if writefirst:
            ofp.write(line.rstrip() + os.linesep)
        line = result.readline()
        while line:
            ofp.write(line.rstrip() + os.linesep)
            line = result.readline()
Пример #25
0
 def GetFileName(type,filename):
   """Show a Qt open/save dialog and write the chosen path into *filename*.

   type: "open" selects an open dialog, anything else a save dialog.
   NOTE(review): file.close() sits inside the isNull() branch, so when
   the dialog is cancelled the (already truncated) file object is never
   explicitly closed.
   """
   if type=="open":
     fn=QtGui.QFileDialog.getOpenFileName() #None, QtCore.QString(), QtCore.QString(), QtGui.QMainWindow() )
   else:
     fn=QtGui.QFileDialog.getSaveFileName() #None, QtCore.QString(), QtCore.QString(), QtGui.QMainWindow() )
     
   file=open(filename,'w')
   if not fn.isNull():
       s = str(fn)
       print "filename",filename
       print "fn",s
       print string.rstrip(s)
       file.write(string.rstrip(s))
       file.close()
Пример #26
0
def read(category, disc_id, server_url=default_server,
	 user=default_user, host=hostname, client_name=name,
         client_version=version):
    """Fetch one CDDB entry via the 'cddb read' HTTP command.

    Returns [code, payload]: payload is the parsed entry dict on
    status 210 (success), the raw reply lines on 417 (access denied),
    or None for any other status code.  Escaped \\t, \\n and \\\\ in
    reply lines are un-escaped; string values are decoded using the
    charset advertised in the Content-Type header.
    """
    url = "%s?cmd=cddb+read+%s+%s&hello=%s+%s+%s+%s&proto=%i" % \
	  (server_url, category, disc_id, user, host, client_name,
           client_version, proto)

    response = urllib.urlopen(url)
    
    # kaa.metadata unicode fix
    encoding = ''
    if response.headers.has_key('content-type'):
        for value in response.headers['content-type'].split(';'):
            if value.strip().lower().startswith('charset='):
                encoding = value[value.find('=')+1:].strip().lower()

    # First reply line: "<code> <category> <discid> <title...>"
    header = string.split(string.rstrip(response.readline()), ' ', 3)

    header[0] = string.atoi(header[0])
    if header[0] == 210 or header[0] == 417: # success or access denied
	reply = []

	for line in response.readlines():
	    line = string.rstrip(line)

	    if line == '.':
		break;

	    line = string.replace(line, r'\t', "\t")
	    line = string.replace(line, r'\n', "\n")
	    line = string.replace(line, r'\\', "\\")

	    reply.append(line)

	if header[0] == 210:		# success, parse the reply
            # kaa.metadata unicode fix
            reply = parse_read_reply(reply)
            for key, value in reply.items():
                if isinstance(value, str):
                    try:
                        reply[key] = unicode(reply[key], encoding)
                    except UnicodeDecodeError:
                        reply[key] = unicode(reply[key], errors='replace')
	    return [ header[0], reply ]
	else:				# access denied. :(
	    return [ header[0], reply ]
    else:
	return [ header[0], None ]
Пример #27
0
    def _dbFieldsRead(self, data):
        """Reads the valid key, value pairs.

        Takes a string that is expected to consist of database field
        names separated by newlines.

        Returns a tuple of field names.

        *For internal use only.*"""
        li = []
        # One field name per platform-line-separator-delimited line.
        for line in string.split(data, os.linesep):
            if self.debug:
                print string.rstrip(line)
            # Trailing whitespace is stripped before the name is kept.
            li.append(string.rstrip(line))
        return tuple(li)
Пример #28
0
 def dbHeaders( inFile, verbose=0 ):
     """Return the FASTA header lines (without the '>') found in inFile.

     inFile  -- path of a FASTA-formatted file
     verbose -- if > 0, also print each header as it is found
     """
     lHeaders = []
     
     inFileHandler = open( inFile, "r" )
     while True:
         line = inFileHandler.readline()
         if line == "":
             # readline() returns '' only at end of file
             break
         if line[0] == ">":
             # Keep the header text minus the leading '>' and trailing newline.
             lHeaders.append( string.rstrip(line[1:]) )
             if verbose > 0:
                 print string.rstrip(line[1:])
     inFileHandler.close()
     
     return lHeaders
Пример #29
0
    def update(self, entry):
        """Synchronise the edited widget text with *entry*.

        entry -- mapping updated in place (field self.field).

        Returns 0 when the text equals the cached value self.string,
        1 after deleting an emptied field, otherwise the result of
        update_content() for the new text.
        """
        # Read the current widget content (text buffer or plain edit),
        # drop trailing whitespace, and encode as latin-1 bytes.
        # (str.rstrip() replaces the deprecated string.rstrip().)
        if self.buff:
            text = self.buff.get_text(self.buff.get_start_iter(), self.buff.get_end_iter())
            text = text.rstrip().encode("latin-1")
        else:
            text = self.edit.get_chars(0, -1).rstrip().encode("latin-1")

        if text == self.string:
            return 0  # unchanged -- nothing to do

        if text == "":
            # Field was cleared: remove it from the entry entirely.
            del entry[self.field]
            return 1

        return self.update_content(entry, text)
Пример #30
0
 def status (self):
     """Refresh cached CPU frequency values from /proc.

     Reads the current, minimum and maximum speed for this CPU from
     /proc/sys/cpu/<n>/speed{,-min,-max} into self.speed,
     self.speed_min and self.speed_max.  Returns True.

     Fixes in this revision: the original wrote 'f.close' without
     parentheses (never called -- three leaked handles) and used the
     Python-2-only 'file()' builtin.
     """
     base = "/proc/sys/cpu/" + str(self.cpu) + "/"
     # reads speed
     f = open(base + "speed", "r")
     try:
         self.speed = f.readline().rstrip()
     finally:
         f.close()
     # reads speed min
     f = open(base + "speed-min", "r")
     try:
         self.speed_min = f.readline().rstrip()
     finally:
         f.close()
     # reads speed_max
     f = open(base + "speed-max", "r")
     try:
         self.speed_max = f.readline().rstrip()
     finally:
         f.close()
     return True
Пример #31
0
    def read(self, file):
        """
		reads an NMR specturm in its own format
		"""

        try:
            FILE = open(file)
        except:
            print "unable to open file"
            return

        previd = ""
        res = None
        for line in FILE.readlines():
            line = string.rstrip(line)

            id = int(line[0:4])
            resn = line[5:8]
            atmn = line[12:16]
            cs = float(line[17:25])

            if len(line) > 25:
                std = float(line[26:34])
            else:
                std = 0.0

            if id != previd:
                res = self.newResidue()
                res.id = id
                res.name = resn

            atom = res.newAtom()
            atom.resi = id
            atom.name = atmn
            atom.cs_value = cs
            atom.cs_stdev = std

        FILE.close()
Пример #32
0
    def __init__(self, master, quitfunc):
        """Build the Pynche help window.

        master   -- parent Tk widget
        quitfunc -- callback bound to Alt-Q/Alt-q for quitting the app

        Shows the README located next to sys.path[0]; if it cannot be
        read, the module docstring is displayed instead.  Closing the
        window only withdraws it so it can be re-shown later.
        """
        from Main import __version__, docstring
        self.__root = root = Toplevel(master, class_='Pynche')
        # Window-manager close just hides the window instead of destroying it.
        root.protocol('WM_DELETE_WINDOW', self.__withdraw)
        root.title('Pynche Help Window')
        root.iconname('Pynche Help Window')
        root.bind('<Alt-q>', quitfunc)
        root.bind('<Alt-Q>', quitfunc)
        root.bind('<Alt-w>', self.__withdraw)
        root.bind('<Alt-W>', self.__withdraw)

        # more elaborate help is available in the README file
        readmefile = os.path.join(sys.path[0], 'README')
        try:
            fp = None
            try:
                fp = open(readmefile)
                contents = fp.read()
                # wax the last page, it contains Emacs cruft
                i = string.rfind(contents, '\f')
                if i > 0:
                    contents = string.rstrip(contents[:i])
            finally:
                if fp:
                    fp.close()
        except IOError:
            sys.stderr.write("Couldn't open Pynche's README, "
                             'using docstring instead.\n')
            contents = docstring()

        self.__text = text = Text(root, relief=SUNKEN, width=80, height=24)
        self.__text.focus_set()
        text.insert(0.0, contents)
        scrollbar = Scrollbar(root)
        scrollbar.pack(fill=Y, side=RIGHT)
        text.pack(fill=BOTH, expand=YES)
        text.configure(yscrollcommand=(scrollbar, 'set'))
        scrollbar.configure(command=(text, 'yview'))
Пример #33
0
def document_graph(documents,
                   save_image=False,
                   graph_type='bigram',
                   print_graph_info=False,
                   filenames=None):
    """Build a word graph from *documents*.

    documents        -- text to analyse (whitespace-tokenised)
    save_image       -- if True, draw the graph and save a PNG
    graph_type       -- 'bigram' (lemmatised word pairs) or 'textrank'
                        (keyphrase nodes weighted by TextRank scores)
    print_graph_info -- if True, print node counts and edge weights
    filenames        -- needed for 'textrank'; passed to get_doc_kpe()

    Returns the constructed graph.
    """
    stop = set(stopwords.words('english'))
    tokens = documents.split(None)
    # Lowercase, clean and lemmatise each non-stopword token, then strip
    # trailing punctuation.
    lemmas = [
        WordNetLemmatizer().lemmatize(clean_str(
            (tok.lower()), together=True)).rstrip(',.!?;') for tok in tokens
        if tok not in stop
    ]
    if filenames:
        kpe_textrank = get_doc_kpe(documents,
                                   filenames,
                                   type="textrank",
                                   format="list")
    # NOTE: the original compared strings with `is`, which only works by
    # accident of CPython interning; `==` is the correct comparison.
    if graph_type == 'textrank':
        kpe_textrank_words = [kpe[0] for kpe in kpe_textrank]
        kpe_textrank_scores = [kpe[1] for kpe in kpe_textrank]
        dG = make_graph(kpe_textrank_words, scores=kpe_textrank_scores)
    elif graph_type == 'bigram':
        dG = make_graph(lemmas)
    if print_graph_info:
        for node in dG.nodes():
            print('%s:%d\n' % (node, dG.node[node]['count']))
        for edge in dG.edges():
            print('%s:%d\n' %
                  (edge, maxsize - dG.edge[edge[0]][edge[1]]['weight']))
    if save_image:
        print(dG.nodes(data=True))
        plt.show()
        nx.draw(dG, width=2, with_labels=True)
        plt.savefig("images/bigram_graph.png")
    return dG
Пример #34
0
def _partition_date(date):
    """
    Partition a date string into three sub-strings
    year, month, day

    The following styles are understood:
    (1) 22-AUG-1993 or
        22-Aug-03 (2000+yy if yy<50)
    (2) 20010131
    (3) mm/dd/yy or
        mm/dd/yyyy
    (4) 10Aug2004 or
        10Aug04
    (5) yyyy-mm-dd

    Raises Exception when no pattern in date_re_list matches.
    """
    # str.strip() replaces the deprecated string.lstrip/string.rstrip pair.
    date = date.strip()
    for reg, idx in date_re_list:
        mo = reg.match(date)
        if mo is not None:
            # idx maps match-group numbers to (year, month, day) order.
            return (mo.group(idx[0]), mo.group(idx[1]), mo.group(idx[2]))

    raise Exception("couldn't partition date: %s" % date)
Пример #35
0
    def converse(self, message, msglength=4096):
        if self.communicationMode == ASCII_MODE:
            if type(message) == 'string':
                # the following if isn't working
                if message[-2:-1] != "\r\n":
                    message = message + "\r\n"
                if self.cycAccess == None:
                    print "CycAccess is required to process commands"
                result = CycListParser(message).makeCycList()
                self.messageCycList = result[1]
            elif (str(type(message)) == "<type 'instance'>") and isinstance(
                    message, CycObject.CycList):
                self.messageCycList = message

            self.asciiSocket.send(message)
            data = self.asciiSocket.recv(msglength)
            answerlist = split(rstrip(data), None, 1)

            self.close()
            if answerlist[0] == '200':
                return [1, answerlist[1]]
            else:
                return "Error:", answerlist
Пример #36
0
 def feed(self, data):
     """Feed raw message text, classifying each complete line.

     Buffers a trailing partial line between calls, switches permanently
     to signature mode once the '-- ' separator line is seen, and marks
     format=flowed continuation lines (those ending in exactly one
     trailing space).  Deprecated string-module calls are replaced by
     the equivalent str methods.
     """
     data = self.buffer + data
     self.buffer = ''
     if self.signature:
         # Everything after the signature separator passes through as-is.
         self.send_data(data)
     else:
         lines = data.split('\n')
         if lines:
             # The last element is an incomplete line; keep it for the
             # next feed() call.
             self.buffer = lines[-1]
             for line in lines[:-1]:
                 if line == '-- ':
                     # RFC 3676 signature separator.
                     self.signature = 1
                     self.set_flow(0)
                 if self.signature:
                     self.send_data(line + '\n')
                     continue
                 # A line whose only trailing whitespace is one space is
                 # a "flowed" (soft-wrapped) line.
                 if len(line.rstrip()) == (len(line) - 1) \
                    and line[-1] == ' ':
                     self.set_flow(1)
                     self.send_data(line + '\n')
                 else:
                     self.set_flow(0)
                     self.send_data(line + '\n')
Пример #37
0
    def __init__(self, atom, sgmultip):
        """Wrap a crystallographic *atom*, exposing refinable Parameters.

        atom     -- underlying atom object (provides label, BIso,
                    occupancy, multip and coords accessors)
        sgmultip -- general multiplicity of the space group, used to
                    normalise the occupancy
        """
#        if isSequence(atoms):
            # changes properties of both a regular atom and a magnetic atom
            #   (they should correspond to the same atomic position)
#            self.atom = atoms[0]
#            self.magAtom = atoms[1]
#            self.magAtom.label = rstrip(self.magAtom.label)
#            self.magnetic = True
#        else:
        self.atom = atom
        # Magnetic handling is currently disabled (see commented block above).
        self.magnetic = False
        self.magAtoms = None
        self.matom_index = -1
        self.coeffs = None
        self.phases = None
        self.sgmultip = sgmultip
        # Labels come back space-padded; normalise before use.
        self.atom.set_atom_lab(rstrip(self.atom.label()))
        self.B = Parameter(self.atom.BIso(), name=self.atom.label() + " B")
        # Occupancy is stored relative to the site multiplicity; rescale
        # by the space-group multiplicity.
        occ = self.atom.occupancy() / self.atom.multip() * self.sgmultip
        self.occ = Parameter(occ, name=getAtom_lab(self.atom) + " occ")
        self.x = Parameter(self.atom.coords()[0], name=self.atom.label() + " x")
        self.y = Parameter(self.atom.coords()[1], name=self.atom.label() + " y")
        self.z = Parameter(self.atom.coords()[2], name=self.atom.label() + " z")
Пример #38
0
def SafeParseParam(line):
  """Parse a single 'name = value' assignment line.

  The right-hand side is evaluated with eval() against empty globals,
  so only expression values are accepted, and the result must also
  satisfy has_valid_type().

  Returns (var_name, value) on success, None on any parse/eval failure.
  """
  try:
    lvalue, rvalue = line.split('=', 1)

    var_name = lvalue.rstrip()
    assert len(var_name) > 0

    # convert the argument into a python value using eval.
    # this should be safe since eval only allows expressions.  we also pass in
    # a dummy environment so that ours isn't accessible.  eval on
    # untrusted input is still risky -- has_valid_type() below is the
    # final gate.
    dummy = {}
    # rvalue may have '\r', and it's causing problem: strip whitespace.
    value = eval(rvalue.strip(), dummy, dummy)

    # make sure we're dealing with a simple type.  it is dangerous to
    # deal with more complex types (such as lambda or built in functions)
    assert(has_valid_type(value))

    return (var_name, value)
  except Exception:
    # Any failure (bad syntax, eval error, invalid type) -> None.
    # (Narrowed from a bare except so Ctrl-C still interrupts.)
    pass

  return None
Пример #39
0
    def __init__(self, filename):
        """Build menu item strings from the menu definition *filename*.

        Each non-empty, non-'#' line of the file contains '|'-separated
        fields that are combined with the global keybindings dictionary
        into one displayable menu line.
        """
        if not '_keybindingsdict' in dir(ra):
            ra.addMessage("Error. Unable to generate menues.")

        try:
            keybindingsdict = ra._keybindingsdict
        except:
            print sys.exc_info()
            ra.addMessage("Unable to generate menues. (" +
                          str(sys.exc_info()) + ")")

        #print "AAAAAAAAAAAA",keybindingsdict

        file = open(filename, 'r')

        # Strip newlines, drop blank lines and '#' comments, then format
        # each remaining '|'-separated record as a menu item string.
        self.lines = map(
            lambda x: self.constructmenuitemstring(string.split(x, "|"),
                                                   keybindingsdict),
            filter(lambda x: len(x) > 0 and x[0] != "#",
                   map(lambda x: string.rstrip(x), file.readlines())))

        file.close()
        self.currline = 0
Пример #40
0
def CompareRunTimeError(fullName, resFile):
  """Compare an actual run-time error result against the expected one.

  fullName -- full path of the test; its base name + '.res' holds the
              actual output
  resFile  -- file holding the expected result

  Returns True/False when the actual output contains 'Run-Time Error'
  (reporting a mismatch via report.Error), or None when the actual
  result is missing or contains no run-time error.
  """
  report.Progress(4,"Searching for runtime error")
  bn = util.ExtractName(fullName)
  actualResult = util.ReadFile(bn+".res")
  if actualResult == None:
    return None
  if string.find(actualResult, "Run-Time Error") != -1:
    #expectedResult = util.ReadFile(resFile)
    expectedResult = string.rstrip(util.ReadFile(resFile), "\n")
    ok = (string.find(expectedResult, "Run-Time Error") != -1)
    if ok:
      # Collapse whitespace and drop the error number so only the
      # message text is compared.
      actualResult = string.strip(re.sub("\s+", " ", actualResult))
      expectedResult = re.sub("Run-Time Error[ 0-9]*: ", "Run-Time Error:", expectedResult)
      #expectedResult = string.strip(re.sub("\s+", " ", expectedResult))
      ok = (actualResult == expectedResult)

    if not ok:
      report.Error("Actual result is different from expected result for " + `fullName`,
                   "expected result : " + expectedResult + "\n" +
                   "actual result   : " + actualResult)
    return ok
  else:
    return None
Пример #41
0
 def OnCleanWhiteSpace(self):
     """Normalise leading whitespace on the selected lines.

     Tabs in the indent are expanded to the editor's indent width,
     trailing whitespace is removed, and the indent is re-emitted as
     tabs or spaces according to GetUseTabs().
     """
     newText = ""
     for lineNo in self._GetSelectedLineNumbers():
         lineText = self.GetCtrl().GetLine(lineNo).rstrip()
         indent = 0
         lstrip = 0
         # Measure the existing indent; a tab counts as one indent unit.
         for char in lineText:
             if char == '\t':
                 indent = indent + self.GetCtrl().GetIndent()
                 lstrip = lstrip + 1
             elif char in string.whitespace:
                 indent = indent + 1
                 lstrip = lstrip + 1
             else:
                 break
         if self.GetCtrl().GetUseTabs():
             # Floor division ('//'): the original '/' only worked via
             # Python 2 integer division; whole tabs plus leftover spaces.
             indentText = (indent // self.GetCtrl().GetIndent()) * '\t' + (
                 indent % self.GetCtrl().GetIndent()) * ' '
         else:
             indentText = indent * ' '
         lineText = indentText + lineText[lstrip:] + '\n'
         newText = newText + lineText
     self._ReplaceSelectedLines(newText)
Пример #42
0
def most_common_genre(band_lst):
    """Return a sentence naming the most common genre(s) in *band_lst*.

    band_lst -- raw band data accepted by band_data_conversion()

    All genres tied for the top count are reported, e.g.
    "most common genres are rock, jazz.".
    """
    genre_counts = {}
    band_content = band_data_conversion(band_lst)
    bands = [Band(item) for item in band_content]
    for band in bands:
        if band.genre not in genre_counts:
            genre_counts[band.genre] = 0
        genre_counts[band.genre] += 1
    # (genre, count) pairs, highest count first.
    ranked = sorted(genre_counts.items(), key=lambda pair: pair[1], reverse=True)
    top_score = ranked[0][1]
    # Every genre tied for the top count.  The original also tested
    # `item[1] not in genres_lst` -- a count against a list of tuples,
    # always true; dict keys are unique, so no duplicate check is needed.
    top_genres = [pair for pair in ranked if pair[1] == top_score]
    names = ""
    for pair in top_genres:
        names = names + pair[0] + ", "
    names = names.rstrip(", ")
    if len(top_genres) == 1:
        return "most common genre is " + names + "."
    else:
        return "most common genres are " + names + "."
Пример #43
0
 def precmd(self, line):
     """Handle alias expansion and ';;' separator.

     Aliases are expanded repeatedly; '%1'..'%n' substitute positional
     arguments and '%*' the whole argument string.  Unless the command
     is 'alias', everything after ';;' is queued on self.cmdqueue.
     """
     if not line:
         return line
     args = line.split()
     # Guard against empty token lists (whitespace-only input or an
     # alias expanding to nothing) -- the original indexed args[0]
     # unconditionally and raised IndexError.
     while args and args[0] in self.aliases:
         line = self.aliases[args[0]]
         ii = 1
         for tmpArg in args[1:]:
             line = line.replace("%" + str(ii), tmpArg)
             ii = ii + 1
         line = line.replace("%*", ' '.join(args[1:]))
         args = line.split()
     # split into ';;' separated commands
     # unless it's an alias command
     if args and args[0] != 'alias':
         marker = line.find(';;')
         if marker >= 0:
             # queue up everything after marker
             next = line[marker + 2:].lstrip()
             self.cmdqueue.append(next)
             line = line[:marker].rstrip()
     return line
Пример #44
0
def find(string, isProp):
    """Look up a Wikidata entity id for the search text *string*.

    string -- search text (trailing whitespace is ignored); NOTE: the
              parameter name shadows the stdlib module but is kept for
              interface compatibility
    isProp -- if true, restrict the search to properties

    Returns the first matching id (e.g. 'Q42' or 'P31'), or None when
    nothing matches.
    """
    # Shared base parameters; only properties need the extra 'type'.
    params = {
        'action': 'wbsearchentities',
        'language': 'en',
        'format': 'json'
    }
    if isProp:
        params['type'] = 'property'

    url = 'https://www.wikidata.org/w/api.php'

    params['search'] = string.rstrip()
    payload = requests.get(url, params).json()

    if len(payload['search']) > 0:
        result = payload['search'][0]
        return result['id']
    # No hits: be explicit (the original fell off the end implicitly).
    return None
def postprocess(output, paths=('', src_engine)):
    '''Process PyChecker output string.

       Filters out warnings that were explicitly OK'd in the source and
       returns the remaining messages joined by os.linesep.

       Adapted from Python Cookbook recipe at ActiveState
       <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/546532>.
    '''
    py = ''  # source file name
    src = []  # source lines
    ms = []  # non-OK'd warning messages
    for m in string.split(output, os.linesep):  # process each message
        m = string.rstrip(m)
        if m:  # only non-blank message
            # Split "file:line:rest"; okd becomes non-empty when the
            # warning is marked OK in the source.
            s, okd = string.split(m, ':', 2), ''
            if len(s) > 2:  # file name, line number, rest
                if s[1] and s[1][0] in string.digits:  ## s[1].isdigit()
                    py, src = python_source(s[0], py, src, paths)
                    okd = pychecked(s[1], py, src, okd)
            if not okd:
                ms.append(m)
            elif OK_d:
                # Report the suppressed warning with the engine prefix removed.
                m = string.replace(m, src_engine_, '')
                print '%s - %s' % (m, okd)
    return string.join(ms, os.linesep)
Пример #46
0
def list_stations():
    """Render the station-list page via print_page().

    Reads radiopi_cfg.stationlist ('name|url' per line, '#' starts a
    comment line) and emits one directory entry with a play link per
    station.  Deprecated string-module calls are replaced by the
    equivalent str methods.
    """
    body = ""
    dir_title = "Station list"
    btn_sel = "<img class=\"navbutton\" src=\"/images/btn-play.svg\"/>"
    btn_back = "<a href=\"javascript:rp_back()\"><img class=\"navbutton\" src=\"/images/btn-back.svg\"/></a>"

    body += """
  <div class="directorylisting">
   <div class="directoryentry">
    <div class="directorybutton">""" + btn_back + """</div>
    <div class="directorytext">""" + dir_title + """</div>
   </div>
   <div class="directoryentry"><hr/></div>
"""

    sl = open(radiopi_cfg.stationlist, "r")
    for line in sl:
        if line[0] != "#":
            # Remove leading and trailing whitespace (incl. newline)
            line = line.strip()
            # '|' must appear past position 0 (a name is required).
            if line.find("|") > 0:
                name, url = line.split("|")
                station_link = "<a href=\"javascript:rp_station('" + url + "')\">"
                station_link += btn_sel + "</a>"
                body += """
   <div class="directoryentry">
    <div class="directorybutton">""" + station_link + """</div>
    <div class="directorytext">""" + name + """</div>
   </div>"""

    sl.close()

    body += """
  </div>
"""
    print_page("WebRadioPi", body, "webradiopi")
Пример #47
0
 def read(self, query_item):
     """Fetch and parse a CDDB entry for *query_item*.

     query_item -- dict with 'category' and 'disc_id' keys.

     Sends the 'cddb read' command.  On status 210 the key=value reply
     lines are parsed into a dict: indexed keys (e.g. TTITLE0=) become
     lists, repeated plain keys are concatenated.  Sets self.code to
     the response status; returns the dict ({} on any failure).
     """
     result = {}
     url = "%s?cmd=cddb+read+%s+%s&hello=%s+%s+%s+%s&proto=%d" % \
           (self.cddb_server, query_item['category'], query_item['disc_id'],
            self.user, self.host, self.app, self.version, self.protocol)
     response = urllib.urlopen(url)
     header = response.readline()
     try:
         self.code = string.atoi(string.split(header, ' ', 1)[0])
         if self.code == 210:
             # Keys are either indexed (NAME<digits>=...) or plain (NAME=...)
             re_indexed_key = re.compile("([^=0123456789]+)([0-9]+)=(.*)")
             re_key = re.compile("([^=]+)=(.*)")
             for line in response.readlines():
                 line = string.rstrip(line)
                 m = re_indexed_key.match(line)
                 if m:
                     (key, index, data) = m.groups()
                     if result.has_key(key):
                         i = string.atoi(index)
                         if len(result[key]) > i:
                             # continuation of an already-seen index
                             result[key][i] = result[key][i] + data
                         else:
                             result[key].append(data)
                     else:
                         result[key] = []
                         result[key].append(data)
                 else:
                     m = re_key.match(line)
                     if m:
                         (key, data) = m.groups()
                         if result.has_key(key):
                             # repeated plain keys are concatenated
                             result[key] = result[key] + data
                         else:
                             result[key] = data
     except:
         # any parse error yields an empty result
         result = {}
     return result
Пример #48
0
    def get_dir(self, path, dest='', env='base'):
        '''
        Get a directory recursively from the salt-master.

        path -- salt:// URL of the directory to copy
        dest -- local destination prefix
        env  -- file-server environment to read from

        Returns the sorted list of files copied and empty directories
        created.  (Resolves the original TODO: the deprecated string
        module calls are replaced by str methods.)
        '''
        ret = []
        # Strip trailing slash
        path = self._check_proto(path).rstrip('/')
        # Break up the path into a list containing the bottom-level directory
        # (the one being recursively copied) and the directories preceding it
        separated = path.rsplit('/', 1)
        if len(separated) != 2:
            # No slashes in path. (This means all files in env will be copied)
            prefix = ''
        else:
            prefix = separated[0]

        # Copy files from master
        for fn_ in self.file_list(env):
            if fn_.startswith(path):
                # Remove the leading directories from path to derive
                # the relative path on the minion.
                minion_relpath = fn_[len(prefix):].lstrip('/')
                ret.append(
                    self.get_file('salt://{0}'.format(fn_),
                                  '%s/%s' % (dest, minion_relpath), True, env))
        # Replicate empty dirs from master
        for fn_ in self.file_list_emptydirs(env):
            if fn_.startswith(path):
                # Remove the leading directories from path to derive
                # the relative path on the minion.
                minion_relpath = fn_[len(prefix):].lstrip('/')
                minion_mkdir = '%s/%s' % (dest, minion_relpath)
                os.makedirs(minion_mkdir)
                ret.append(minion_mkdir)
        ret.sort()
        return ret
Пример #49
0
    def _setup_common(self):
        """Build the gnuplot 'set ...' command strings shared by all plots.

        Emits the title, per-axis tics/range/label/logscale settings for
        the x/y/z axes and their secondary ('2') variants, plus any
        extra options from self.set_opts.  Returns the list of command
        strings.
        """
        cmd_strs = []

        if self.title:
            cmd_strs.append('set title "%s"\n' % self.title)
        for dim in ('x', 'y', 'z'):
            for place in ('', '2'):
                axis_idx = (dim, place)
                if self.axis[axis_idx].set:
                    if self.axis[axis_idx].idx_list is not None:
                        # Explicit tic positions: "(t1,t2,...)"
                        tic_str = "(%s)" % ",".join(
                            str(tic) for tic in self.axis[axis_idx].idx_list)
                        cmd_strs.append("set %s%stics %s\n" %
                                        (dim, place, tic_str))
                    else:
                        cmd_strs.append("set %s%srange [%d:%d]\n" % (dim, place, self.axis[axis_idx].min, \
                                                                     self.axis[axis_idx].max))
                        cmd_strs.append("set %s%stics autofreq\n" %
                                        (dim, place))
                # 'in' replaces the Python-2-only dict.has_key().
                if axis_idx in self.label:
                    cmd_strs.append(
                        'set %s%slabel "%s"\n' %
                        (dim, place, mk_enhanced_gp(self.label[axis_idx])))
                if axis_idx in self.logscale:
                    cmd_strs.append("set logscale %s%s %d\n" %
                                    (dim, place, self.logscale[axis_idx]))

            if self.axis_cnt[dim] > 1:
                # Both primary and secondary axes in use: unmirror tics.
                cmd_strs.append("set %stics nomirror\n" % dim)
                cmd_strs.append("set %s2tics nomirror\n" % dim)

        for opt in self.set_opts:
            cmd_strs.append("set %s\n" % opt)

        return cmd_strs
Пример #50
0
 def process_comment_body(self, type, token, start, end, line):
     """Tokenizer state handler: accumulate the body of a '##' comment.

     Appends successive comment lines (minus their '#'/'# ' prefix) to
     self.comment until a non-comment token arrives, then transfers to
     subject processing.  Returns the next state handler to call.
     """
     if type == tokenize.COMMENT:
         if start[1] != self.comment_start[1]:
             self.warning(
                 start,
                 "comment line should be aligned with marker"
                 )
         line = string.rstrip(token)
         if line == "##":
             # handle module comments (experimental)
             # FIXME: add more consistency checks?
             if self.stack[0].find("info") is not None:
                 self.warning(
                     self.comment_start,
                     "multiple module comments are not allowed"
                     )
                 # FIXME: ignore additional comments?
             self.process_subject_info(None, self.stack[0])
             return self.look_for_pythondoc
         elif line[:2] == "# ":
             line = line[2:]
         elif line[:1] == "#":
             line = line[1:]
         self.comment.append(line)
     else:
         if not self.comment:
             self.warning(
                 self.comment_start,
                 "found pythondoc marker but no comment body"
                 )
             return self.look_for_pythondoc
         # Comment finished: switch to collecting the documented subject.
         self.subject_start = None
         self.subject = []
         if type != tokenize.NL:
             return self.process_subject(type, token, start, end, line)
         return self.process_subject # end of comment
     return self.process_comment_body
Пример #51
0
def svm_test(test_data,
             model_file="/tmp/svm.model",
             in_file="/tmp/svm.in",
             out_file="/tmp/svm.out"):
    """@param test_data[ pos/neg ][ sample_index, data ]
    @param model_file is an input
    @param in_file is a debugging output.
    @return [accuracy, array(results)]; [-0.000001, []] when
    svm_classify produced no accuracy line (failure sentinel)."""

    from subprocess import Popen, PIPE
    import re
    from threading import Timer
    from string import rstrip
    assert_file_exists(model_file)

    # test classification model
    svm_light_format(test_data, in_file)
    p = Popen("svm_classify %s %s %s" % (in_file, model_file, out_file),
              shell=True,
              stdout=PIPE)
    # kill the process if no results in 5 seconds
    Timer(5, my_kill, [p.pid]).start()
    output = p.communicate()[0]
    # parse out results
    match = re.search("Accuracy on test set: (.*)%", output)
    if match:
        accuracy = float(match.group(1)) / 100.0
        f = open(out_file, 'r')
        results = f.readlines()  # this will be a float for each test vec
        f.close()
        # remove trailing newline
        for r in range(len(results)):
            results[r] = float(rstrip(results[r]))
        return [accuracy, array(results)]
    else:
        print "svm_test: failure!"
        return [-0.000001, []]  # store nonsense value if svm failed
def add_word(word, glove_dict, words_set, not_found_set, word_origin):
    """Register *word* (and simple variants) if any form is in the vocabulary.

    word          -- raw token to look up
    glove_dict    -- mapping of known words (membership test only)
    words_set     -- out: receives every variant found in glove_dict
    not_found_set -- out: receives words with no known variant
    word_origin   -- label used in the diagnostic message

    Variants tried: the lowercased word, the word without a trailing
    '?' / "'s" suffix, and the word with surrounding punctuation
    stripped (with and without the suffix).
    """
    possessive_suffix = "'s"

    def remove_suffix(w):
        # remove question mark in the end
        if len(w) > 0 and w[-1] == '?':
            w = w[:-1]
        # remove possible possessive form
        if w.endswith(possessive_suffix):
            w = w[:-len(possessive_suffix)]
        return w

    def add_list(original_word, lst):
        # Add every variant present in the vocabulary; record the word
        # as missing when none is known.
        found = False
        if original_word not in lst:
            lst.append(original_word)
        for w in lst:
            if w in glove_dict:
                words_set.add(w)
                found = True
        if not found:
            # The original referenced an undefined name `raw_word` here
            # (NameError on this path); `original_word` is the intended
            # variable.
            if original_word not in not_found_set and verbose:
                print('{}:{} is not in glove model'.format(
                    word_origin, original_word))
            not_found_set.add(original_word)

    word = word.strip().lower()
    word_lst = [word]
    # word without trailing '?' / possessive suffix
    word_lst.append(remove_suffix(word))
    # strip surrounding punctuation
    word_stripped = word.rstrip(punctuation).lstrip(punctuation)
    word_lst.append(word_stripped)
    word_lst.append(remove_suffix(word_stripped))

    add_list(word, word_lst)
Пример #53
0
def _load_locale(fname):
    """
    open a locale file and set the internal locale

    fname - string, the name of the locale file to load

    Lines have the form 'key=value'; a leading '#' marks a comment.
    A literal '\\n' in a value becomes a newline.  Sets the
    module-global _locale dict, or None when the file cannot be opened.
    """
    global _locale
    try:
        file = open(fname, 'r')
    except:
        gdal.Debug("nls", 'load locale failed')
        _locale = None
        return

    loc_dict = {}
    lines = file.readlines()
    for line in lines:
        if line[0] == '#':
            continue
        try:
            if string.find(line, '=') >= 0:
                key, value = string.split(line, '=')
                #take care with CR-LF
                #does this work on UNIX?
                value = string.rstrip(value)
                # allow embedded newlines via the literal sequence '\n'
                value = string.replace(value, '\\n', '\n')
                loc_dict[key] = value
        except:
            gdal.Debug("nls", 'an exception occurred in nls.py')
            pass

    file.close()
    _locale = loc_dict
    try:
        gdal.Debug("nls", 'locale changed to %s' % loc_dict['locale-name'])
    except:
        gdal.Debug("nls", 'unknown locale loaded')
def help_func(): 
    """Display txt2html.txt in a scrolling curses window.

    Shows up to 17 lines at a time; any keypress advances by 15 lines
    until the end of the file is reached.  Returns CONTINUE.
    """
    help_lines = []
    offset = 0
    fh_help = open('txt2html.txt')
    for line in fh_help.readlines():
        help_lines.append(string.rstrip(line))
    s = curses.newwin(19, 77, 3, 1)
    s.box()
    num_lines = len(help_lines)
    end = 0
    while not end:
        for i in range(1,18):
            if i+offset < num_lines:
                # pad/trim each line to the window's text width
                line = string.ljust(help_lines[i+offset],74)[:74]
            else:
                line = " "*74
                end = 1
            # after the first page, bold the top overlap lines as context
            if i<3 and offset>0: s.addstr(i, 2, line, curses.A_BOLD) 
            else: s.addstr(i, 2, line, curses.A_NORMAL)
        s.refresh()
        c = s.getch()
        offset = offset+15
    s.erase()
    return CONTINUE
Пример #55
0
    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        The URL host component (unless it is localhost) is appended to
        the served root directory; query parameters are discarded.
        """
        # abandon query parameters
        parsed = urlparse.urlparse(path)
        host = parsed[1]
        # localhost adds nothing to the local root
        if (host.startswith("localhost")): host = ""
        path = parsed[2]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = string.rstrip(self.get_root() + host, "/")
        for word in words:
            # drop drive letters and directory prefixes of each component
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        print "path=%s" % (path)
        return path
Пример #56
0
    def read(self, file):
        """Read a resfile, populating self.native/self.type/self.allowed.

        file -- path of the resfile; the first 26 lines are a fixed
        header and are skipped.  Each remaining line contributes one
        entry: native is always "G", type comes from column 4, and an
        optional fifth column lists the allowed residues.  Exits the
        process when the file cannot be opened.
        """

        try:
            FILE = open(file)
        except:
            print "unable to open file"
            sys.exit()

        lines = FILE.readlines()

        # Skip the fixed 26-line header.
        for i in range(26, len(lines)):
            line = string.rstrip(lines[i])
            cols = line.split()
            self.native.append("G")
            self.type.append(cols[3])
            self.allowed.append("")
            if len(cols) == 5:
                # Optional fifth column: allowed residues for this position.
                iall = len(self.allowed) - 1
                self.allowed[iall] = cols[4]

        FILE.close()
Пример #57
0
 def m6_decrypt(self):
     """Return the M6 catalog, using a pickled cache when it is fresh.

     Loads the cached Catalog from CACHE_FILE when present; when
     missing or stale, downloads the Base64/DES-ECB-encoded XML
     catalog, decrypts it, truncates everything after the closing
     </template_exchange_WEB> tag and re-caches the parsed Catalog.
     """
     if os.path.isfile(CACHE_FILE):
         f = open(CACHE_FILE, 'r')
         try:
             self.catalog = pickle.load(f)
         except:
             # corrupt cache -> treat as absent
             self.catalog = None
         f.close()
     else:
         self.catalog = None
     if self.catalog is None or self.catalog.is_old():
         data = urllib.urlopen(M6_XML_CATALOG).read()
         # 'RWxGc2cuT3Q=' is the Base64 of the fixed DES key.
         cipher = DES.new(base64.b64decode('RWxGc2cuT3Q='), DES.MODE_ECB)
         catalog_xml = cipher.decrypt(base64.standard_b64decode(data))
         # Drop decryption padding after the closing root tag.
         catalog_xml = string.rstrip(
             string.split(catalog_xml, '</template_exchange_WEB>')
             [0]) + '</template_exchange_WEB>'
         self.catalog = Catalog(catalog_xml)
         if not os.path.exists(CACHE_DIR):
             os.mkdir(CACHE_DIR)
         f = open(CACHE_FILE, 'w')
         pickle.dump(self.catalog, f)
         f.close()
     return self.catalog.catalog
Пример #58
0
def rewrite_mpi_header(fp_in, fp_out, new_defs):
    """rewrite_mpi_header (fp_in, fp_out, new_defs)

    Rewrites the '#define' statements in an MPI header file to use
    declared external constants. 'fp_in' is a file pointer to the
    original header file, 'fp_out' to the desired output file, and
    'new_defs' is a dict collecting the defining declarations.
    (The original docstring named a nonexistent 'fp_defs' parameter.)"""

    # Stores new declarations.
    # Use a hash to prevent duplicate declarations.
    new_externs = {}
    # Iterate the file object directly; xreadlines() is deprecated
    # (and removed in Python 3) and behaves the same way.
    for line in fp_in:
        line = line.rstrip()

        # Untyped '#define NAME VALUE' -> extern const int.
        m = match_mpi_def(line)
        if m:
            new_externs[m['id']] = "extern const int %s; /* == %s @MOD@ */\n" \
                                   % (m['id'], m['val'])
            write_line(fp_out, "/* Replacing #define %s */" % m['id'])
            new_defs[m['id']] = "const int %s = %s;\n" % (m['id'], m['val'])
            continue

        # Typed '#define NAME ((type)VALUE)' -> extern const <type>.
        m = match_mpi_typed_def(line)
        if m:
            new_externs[m['id']] = "extern const %s %s; /* == %s @MOD@ */\n" \
                                   % (m['type-id'], m['id'], m['val'])
            write_line(fp_out, "/* Replacing #define %s */" % m['id'])
            new_defs[m['id']] = "const %s %s = %s;\n" \
                                % (m['type-id'], m['id'], m['val'])
            continue

        # No match; just dump the declaration as-is.
        write_line(fp_out, line)

    # Dump all new 'extern' declarations.
    fp_out.writelines(new_externs.values())
Пример #59
0
    def get_file_list(self):
        """Return the list of files contained in the package self.fn.

        Caches the result in self.file_list; returns [] when no package
        file is set.
        """
        if not self.fn:
            return []

        if self.isdeb:
            # Debian package: read data.tar.gz out of the ar archive.
            pkg = open(self.fn, "rb")
            archive = arfile.ArFile(pkg)
            payload = archive.open("data.tar.gz")
            inner = tarfile.open("data.tar.gz", "r", payload)
            self.file_list = inner.getnames()
            pkg.close()
        else:
            # Otherwise shell out to tar to list the nested data.tar.gz.
            # NOTE(review): self.fn is interpolated unquoted into a shell
            # command; a filename with shell metacharacters would break or
            # be dangerous — confirm callers control self.fn.
            pipe = os.popen(
                "tar xfzO " + self.fn + " './data.tar.gz' | tar tfz -", "r")
            for entry in pipe:
                self.file_list.append(entry.rstrip())
            pipe.close()

        # Make sure that filelist has consistent format regardless of tar version
        self.file_list = [name if name.startswith("./") else "./" + name
                          for name in self.file_list]
        return self.file_list
Пример #60
0
 def _output(self):
     """Render self.data into self.text according to self.format.

     Each entry of self.format is a tuple whose first element selects the
     field kind: "'" literal text, 'X' blank padding, 'A' left-justified
     string, and 'I'/'D'/'E'/'F'/'G' right-justified numeric fields
     (numeric kinds consume one value from self.data). Raises ValueError
     for an unimplemented numeric kind. The finished line is stored in
     self.text with trailing whitespace stripped.

     Modernized: backtick repr (removed syntax) -> repr(), and the
     deprecated string.find/string.upper/string.rstrip functions ->
     the equivalent str methods. Behavior is unchanged.
     """
     data = self.data
     self.text = ''
     for field in self.format:
         # 'type' shadowed the builtin in the original; renamed locally.
         ftype = field[0]
         if ftype == "'":
             # Literal text, emitted as-is.
             self.text = self.text + field[1]
         elif ftype == 'X':
             # field[1] blank columns.
             self.text = self.text + field[1]*' '
         else:  # fields that use input data
             length = field[1]
             # 'fraction' is only set (and only used) for D/E/F/G fields.
             if len(field) > 2: fraction = field[2]
             value = data[0]
             data = data[1:]
             if ftype == 'A':
                 # Left-justify, pad with spaces, truncate to 'length'.
                 self.text = self.text + (value+length*' ')[:length]
             else:  # numeric fields
                 if value is None:
                     s = ''
                 elif ftype == 'I':
                     s = repr(value)
                 elif ftype == 'D':
                     # FORTRAN D-exponent form: format as 'e', then swap
                     # the exponent marker.
                     s = ('%'+repr(length)+'.'+repr(fraction)+'e') % value
                     n = s.find('e')
                     s = s[:n] + 'D' + s[n+1:]
                 elif ftype == 'E':
                     s = ('%'+repr(length)+'.'+repr(fraction)+'e') % value
                 elif ftype == 'F':
                     s = ('%'+repr(length)+'.'+repr(fraction)+'f') % value
                 elif ftype == 'G':
                     s = ('%'+repr(length)+'.'+repr(fraction)+'g') % value
                 else:
                     raise ValueError('Not yet implemented')
                 s = s.upper()
                 # Right-justify into a field of exactly 'length' columns.
                 self.text = self.text + ((length*' ')+s)[-length:]
     self.text = self.text.rstrip()