Example #1
    def __init__(self, fields):

	if len(fields) != 6:
	    raise ValueError("Netstat Parse Error: tcp class requires 6 fields, not %d" % len(fields))

	# local address
	dot = string.rfind(fields[3], '.')
	if dot == -1:
	    raise ValueError("Netstat Parse Error: tcp, expected '<ip>.<port>', found '%s'" % fields[3])
	self.local_addr_ip = fields[3][:dot]
	self.local_addr_port = fields[3][dot+1:]

	# remote address
	dot = string.rfind(fields[4], '.')
	if dot == -1:
	    raise ValueError("Netstat Parse Error: tcp, expected '<ip>.<port>', found '%s'" % fields[4])
	self.remote_addr_ip = fields[4][:dot]
	self.remote_addr_port = fields[4][dot+1:]

	# send queue size
	self.send_queue = int(fields[2])

	# receive queue size
	self.receive_queue = int(fields[1])

	# state
	self.state = fields[5]
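
A minimal standalone sketch of the split used above (the address field is made up):

import string

addr = '10.0.0.5.22'              # hypothetical netstat '<ip>.<port>' field
dot = string.rfind(addr, '.')     # index of the last '.'
print addr[:dot]                  # 10.0.0.5
print addr[dot+1:]                # 22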
Example #2
def process_contents(contents):
	global includePaths
	includeFind = re.compile(r"^\s*#\s*include\s*\"([\w./\\]+)\"\s*$", re.MULTILINE)
	matchCount = 0
	pos = 0
	while True:
		match = includeFind.search(contents, pos)
		if match is None:
			break

		value = match.group(1)
		valueStart = match.start(1)
		valueEnd = match.end(1)

		# keep only the bare file name, whichever separator is used
		slash = string.rfind(value, "/")
		if slash != -1:
			value = value[slash + 1:]

		slash = string.rfind(value, "\\")
		if slash != -1:
			value = value[slash + 1:]

		if value in includePaths:
			contents = contents[:valueStart] + includePaths[value] + contents[valueEnd:]
			pos = valueStart + len(includePaths[value]) + 1
			matchCount = matchCount + 1
		else:
			pos = match.end(0)

	return (contents, matchCount)
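
A hedged driver for process_contents, assuming includePaths maps a bare header name to its replacement path (the mapping and source text here are invented):

import re, string

includePaths = {'util.h': 'common/util.h'}

src = '#include "old/path/util.h"\n#include <stdio.h>\n'
(src, n) = process_contents(src)
print src   # the quoted include now reads "common/util.h"
print n     # 1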
Example #3
def check_name(name, is_name_ok):
  """ entfernt Umlaute etc. aus Dateinamen """
  try:
    name = unicode(name, 'utf-8')
  except:
    pass
  name = name[max(string.rfind(name,'/'),
                  string.rfind(name,'\\'),
                  string.rfind(name,':')
                  )+1:]
  name = string.replace(name, u"'", u'_')
  name = string.replace(name, u'ä', u'ae')
  name = string.replace(name, u'ö', u'oe')
  name = string.replace(name, u'ü', u'ue')
  name = string.replace(name, u'Ä', u'Ae')
  name = string.replace(name, u'Ö', u'Oe')
  name = string.replace(name, u'Ü', u'Ue')
  name = string.replace(name, u'ß', u'ss')
  bad_chars  = ' ,;()[]{}*"#%+~!'
  good_chars = '________________'
  TRANSMAP = string.maketrans(bad_chars, good_chars)
  name = name.encode('iso-8859-1')
  name = string.translate(name, TRANSMAP)
  if is_name_ok:
    return name
  html = '.html'
  if name[-5:] != html :
    name += html
  return name
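
A usage sketch (file names invented):

print check_name('dir/sub/My Report (v2).txt', True)   # My_Report__v2_.txt
print check_name('My Report', False)                   # My_Report.html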
Example #4
def time_string_to_core(time):
    try:
        time = float(time)
        return time
    except (ValueError, TypeError):
        time = str(time)
    if string.lower(time[-1]) == 's':
        return float(time[:-1])
    if string.lower(time[-1]) == 'm':
        return float(time[:-1]) * 60
    if string.lower(time[-1]) == 'h':
        return float(time[:-1]) * 60 * 60
    ftime = 0.0
    multiple = 1.0
    l = string.rfind(time, ':')
    while l != -1:
        n = float(time[l+1:])
        ftime = ftime + (n * multiple)
        time = time[:l]
        multiple = multiple * 60.0
        if multiple > 3600:
            return None
        l = string.rfind(time, ':')
    if len(time):
        ftime = ftime + (float(time) * multiple)
    return ftime
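
A few illustrative calls, with results worked out from the logic above:

print time_string_to_core('90')        # 90.0   (already a number of seconds)
print time_string_to_core('90m')       # 5400.0
print time_string_to_core('1:30:15')   # 5415.0 (h:m:s)
print time_string_to_core('1:2:3:4')   # None   (more fields than h:m:s)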
Example #5
        def callback(match):
            if not any([match.group(0).startswith('/%s' % s) for s in self.start_markers]):
                return match.group(0)
            start, end = match.start(), match.end()
            start_line = string.count(text, '\n', 0, start)
            start_col = start - string.rfind(text, '\n', 0, start) - 1
            if start_col < 0:
                start_col = 0
            end_line = string.count(text, '\n', 0, end)
            end_col = end - string.rfind(text, '\n', 0, end) - 1
            offset_line = start_line + 1
            raw_text = match.group(0)
            # Compute offset column.
            idx = raw_text.find('\n') + 1
            for offset_col in xrange(idx, len(raw_text)):
                if raw_text[offset_col] not in [' ', '\t']:
                    break
            if raw_text[offset_col] == '*':
                offset_col += 1
                if raw_text[offset_col] in [' ', '\t']:
                    offset_col += 1
            offset_col -= idx

            proc_text = self._stripText(raw_text, offset_col)
            comments.append(Comment(start_line, start_col, start, end_line, end_col, end,
                                    offset_line, offset_col, proc_text, raw_text))
            return match.group(0)
Example #6
def get_extension(path):
	dirsep = string.rfind(path, '/')
	dotsep = string.rfind(path, '.')
	if dotsep > dirsep:
		return path[dotsep+1:]
	else:
		return ''
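
For example (paths invented):

print get_extension('/srv/data/archive.tar.gz')   # gz
print get_extension('/etc/init.d/network')        # '' -- the only dot is in a directory name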
Example #7
    def load_directory(self, path):

        resource.path.append(path)
        print resource.path
        osPath = os.sep.join(resource.path)
        print osPath
        dirList = os.listdir(osPath)
            
        print "Entering directory %s.\n" % path
        resource.reindex()
        for fname in dirList:
            ext = ''
            print fname
            if string.rfind(fname,".") != -1:
                name = fname[:string.rfind(fname,".")]
                ext = fname[string.rfind(fname,"."):]
            else:
                name = fname
            print "name = %s" % name
            print "ext = %s" % ext
            if ( ext ) and (ext in self.filetypes):
                self.load_file(name, ext, osPath)
            if not ext:
                self.load_directory(name)
        print "Leaving directory %s.\n" % resource.path.pop()
Example #8
def get_actions(request, user_perms, item_container):
  from django.template.loader import get_template
  from django.template import Context
  t = get_template('app/file/manage_options.html')
  nPos = max ( string.rfind ( request.path, '/add/' ),
               string.rfind ( request.path, '/edit/' ),
             )
  if nPos > -1 :
    path = request.path[:nPos]
    show_mode = True
  else :
    path = request.path
    show_mode = False
  if ( string.find(request.path, '/add/') >= 0 ) :
    edit_mode = False
  elif ( string.find(request.path, '/edit/') >= 0 ) :
    edit_mode = False
  else :
    edit_mode = request.user.is_authenticated()
  c = Context ( { 'authenticated'  : request.user.is_authenticated(),
                  'show_mode'      : show_mode,
                  'edit_mode'      : edit_mode,
                  'user_perms'     : user_perms,
                  'user_name'      : request.user,
                  'path'           : get_site_url(item_container, item_container.item.name), } )
  return t.render ( c)
Example #9
def get_docs(code):
    # Get list of links that match
    # <a href="(/people/.*?\.(pdf|doc|jpg|docx|zip)?)"
    if code:
        docs = re.findall(LOCAL_DOC_TAG_PATTERN, code)
        for doc in docs:
            # Clean up URL
            doc_path = doc[0].replace(' ', '%20')
            doc_url = 'http://www.sjsu.edu' + doc_path
            output_dir = fix_name(doc_path[1:string.rfind(doc_path, '/')])
            # Change file name back to match link
            file_name = doc_path[string.rfind(doc_path, '/') + 1:].replace('%20', ' ')
            logging.info("reading file: " + file_name)
            # logging.info("urlencoded name: " + urllib.quote_plus(file_name))
            # print "encoded name: " + file_name.encode('utf-8')
            
            # Create directory if necessary
            if not os.path.exists(output_dir):
                logging.info( "Creating dir " + output_dir)
                os.makedirs(output_dir)
                    
            try:
                remote = urllib2.urlopen(doc_url)
                # output_path = output_dir + '/' + file_name)
                output_path = output_dir + '/' + urllib.quote_plus(file_name)
                output_path = output_path.replace('+', ' ')
                logging.info( "Writing " + output_path )
                local_doc = open(output_path, 'w+')
                local_doc.write(remote.read())
                local_doc.close()
            except Exception as err:
                error_message = str(err.args)
                logging.error( "Error: " + error_message + ' in ' + file_name )
    else:
        print "code is empty"
Example #10
def main(argv):
  
	handle = ilu.LookupObject("relocate-server", "dummy", relocate.Foo)
	handle_sbh = handle.IluSBH()
	print 'sbh of handle is', handle_sbh
	cinfo = string.rfind(handle_sbh, ';')
	if cinfo < 0:
		print "can't find cinfo of dummy object in sbh <" + handle_sbh + "!"
		sys.exit(1)
	handle_cinfo = handle_sbh[cinfo+1:]
	if not handle:
		print "Can't find dummy"
		sys.exit(2)
	newobj = handle.dummy()
	newobj_sbh = newobj.IluSBH()
	print 'sbh of new obj is', newobj_sbh
	cinfo = string.rfind(newobj_sbh, ';')
	if cinfo < 0:
		print "can't find cinfo of dummy object in sbh <" + newobj_sbh + "!"
		sys.exit(1)
	newobj_cinfo = newobj_sbh[cinfo+1:]
	if (newobj_cinfo != handle_cinfo):
		print 'different cinfos!'
		sys.exit(1)
	sys.exit(0)
Example #11
def nmapScanUpload():
    print 'Nmap Scan & Upload'
    ipToScan = getPrivateIP()
    #Generates the ip address that can be fed into the nmap command. This simply replaces the 4th octet with a 0 and appends a /24 to scan the class C subnet.
    ipToScan = ipToScan[:string.rfind(ipToScan, '.') + 1] + '0/24'
    index = string.rfind(ipToScan, '.')
    print index
    print ipToScan
    #Starts the bridge interface and sets the display.
    nmapScan = subprocess.Popen('nmap ' + ipToScan + ' -oN nmapoutput.txt', shell=True, stderr=PIPE)
    lcd.backlight(lcd.GREEN)
    lcd.clear()
    lcd.message("Running\nNmap Scan")
    error = nmapScan.communicate()
    errorCheck(error, 'NMAP\nFailed', 'Scan\nComplete')
    lcd.clear()
    lcd.message("Scan\nComplete")
    sleep(1)
    lcd.clear()
    lcd.message("Uploading\nFile")
    #Starts FTP session
    ftpSession = ftplib.FTP(ftp_server, ftp_username, ftp_password)
    ftpSession.cwd('nmap')
    #Opens file to be uploaded
    file = open('nmapoutput.txt', 'rb')
    #Uploads the File
    ftpSession.storbinary('STOR nmapoutput.txt', file)
    file.close()
    ftpSession.quit()
    lcd.clear()
    lcd.message("Upload\nSuccessful")
    sleep(3)
    funtionBreak()
Example #12
    def __init__(self,similarity_table,unordered_filelist, list_of_files):

        self._list_of_files = list_of_files

        #given as parameter, this is result of document representation
        self.table=similarity_table

        #the clusters are represented as regular expressions
        #final regular expression is the string representation of cluster_list
        self.cluster_list = list()

        #a cluster is represented as [[1,2],3]
        #all the hierarchy that groups at a certain level is maintained in this cluster_dict
        #this keeps height as key and the cluster formed at that height as its value
        self.cluster_dict = dict()

        #contains the indexed filenames
        #each filename is given a particular id
        self.unordered_filelist = unordered_filelist

        #contains the classes we have in our dataset
        self.classes = list()
        for name in self.unordered_filelist:
            cls = name[:rfind(name, "/")]
            cls = cls[rfind(cls, "/")+1:]
            if cls not in self.classes:
                self.classes.append(cls)
        self.class_count = len(self.classes)
Example #13
	def __parseCTypedefMemberdef(self, node):
		name = node.find('./name').text
		definition = node.find('./definition').text
		if string.find(definition, 'typedef ') == 0:
			definition = definition[8 :]
		if string.rfind(name, 'Cb') == len(name) - 2:
			pos = string.find(definition, "(*")
			if pos == -1:
				return None
			returntype = definition[0:pos].strip()
			if returntype != "void":
				return None
			returnarg = CArgument(returntype, enums = self.enums, structs = self.__structs)
			definition = definition[pos + 2 :]
			pos = string.find(definition, "(")
			definition = definition[pos + 1 : -1]
			argslist = CArgumentsList()
			for argdef in definition.split(', '):
				argType = ''
				starPos = string.rfind(argdef, '*')
				spacePos = string.rfind(argdef, ' ')
				if starPos != -1:
					argType = argdef[0 : starPos + 1]
					argName = argdef[starPos + 1 :]
				elif spacePos != -1:
					argType = argdef[0 : spacePos]
					argName = argdef[spacePos + 1 :]
				argslist.addArgument(CArgument(argType, argName, self.enums, self.__structs))
			if len(argslist) > 0:
				paramdescs = node.findall("detaileddescription/para/parameterlist[@kind='param']/parameteritem")
				if paramdescs:
					for arg in argslist.arguments:
						for paramdesc in paramdescs:
							if arg.name == paramdesc.find('./parameternamelist').find('./parametername').text:
								arg.description = self.__cleanDescription(paramdesc.find('./parameterdescription'))
					missingDocWarning = ''
					for arg in argslist.arguments:
						if arg.description is None:
							missingDocWarning += "\t'" + arg.name + "' parameter not documented\n"
					if missingDocWarning != '':
						print(name + ":\n" + missingDocWarning)
			f = CEvent(name, returnarg, argslist)
			deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
			if deprecatedNode is not None:
				f.deprecated = True
			f.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
			f.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
			return f
		else:
			pos = string.rfind(definition, " " + name)
			if pos != -1:
				definition = definition[0 : pos]
			td = CTypedef(name, definition)
			deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
			if deprecatedNode is not None:
				td.deprecated = True
			td.briefDescription = ''.join(node.find('./briefdescription').itertext()).strip()
			td.detailedDescription = self.__cleanDescription(node.find('./detaileddescription'))
			return td
		return None
Example #14
    def __init__(self, fields):

	if len(fields) != 6:
	    raise ValueError("Netstat Parse Error: tcp class requires 6 fields, not %d" % len(fields))

	# protocol
	self.protocol = fields[0]

	# receive queue size
	self.receive_queue = int(fields[1])

	# send queue size
	self.send_queue = int(fields[2])

	# local address
	dot = string.rfind(fields[3], '.')
	if dot == -1:
	    raise ValueError("Netstat Parse Error: tcp, expected '<ip>.<port>', found '%s'" % fields[3])
	self.local_addr_ip = fields[3][:dot]
	self.local_addr_port = fields[3][dot+1:]
	# CM 2004-05-16: convert '*' to standard wildcard address '0.0.0.0'
	if self.local_addr_ip == '*':
	    self.local_addr_ip = '0.0.0.0'

	# remote address
	dot = string.rfind(fields[4], '.')
	if dot == -1:
	    raise ValueError("Netstat Parse Error: tcp, expected '<ip>.<port>', found '%s'" % fields[4])
	self.remote_addr_ip = fields[4][:dot]
	self.remote_addr_port = fields[4][dot+1:]

	# state
	self.state = fields[5]
Example #15
    def addEvent(self, event):
        title = event.name 
        try:
            content = getDescription(event.link)
        except Exception:
            content = ''
        where = event.place + ", Windsor, Ontario"

        start_time = [0, 0, 0, 0, 0, 0, 0, 0, -1]
        start_time[0] = int(event.date[string.rfind(event.date,'/')+1:])
        start_time[1] = int(event.date[:string.find(event.date,'/')])
        start_time[2] = int(event.date[string.find(event.date,'/')+1:string.rfind(event.date,'/')])
        start_time[3] = int(event.time[:string.find(event.time,':')])
        if (event.time[-2:] == "PM"):
            start_time[3] += 12
        start_time[4] = int(event.time[string.find(event.time,':')+1:-3])
        
        end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.localtime(time.mktime(tuple(start_time)) + 7200 + DEFAULT_TIME_OFFSET * 3600))
        start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.localtime(time.mktime(tuple(start_time)) + DEFAULT_TIME_OFFSET * 3600))

        cevent = gdata.calendar.CalendarEventEntry()
        cevent.title = atom.Title(text=title)
        cevent.content = atom.Content(text=content)
        cevent.where.append(gdata.calendar.Where(value_string=where))
        cevent.when.append(gdata.calendar.When(start_time=start_time, end_time=end_time))

        feed = self.chrysler_cal.GetAlternateLink().href[21:]
        new_event = self.cal_client.InsertEvent(cevent, feed)
        print "Added event" , title , "at" , event.time , event.date
Example #16
def item_rename(request, op):
  """ Namen des Objektes aendern """
  item_container = get_my_item_container(request, op)
  if item_container.is_changeable:
    if request.GET.has_key('new_name') :
      is_folderish = item_container.item.app.is_folderish
      app_name = item_container.item.app.name
      # --- does .html possibly need to be appended?
      check_this_name = is_folderish or is_file_type(app_name)
      new_name = check_name(request.GET['new_name'], check_this_name)
      # --- Is there already an object with the desired name?
      parent = item_container.get_parent()
      if parent is not None and exist_item(parent, new_name):
        transaction.commit()
        return show_error_object_exist(request, item_container, new_name)
      if is_file_type(app_name):
        old_path = get_file_path(item_container)
        new_path = old_path[:string.rfind(old_path,'/')+1] + new_name
        os.rename(old_path, new_path)
      # --- apply the changes
      if is_folderish:
        n_pos = 1 + string.rfind(item_container.container.path[:-1], '/')
        new_path = item_container.container.path[:n_pos] + new_name +'/'
        old_path = item_container.container.path
        change_container_path(old_path, new_path)
      item_container.item.name = new_name
      item_container.item.save()
  transaction.commit()
  parent = item_container.get_parent()
  path = get_site_url(parent, 'index.html/manage/')
  return HttpResponseRedirect(path)
Example #17
	def __init__(self, file, timestamp=None, size=None, ext=1):
		File.__init__(self, file, timestamp, size)
		self.list	= []

		# Remove ext count extensions, the default is 1, but for
		# rolls we remove two (.diskN.iso)
		
		s = self.filename	 # name-ver-rpmver.arch.rpm
		for x in range(0, ext):
			i = string.rfind(s, ".")
			s = self.filename[:i]
    
		i = string.rfind(s, ".")
		self.list.append(s[i+1:])	# get architecture string
		s = self.filename[:i]

		i = string.rfind(s, "-")	# get RPM version string
		self.release = s[i+1:]
		self.list.append(self.versionList(s[i+1:]))
		s = self.filename[:i]

		i = string.rfind(s, "-")	# get software version string
		self.version = s[i+1:]
		self.list.append(self.versionList(s[i+1:]))

	
		self.list.append(self.filename[:i]) # get package name
	
		self.list.reverse()		# we built the list backwards
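
The constructor peels the file name apart from the right; a standalone sketch of the same moves on a hypothetical foo-1.2-3.x86_64.rpm:

import string

s = 'foo-1.2-3.x86_64.rpm'
i = string.rfind(s, '.')
s = s[:i]             # foo-1.2-3.x86_64  (one extension stripped)
i = string.rfind(s, '.')
arch = s[i+1:]        # x86_64
s = s[:i]             # foo-1.2-3
i = string.rfind(s, '-')
release = s[i+1:]     # 3
s = s[:i]             # foo-1.2
i = string.rfind(s, '-')
version = s[i+1:]     # 1.2
name = s[:i]          # foo
print name, version, release, arch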
Example #18
def garbicat(item0):
    p1 = string.find(item0, 'href="http://dmoz.org/')
    p2 = string.find(item0, '">')
    p3 = string.find(item0, '</a>')
    p4 = string.rfind(item0, '">')
    p5 = string.rfind(item0, '</a>')
    return item0[p1+21:p2], item0[p2+2:p3], item0[p4+2:p5]
Example #19
def manage_addExtImage(
    self,
    id="",
    title="",
    descr="",
    file="",
    preview="",
    content_type="",
    create_prev=NO_PREVIEW,
    maxx="",
    maxy="",
    ratio=0,
    permission_check=0,
    redirect_default_view=0,
    REQUEST=None,
):
    """ Add an ExtImage to a folder. """
    if not id and getattr(file, "filename", None) is not None:
        # generate id from filename and make sure, it has no 'bad' chars
        id = file.filename
        id = id[max(string.rfind(id, "/"), string.rfind(id, "\\"), string.rfind(id, ":")) + 1 :]
        title = title or id
        id = normalize_id(id)
    tempExtImage = ExtImage(id, title, descr, permission_check, redirect_default_view)
    self._setObject(id, tempExtImage)
    if file != "":
        self._getOb(id).manage_file_upload(file, content_type, 0, create_prev, maxx, maxy, ratio)
    if preview != "" and (create_prev == UPLOAD_NORESIZE or create_prev == UPLOAD_RESIZE):
        self._getOb(id).manage_file_upload(preview, "", 1, create_prev, maxx, maxy, ratio)
    if REQUEST is not None:
        return self.manage_main(self, REQUEST, update_menu=0)
    return id
Example #20
File: io.py  Project: eddienko/SamPy
def rename(source, dest):
    """
Renames files matched by the UNIX glob pattern source to names built
from pattern dest.  Can only handle a single '*' in the two patterns!!!

Usage:   rename (source, dest)     e.g., rename('*.txt', '*.c')
"""
    infiles = glob.glob(source)
    outfiles = []
    incutindex = string.index(source, "*")
    outcutindex = string.index(dest, "*")
    findpattern1 = source[0:incutindex]
    findpattern2 = source[incutindex + 1 :]
    replpattern1 = dest[0:incutindex]
    replpattern2 = dest[incutindex + 1 :]
    for fname in infiles:
        if incutindex > 0:
            newname = re.sub(findpattern1, replpattern1, fname, 1)
        if outcutindex < len(dest) - 1:
            if incutindex > 0:
                lastone = string.rfind(newname, replpattern2)
                newname = newname[0:lastone] + re.sub(findpattern2, replpattern2, fname[lastone:], 1)
            else:
                lastone = string.rfind(fname, findpattern2)
                if lastone != -1:
                    newname = fname[0:lastone]
                    newname = newname + re.sub(findpattern2, replpattern2, fname[lastone:], 1)
        print fname, newname
        os.rename(fname, newname)
    return
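
A usage sketch; note it performs real renames, so the .txt files are assumed to exist in the working directory:

rename('*.txt', '*.c')
# report.txt -> report.c
# notes.txt  -> notes.c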
Example #21
    def __init__(self, *args):
        c = len(args)
        if c == 1:
            name, val = None, args[0]
        elif c == 2:
            name, val = args[0], args[1]
        else:
            raise ValueError, "Invalid arguments"

        h = {"Content-Type": {"_v": ""}, "Content-Transfer-Encoding": {"_v": ""}, "Content-Disposition": {"_v": ""}}
        dt = type(val)
        b = t = None

        if dt == DictType:
            t = 1
            b = self.boundary()
            d = []
            h["Content-Type"]["_v"] = "multipart/form-data; boundary=%s" % b
            for n, v in val.items():
                d.append(MultiPart(n, v))

        elif (dt == ListType) or (dt == TupleType):
            raise ValueError, "Sorry, nested multipart is not done yet!"

        elif dt == FileType or hasattr(val, "read"):
            if hasattr(val, "name"):
                fn = gsub("\\\\", "/", val.name)
                fn = fn[(rfind(fn, "/") + 1) :]
                ex = fn[(rfind(fn, ".") + 1) :]
                if self._extmap.has_key(ex):
                    ct = self._extmap[ex]
                else:
                    ct = self._extmap[""]
            else:
                fn = ""
                ct = self._extmap[None]
            if self._encmap.has_key(ct):
                ce = self._encmap[ct]
            else:
                ce = ""

            h["Content-Disposition"]["_v"] = "form-data"
            h["Content-Disposition"]["name"] = '"%s"' % name
            h["Content-Disposition"]["filename"] = '"%s"' % fn
            h["Content-Transfer-Encoding"]["_v"] = ce
            h["Content-Type"]["_v"] = ct
            d = []
            l = val.read(8192)
            while l:
                d.append(l)
                l = val.read(8192)
        else:
            h["Content-Disposition"]["_v"] = "form-data"
            h["Content-Disposition"]["name"] = '"%s"' % name
            d = [str(val)]

        self._headers = h
        self._data = d
        self._boundary = b
        self._top = t
Example #22
def renamePicInPage(fname, order=1):

    parser = pageParser()
    f = open(fname, 'r')
    parser.reset()
    parser.feed(f.read())
    parser.close()
    f.close()

    n = order
    for l in parser.links:

        if string.find(l, 'http://') >= 0:
            l = l[string.rfind(l, '/') : ]
            name = fname[ : string.rfind(fname, '.')]
            l = name + '_files' + l
            
        if os.access(l, os.F_OK):
            pos = string.rfind(l, '/')
            path = l[:pos]
            pos = string.rfind(l, '.')
            ext = l[pos:]
            name = '%s/p%03d%s' % (path, n, ext)
            n += 1
            print l, '-->', name
            os.rename(l, name)
Example #23
    def uploadZip(self, file='', REQUEST=None, **kwargs):
        """
        Expand a zipfile into a number of Photos.
        Go through the zipfile and for each file create a I{Naaya Photo} object.
        """
        if REQUEST is not None:
            schema_raw_data = dict(REQUEST.form)
        else:
            schema_raw_data = kwargs

        err = ''
        title = None
        if 'title' in schema_raw_data.keys():
            title = schema_raw_data.get('title')
            del schema_raw_data['title']
        try:
            if type(file) is not type('') and hasattr(file,'filename'):
                # According to the zipfile.py ZipFile just needs a file-like object
                zf = ZZipFile(file)
                for name in zf.namelist():
                    zf.setcurrentfile(name)
                    content = zf.read()
                    if self.isValidImage(content):
                        id = name[max(rfind(name,'/'), rfind(name,'\\'), rfind(name,':'))+1:]
                        if title:
                            photo_title = title
                        else:
                            photo_title = name
                        self._add_photo(id=id, title=photo_title, file=content,
                            lang=self.gl_get_selected_language(), **schema_raw_data)
            else:
                err = 'Invalid zip file.'
        except Exception, error:
            err = str(error)
Example #24
def extract_filename(path_filename):
    """extract filename right from slash"""
    if ac.app_windows == "no":
        filename = path_filename[string.rfind(path_filename, "/") + 1:]
    else:
        filename = path_filename[string.rfind(path_filename, "\\") + 1:]
    return filename
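
A usage sketch, assuming a POSIX host (ac.app_windows == "no"):

print extract_filename('/var/log/messages')   # messages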
Example #25
def print_error(input, err, scanner):
    """This is a really dumb long function to print error messages nicely."""
    p = err.pos
    # Figure out the line number
    line = count(input[:p], '\n')
    print err.msg+" on line "+repr(line+1)+":"
    # Now try printing part of the line
    text = input[max(p-80, 0):p+80]
    p = p - max(p-80, 0)

    # Strip to the left
    i = rfind(text[:p], '\n')
    j = rfind(text[:p], '\r')
    if i < 0 or (0 <= j < i): i = j
    if 0 <= i < p:
        p = p - i - 1
        text = text[i+1:]

    # Strip to the right
    i = find(text,'\n', p)
    j = find(text,'\r', p)
    if i < 0 or (0 <= j < i): i = j
    if i >= 0:
        text = text[:i]

    # Now shorten the text
    while len(text) > 70 and p > 60:
	# Cut off 10 chars
        text = "..." + text[10:]
        p = p - 7

    # Now print the string, along with an indicator
    print '> ',text
    print '> ',' '*p + '^'
    print 'List of nearby tokens:', scanner
Example #26
def fetch(file):
   global download_url
   print 'file: '+file

   if base_url in file:
      dir = file[len(base_url) - 1:rfind(file, '/') + 1]
      file = file[rfind(file, '/') + 1:]
      download_url = base_url
   elif 'http' in file:
      dir = file[len(base_url2) - 1:rfind(file, '/') + 1]
      file = file[rfind(file, '/') + 1:]
      download_url = base_url2
   else:
      dir = '/'
      download_url = base_url

   print 'dir: '+dir
   print 'file: '+file
   print 'download_url: '+download_url

   process(dir + file)
   base_dir = path.dirname(dir + file)
   if base_dir != '/':
      base_dir += '/'
   tree = ElementTree.parse(out_dir + dir + file)
   for element in tree.getiterator():
      if element.tag.split('}')[1] == 'url':
         if element.text[-4:] != '.xml':
            if not 'http' in element.text:
               process(base_dir + element.text)
         else:
            fetch(element.text)
Example #27
def rfind_count(s, sub, count = 1, start = None, end = None):
    """Generalizes string.rfind to find the count-th non-overlapping occurrence 
    of sub from the end of s[start:end]

    **INPUTS**

    *STR* s -- string to search

    *STR* sub -- look for sub

    *INT* count -- ignore the first count-1 occurrences

    *INT* start, end -- look in s[start:end]

    **OUTPUTS**

    *INT* -- index of count-th occurrence, or -1 if there are fewer
    than count occurrences

    """
    first, last = start, end
    if first is None:
        first = 0
    if last is None:
        last = len(s)
    for i in range(count - 1):
        found = string.rfind(s, sub, first, last)
        if found == -1:
            return found
        last = found
    return string.rfind(s, sub, first, last)
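
Worked examples:

s = 'a.b.c.d'
print rfind_count(s, '.', 1)   # 5 -- same as string.rfind
print rfind_count(s, '.', 2)   # 3 -- second '.' counting from the end
print rfind_count(s, '.', 9)   # -1 -- fewer than nine occurrences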
Example #28
def process(filename, size):
   if filename[rfind(filename, '/') + 1:] == 'update.ver':
      return

   dir = filename[:find(filename, '/')]
   if dir[rfind(dir, '-') + 1:] != 'sta':
      return

   try:
      handle = urlopen(base_url + filename)
      headers = handle.info()
      content_length = int(headers.getheader('Content-Length'))
      last_modified = mktime(strptime(headers.getheader('Last-Modified'), '%a, %d %b %Y %H:%M:%S %Z'))
   except IOError:
      print 'Retrying:    ' + filename
      process(filename, size)
      return

   if size != content_length:
      print 'Retrying:    ' + filename
      process(filename, size)
      return

   file = out_dir + filename
   if path.isfile(file):
      file_stat = stat(file)
      if (size == -1 or file_stat.st_size == size) and file_stat.st_mtime == last_modified:
         return

   dir = out_dir + filename[:rfind(filename, '/')]
   if not path.isdir(dir): 
      makedirs(dir)

   download(filename, last_modified)
   process(filename, size)
Example #29
def process(filename, size=-1):
   file = out_dir + filename
   if path.isfile(file) and stat(file).st_size == size:
      print 'Skipping: ' + filename
      return

   print 'Processing: ' + filename
   handle = urlopen(base_url + filename)
   headers = handle.info()
   content_length = int(headers.getheader('Content-Length'))
   last_modified = mktime(strptime(headers.getheader('Last-Modified'), '%a, %d %b %Y %H:%M:%S %Z'))

   if rfind(filename, '/') > 0:
      dir = out_dir + filename[:rfind(filename, '/')]
   else:
      dir = out_dir

   if not path.isdir(dir):
      print 'Creating ' + dir
      makedirs(dir)

   if not path.isfile(file):
      download(filename, last_modified)
   else:
      file_stat = stat(file)
      if file_stat.st_mtime != last_modified or file_stat.st_size != content_length:
         download(filename, last_modified)
      else:
         print 'Skipping: ' + filename
Example #30
	def getFileUrl(self):
		satellitePos = find(self.html, ">%s</TD>"%SatelliteId)
		if satellitePos == -1:
			return ""
		headerEndPos = rfind(self.html, "</TH>", 0, satellitePos)
		firstPos = rfind(self.html, "A HREF=\"", 0, headerEndPos) + len("A HREF=\"")
		lastPos = find(self.html, "\"", firstPos)
		return self.html[firstPos:lastPos]
Example #31
import sys
from string import rfind

if __name__ == '__main__':
    if len(sys.argv) > 1:
        s = sys.argv[1]
        s = s.strip(' ')
        if len(s) == 0:
            print 'no word in string'
        elif rfind(s, ' ') == -1:
            print 'length of last word: ', len(s)
        else:
            print 'length of last word: ', len(s) - rfind(s, ' ') - 1
    else:
        print 'input a word'
        print 'run the program as:'
        print 'python last_word_length.py " xyz abcdefg "'
Example #32
def do_plot(plotfile1, plotfile2, plotfile3, component, outFile, 
            log, minval, maxval, ncontours, eps, dpi, 
            xmin, xmax, ymin, ymax,
            label1, label2, label3):


    #--------------------------------------------------------------------------
    # construct the output file name
    #--------------------------------------------------------------------------
    if (outFile == ""):
        outFile = "compare_" + component

        if (not eps):
            outFile += ".png"

        else:
            outFile += ".eps"

    else:
        # make sure the proper extension is used
        if (not eps):
            if not outFile.endswith(".png"):
                outFile = outFile + ".png"

        else:
            if not outFile.endswith(".eps"):
                outFile = outFile + ".eps"


    #--------------------------------------------------------------------------
    # read in the data from plotfile1
    #--------------------------------------------------------------------------
    (nx, ny, nz) = fsnapshot.fplotfile_get_size(plotfile1)

    time = fsnapshot.fplotfile_get_time(plotfile1)

    (xmin1, xmax1, ymin1, ymax1, zmin1, zmax1) = \
        fsnapshot.fplotfile_get_limits(plotfile1)

    x1 = xmin1 + numpy.arange( (nx), dtype=numpy.float64 )*(xmax1 - xmin1)/nx
    y1 = ymin1 + numpy.arange( (ny), dtype=numpy.float64 )*(ymax1 - ymin1)/ny


    if (not nz == -1):
        print "ERROR: 2-d support only"
        sys.exit(2)


    # read in the main component
    data1 = numpy.zeros( (nx, ny), dtype=numpy.float64)

    (data1, err) = fsnapshot.fplotfile_get_data_2d(plotfile1, component, data1)
    if (not err == 0):
        sys.exit(2)

    data1 = numpy.transpose(data1)


    if log:
        data1 = numpy.log10(data1)


    extent1 = [xmin1, xmax1, ymin1, ymax1]


    #--------------------------------------------------------------------------
    # read in the data from plotfile2
    #--------------------------------------------------------------------------
    (nx, ny, nz) = fsnapshot.fplotfile_get_size(plotfile2)

    time = fsnapshot.fplotfile_get_time(plotfile2)

    (xmin2, xmax2, ymin2, ymax2, zmin2, zmax2) = \
        fsnapshot.fplotfile_get_limits(plotfile2)

    x2 = xmin2 + numpy.arange( (nx), dtype=numpy.float64 )*(xmax2 - xmin2)/nx
    y2 = ymin2 + numpy.arange( (ny), dtype=numpy.float64 )*(ymax2 - ymin2)/ny


    if (not nz == -1):
        print "ERROR: 2-d support only"
        sys.exit(2)


    # read in the main component
    data2 = numpy.zeros( (nx, ny), dtype=numpy.float64)

    (data2, err) = fsnapshot.fplotfile_get_data_2d(plotfile2, component, data2)
    if (not err == 0):
        sys.exit(2)

    data2 = numpy.transpose(data2)


    if log:
        data2 = numpy.log10(data2)


    extent2 = [xmin2, xmax2, ymin2, ymax2]


    #--------------------------------------------------------------------------
    # read in the data from plotfile3 -- if present
    #--------------------------------------------------------------------------
    if (not plotfile3 == ""):
        (nx, ny, nz) = fsnapshot.fplotfile_get_size(plotfile3)

        time = fsnapshot.fplotfile_get_time(plotfile3)

        (xmin3, xmax3, ymin3, ymax3, zmin3, zmax3) = \
            fsnapshot.fplotfile_get_limits(plotfile3)

        x3 = xmin3 + numpy.arange( (nx), dtype=numpy.float64 )*(xmax3 - xmin3)/nx
        y3 = ymin3 + numpy.arange( (ny), dtype=numpy.float64 )*(ymax3 - ymin3)/ny


        if (not nz == -1):
            print "ERROR: 2-d support only"
            sys.exit(2)


        # read in the main component
        data3 = numpy.zeros( (nx, ny), dtype=numpy.float64)

        (data3, err) = fsnapshot.fplotfile_get_data_2d(plotfile3, component, data3)
        if (not err == 0):
            sys.exit(2)

        data3 = numpy.transpose(data3)


        if log:
            data3 = numpy.log10(data3)


        extent3 = [xmin3, xmax3, ymin3, ymax3]



    #--------------------------------------------------------------------------
    # find data limits, etc.
    #--------------------------------------------------------------------------
    if (not extent1 == extent2):
        print "ERROR: extent of domains do not agree"
        sys.exit(2)

    if (not plotfile3 == ""):
        if (not extent1 == extent3):
            print "ERROR: extent of domains do not agree"
            sys.exit(2)


    extent = extent1

    if (not xmin == None):
        extent[0] = xmin

    if (not xmax == None):
        extent[1] = xmax

    if (not ymin == None):
        extent[2] = ymin

    if (not ymax == None):
        extent[3] = ymax


    if (minval == None):
        minval = min(numpy.min(data1), numpy.min(data2))
        if (not plotfile3 == ""):
            minval = min(minval, numpy.min(data3))

    if (maxval == None):
        maxval = max(numpy.max(data1), numpy.max(data2))
        if (not plotfile3 == ""):
            maxval = max(maxval, numpy.max(data3))


    levels = numpy.linspace(minval, maxval, ncontours, endpoint=True)


    #--------------------------------------------------------------------------
    # make the figure
    #--------------------------------------------------------------------------
    cs1 = pylab.contour(x1, y1, data1, ncontours, colors='k', levels=levels)
    cs2 = pylab.contour(x2, y2, data2, ncontours, colors='r', levels=levels)
    if (not plotfile3 == ""):
        cs3 = pylab.contour(x3, y3, data3, ncontours, colors='g', levels=levels)


    # make the labels -- see http://www.scipy.org/Cookbook/Matplotlib/Legend
    # for this technique
    lines = []
    labels = []

    if (not label1 == None):
        line1 = matplotlib.lines.Line2D(range(10), range(10), linestyle='-', color='k')
        lines.append(line1)
        labels.append(label1)

    if (not label2 == None):
        line2 = matplotlib.lines.Line2D(range(10), range(10), linestyle='-', color='r')
        lines.append(line2)
        labels.append(label2)


    if (not label3 == None):
        line3 = matplotlib.lines.Line2D(range(10), range(10), linestyle='-', color='g')
        lines.append(line3)
        labels.append(label3)

        
    pylab.legend(lines, labels, fontsize="small", frameon=False)


    formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
    #pylab.clabel(cs, fontsize=9, inline=1)#, fmt=formatter)

    pylab.axis(extent)

    ax = pylab.gca()
    ax.set_aspect("equal")

    fig1 = ax.get_figure()

    ax.xaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))
    ax.yaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))

    pylab.xlabel("x")
    pylab.ylabel("y")


    if (not eps):
        pylab.savefig(outFile, bbox_inches='tight', dpi=dpi, pad_inches=0.5)
    else:
        pylab.savefig(outFile, bbox_inches='tight', pad_inches=0.5)
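
The extension check above originally tested string.rfind(outFile, ".png") > 0, which also fires when ".png" merely appears inside the name; endswith expresses the intent directly:

import string

print string.rfind("data.png.old", ".png") > 0   # True, yet the extension is .old
print "data.png.old".endswith(".png")            # False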
Example #33
def addRelations(rootElement, allRelationChildren, mamIdToAttributesMap):
    removeAtts = ['family', 'class']
    iter = allRelationChildren.iterator()
    while iter.hasNext():
        relElement = Element('relation')
        linkElement = iter.next()
        linkType = linkElement.getAttribute(
            'targetRelationshipClass').getValue()
        targetParent = linkElement.getAttribute('targetParent').getValue()
        splitIdx_targetParent = string.rfind(targetParent, '.')
        targetParentFamilyName = targetParent[0:splitIdx_targetParent]
        targetParentClassName = targetParent[splitIdx_targetParent +
                                             1:len(targetParent)]

        targetChild = linkElement.getAttribute('targetChild').getValue()
        splitIdx_targetChild = string.rfind(targetChild, '.')
        targetChildFamilyName = targetChild[0:splitIdx_targetChild]
        targetChildClassName = targetChild[splitIdx_targetChild +
                                           1:len(targetChild)]

        id1base = ''
        id2base = ''
        if linkType != None and targetParent != None and targetChild != None:
            fieldChildren = linkElement.getChildren('field')
            if fieldChildren is not None:
                iter2 = fieldChildren.iterator()
                while iter2.hasNext():
                    fieldElement = iter2.next()
                    fieldName = fieldElement.getAttributeValue('name')
                    if fieldName == 'DiscoveryID1':
                        id1base = fieldElement.getText()
                    if fieldName == 'DiscoveryID2':
                        id2base = fieldElement.getText()

                # set type
                typeElement = Element('type')
                typeElement.setText(linkType)
                relElement.addContent(typeElement)

                # set provider
                providerElement = Element('provider')

                if mamIdToAttributesMap.containsKey(id1base):
                    otherProviderAttributes = mamIdToAttributesMap.get(id1base)
                    iter3 = otherProviderAttributes.entrySet().iterator()
                    while iter3.hasNext():
                        entry = iter3.next()
                        name = entry.getKey()
                        if name in removeAtts:
                            continue
                        value = entry.getValue()
                        otherElement = Element(name)
                        otherElement.setText(value)
                        providerElement.addContent(otherElement)

                relElement.addContent(providerElement)

                # set dependent
                dependentElement = Element('dependent')

                if mamIdToAttributesMap.containsKey(id2base):
                    otherdependentAttributes = mamIdToAttributesMap.get(
                        id2base)
                    iter3 = otherdependentAttributes.entrySet().iterator()
                    while iter3.hasNext():
                        entry = iter3.next()
                        name = entry.getKey()
                        if name in removeAtts:
                            continue
                        value = entry.getValue()
                        otherElement = Element(name)
                        otherElement.setText(value)
                        dependentElement.addContent(otherElement)

                relElement.addContent(dependentElement)
        rootElement.addContent(relElement)

    return rootElement
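
The targetParent/targetChild handling splits a qualified 'family.class' name at its last dot; a standalone sketch with an invented value:

import string

target = 'host_node.unix'          # hypothetical 'family.class' string
i = string.rfind(target, '.')
print target[:i]                   # host_node (family)
print target[i+1:]                 # unix (class)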
Example #34
            if debuglevel >= DEBUG_VERBOSE:
                print "Adding rpath " + string.join(rpath_val, ":") + " for " + obj
            lib_rpath.extend(rpath_val)
        else:
            print "warning: " + obj + " may need rpath, but --root not specified"

lib_path.extend(lib_rpath)

passnr = 1
previous_pass_unresolved = Set()
while 1:
    debug(DEBUG_NORMAL, "I: library reduction pass", `passnr`)
    if debuglevel >= DEBUG_VERBOSE:
        print "Objects:",
        for obj in objects.values():
            print obj[string.rfind(obj, '/') + 1:],
        print

    passnr = passnr + 1
    # Gather all already reduced libraries and treat them as objects as well
    small_libs = []
    for lib in regexpfilter(os.listdir(dest_path), "(.*-so-stripped)$").elems():
        obj = dest_path + "/" + lib
        small_libs.append(obj)
        inode = os.stat(obj)[ST_INO]
        if objects.has_key(inode):
            debug(DEBUG_SPAM, obj, "is hardlink to", objects[inode])
        else:
            objects[inode] = obj

    # DEBUG
Example #35
def AddHeader(platform, project, header_name, component, add_options):

    specfile_path = os.path.join(project.src_root_path, component[2])
    verfile_path = os.path.join(project.src_root_path, component[1])

    if platform.type == 'unix':
        verfile_path = string.translate(verfile_path,
                                        unix_path_translation_table)
        specfile_path = string.translate(specfile_path,
                                         unix_path_translation_table)

    if (platform.type == 'unix'):
        index = string.rfind(specfile_path, '/')
    else:
        index = string.rfind(specfile_path, '\\')
    specfile_name = specfile_path[index + 1:]

    if sysinfo.host_type == 'mac':
        verfile_path = string.translate(verfile_path,
                                        mac_path_translation_table)
        specfile_path = string.translate(specfile_path,
                                         mac_path_translation_table)

    out_file = open(header_name + '.hdr', add_options)
    in_file = open(verfile_path, 'r')

    try:
        lines = in_file.readlines()
    except:
        raise VersionError, 'Error opening ' + verfile_path

    for line in lines:
        if string.find(line, '#define TARVER_STRING_VERSION') == 0:
            index = string.find(line, '"')
            line = line[index + 1:]
            index = string.find(line, '"')
            line = line[:index]
            out_file.write('COMPONENT ' + component[0] + '|' + line + '|' +
                           specfile_name + '\n')

    out_file.close()
    in_file.close()

    if project.target_name != project.module_dir:
        specfile_name = specfile_name + '_' + project.target_name

    out_file = open(specfile_name, 'w')
    in_file = open(specfile_path, 'r')

    try:
        lines = in_file.readlines()
    except:
        raise VersionError, 'Error opening ' + specfile_path

    for line in lines:

        ## Exclude other platform's commands
        include_line = 1
        if (string.find(line, '#') == 0):
            include_line = 0

            ## determine whether this line should be included for the current platform
            line = line[1:]
            index = string.find(line, '#')
            platforms_string = line[0:index]
            platforms_string = string.split(platforms_string)
            for platform_name in platforms_string:
                if platform_name == platform.name or platform_name == platform.type:
                    include_line = 1
                    break

            ## strip the platform info from line
            line = line[index + 1:]
            line = string.lstrip(line)

        if (include_line == 1):
            output_line = ''

            ## Insert platform specific dll names
            index = string.find(line, 'dll_name(')
            while index != -1:
                output_line = output_line + line[:index]
                line = line[index + 9:]
                index = string.find(line, ')')
                platform.versioning.version = ''
                output_line = output_line + platform.versioning.create_dll_name(
                    line[:index], line[:index])
                platform.versioning.version = ''
                line = line[index + 1:]
                index = string.find(line, 'dll_name(')

            output_line = output_line + line

            out_file.write(output_line)

    out_file.close()
    in_file.close()
Example #36
    def prepareContents(self, registry, register_subdirs=0):
        # Creates objects for each file.
        fp = expandpath(self.filepath)
        data = {}
        objects = []
        types = self._readTypesFile()
        for entry in _filtered_listdir(fp):
            if not self._isAllowableFilename(entry):
                continue
            e_filepath = path.join(self.filepath, entry)
            e_fp = expandpath(e_filepath)
            if path.isdir(e_fp):
                # Add a subdirectory only if it was previously registered,
                # unless register_subdirs is set.
                info = registry.getDirectoryInfo(e_filepath)
                if info is None and register_subdirs:
                    # Register unknown subdirs
                    registry.registerDirectoryByPath(e_fp)
                    info = registry.getDirectoryInfo(e_filepath)
                if info is not None:
                    mt = types.get(entry)
                    t = None
                    if mt is not None:
                        t = registry.getTypeByMetaType(mt)
                    if t is None:
                        t = DirectoryView
                    ob = t(entry, e_filepath)
                    ob_id = ob.getId()
                    data[ob_id] = ob
                    objects.append({'id': ob_id, 'meta_type': ob.meta_type})
            else:
                pos = rfind(entry, '.')
                if pos >= 0:
                    name = entry[:pos]
                    ext = path.normcase(entry[pos + 1:])
                else:
                    name = entry
                    ext = ''
                if not name or name == 'REQUEST':
                    # Not an allowable id.
                    continue
                mo = bad_id(name)
                if mo is not None and mo != -1:  # Both re and regex formats
                    # Not an allowable id.
                    continue
                t = None
                mt = types.get(entry, None)
                if mt is None:
                    mt = types.get(name, None)
                if mt is not None:
                    t = registry.getTypeByMetaType(mt)
                if t is None:
                    t = registry.getTypeByExtension(ext)

                if t is not None:
                    metadata = FSMetadata(e_fp)
                    metadata.read()
                    try:
                        ob = t(name,
                               e_filepath,
                               fullname=entry,
                               properties=metadata.getProperties())
                    except:
                        import traceback
                        typ, val, tb = exc_info()
                        try:
                            exc_lines = traceback.format_exception(
                                typ, val, tb)
                            LOG('DirectoryView', ERROR, join(exc_lines, '\n'))

                            ob = BadFile(name,
                                         e_filepath,
                                         exc_str=join(exc_lines, '\r\n'),
                                         fullname=entry)
                        finally:
                            tb = None  # Avoid leaking frame!

                    # FS-based security
                    try:
                        permissions = metadata.getSecurity()
                        if permissions is not None:
                            for name in permissions.keys():
                                acquire, roles = permissions[name]
                                ob.manage_permission(name, roles, acquire)
                    except:
                        LOG('DirectoryView',
                            ERROR,
                            'Error setting permissions',
                            error=exc_info())

                    # only DTML Methods can have proxy roles
                    if hasattr(ob, '_proxy_roles'):
                        try:
                            ob._proxy_roles = tuple(metadata.getProxyRoles())
                        except:
                            LOG('DirectoryView',
                                ERROR,
                                'Error setting proxy role',
                                error=exc_info())

                    ob_id = ob.getId()
                    data[ob_id] = ob
                    objects.append({'id': ob_id, 'meta_type': ob.meta_type})

        return data, tuple(objects)
Example #37
def test_suite(argv):
    """
    the main test suite driver
    """

    # parse the commandline arguments
    args = test_util.get_args(arg_string=argv)

    # read in the test information
    suite, test_list = params.load_params(args)

    active_test_list = [t.name for t in test_list]

    test_list = suite.get_tests_to_run(test_list)

    suite.log.skip()
    suite.log.bold("running tests: ")
    suite.log.indent()
    for obj in test_list:
        suite.log.log(obj.name)
    suite.log.outdent()

    if not args.complete_report_from_crash == "":

        # make sure the web directory from the crash run exists
        suite.full_web_dir = "{}/{}/".format(suite.webTopDir,
                                             args.complete_report_from_crash)
        if not os.path.isdir(suite.full_web_dir):
            suite.log.fail("Crash directory does not exist")

        suite.test_dir = args.complete_report_from_crash

        # find all the tests that completed in that web directory
        tests = []
        test_file = ""
        was_benchmark_run = 0
        for sfile in os.listdir(suite.full_web_dir):
            if os.path.isfile(sfile) and sfile.endswith(".status"):
                index = string.rfind(sfile, ".status")
                tests.append(sfile[:index])

                with open(suite.full_web_dir + sfile, "r") as f:
                    for line in f:
                        if line.find("benchmarks updated") > 0:
                            was_benchmark_run = 1

            if os.path.isfile(sfile) and sfile.endswith(".ini"):
                test_file = sfile

        # create the report for this test run
        num_failed = report.report_this_test_run(
            suite, was_benchmark_run, "recreated report after crash of suite",
            "", tests, test_file)

        # create the suite report
        suite.log.bold("creating suite report...")
        report.report_all_runs(suite, active_test_list)
        suite.log.close_log()
        sys.exit("done")

    #--------------------------------------------------------------------------
    # check bench dir and create output directories
    #--------------------------------------------------------------------------
    all_compile = all([t.compileTest == 1 for t in test_list])

    if not all_compile:
        bench_dir = suite.get_bench_dir()

    if not args.copy_benchmarks is None:
        last_run = suite.get_last_run()

    suite.make_test_dirs()

    if suite.slack_post:
        msg = "> {} ({}) test suite started, id: {}\n> {}".format(
            suite.suiteName, suite.sub_title, suite.test_dir, args.note)
        suite.slack_post_it(msg)

    if not args.copy_benchmarks is None:
        old_full_test_dir = suite.testTopDir + suite.suiteName + "-tests/" + last_run
        copy_benchmarks(old_full_test_dir, suite.full_web_dir, test_list,
                        bench_dir, suite.log)

        # here, args.copy_benchmarks plays the role of make_benchmarks
        num_failed = report.report_this_test_run(
            suite, args.copy_benchmarks,
            "copy_benchmarks used -- no new tests run", "", test_list,
            args.input_file[0])
        report.report_all_runs(suite, active_test_list)

        if suite.slack_post:
            msg = "> copied benchmarks\n> {}".format(args.copy_benchmarks)
            suite.slack_post_it(msg)

        sys.exit("done")

    #--------------------------------------------------------------------------
    # figure out what needs updating and do the git updates, save the
    # current hash / HEAD, and make a ChangeLog
    # --------------------------------------------------------------------------
    now = time.localtime(time.time())
    update_time = time.strftime("%Y-%m-%d %H:%M:%S %Z", now)

    no_update = args.no_update.lower()
    if not args.copy_benchmarks is None:
        no_update = "all"

    # the default is to update everything, unless we specified a hash
    # when constructing the Repo object
    if no_update == "none":
        pass

    elif no_update == "all":
        for k in suite.repos:
            suite.repos[k].update = False

    else:
        nouplist = [k.strip() for k in no_update.split(",")]

        for repo in suite.repos.keys():
            if repo.lower() in nouplist:
                suite.repos[repo].update = False

    os.chdir(suite.testTopDir)

    for k in suite.repos:
        suite.log.skip()
        suite.log.bold("repo: {}".format(suite.repos[k].name))
        suite.log.indent()

        if suite.repos[k].update or suite.repos[k].hash_wanted:
            suite.repos[k].git_update()

        suite.repos[k].save_head()

        if suite.repos[k].update:
            suite.repos[k].make_changelog()

        suite.log.outdent()

    # keep track if we are running on any branch that is not the suite
    # default
    branches = [suite.repos[r].branch_wanted for r in suite.repos]
    if not all(suite.default_branch == b for b in branches):
        suite.log.warn("some git repos are not on the default branch")
        bf = open("{}/branch.status".format(suite.full_web_dir), "w")
        bf.write("branch different than suite default")
        bf.close()

    #--------------------------------------------------------------------------
    # build the tools and do a make clean, only once per build directory
    #--------------------------------------------------------------------------
    suite.build_tools(test_list)

    all_build_dirs = find_build_dirs(test_list)

    suite.log.skip()
    suite.log.bold("make clean in...")

    for d, source_tree in all_build_dirs:

        if not source_tree == "":
            suite.log.log("{} in {}".format(d, source_tree))
            os.chdir(suite.repos[source_tree].dir + d)
            suite.make_realclean(repo=source_tree)
        else:
            suite.log.log("{}".format(d))
            os.chdir(suite.source_dir + d)
            if suite.sourceTree in ["AMReX", "amrex"]:
                suite.make_realclean(repo="AMReX")
            else:
                suite.make_realclean()

    os.chdir(suite.testTopDir)

    #--------------------------------------------------------------------------
    # Setup Cmake if needed
    #--------------------------------------------------------------------------
    if (suite.useCmake):
        cmake_setup(suite)

    #--------------------------------------------------------------------------
    # main loop over tests
    #--------------------------------------------------------------------------
    for test in test_list:

        suite.log.outdent()  # just to make sure we have no indentation
        suite.log.skip()
        suite.log.bold("working on test: {}".format(test.name))
        suite.log.indent()

        if args.make_benchmarks is not None and (test.restartTest
                                                 or test.compileTest
                                                 or test.selfTest):
            suite.log.warn("benchmarks not needed for test {}".format(
                test.name))
            continue

        output_dir = suite.full_test_dir + test.name + '/'
        os.mkdir(output_dir)
        test.output_dir = output_dir

        #----------------------------------------------------------------------
        # compile the code
        #----------------------------------------------------------------------
        if not test.extra_build_dir == "":
            bdir = suite.repos[test.extra_build_dir].dir + test.buildDir
        else:
            bdir = suite.source_dir + test.buildDir

        # # For cmake builds, there is only one build dir
        # if ( suite.useCmake ): bdir = suite.source_build_dir

        os.chdir(bdir)

        if test.reClean == 1:
            # for one reason or another, multiple tests use different
            # build options, make clean again to be safe
            suite.log.log("re-making clean...")
            if not test.extra_build_dir == "":
                suite.make_realclean(repo=test.extra_build_dir)
            else:
                suite.make_realclean()

        suite.log.log("building...")

        coutfile = "{}/{}.make.out".format(output_dir, test.name)

        if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":
            if (suite.useCmake):
                comp_string, rc = suite.build_test_cmake(test=test,
                                                         outfile=coutfile)
            else:
                comp_string, rc = suite.build_c(test=test, outfile=coutfile)

            executable = test_util.get_recent_filename(bdir, "", ".ex")

        elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":
            comp_string, rc = suite.build_f(test=test, outfile=coutfile)
            executable = test_util.get_recent_filename(bdir, "main", ".exe")

        test.comp_string = comp_string

        # make return code is 0 if build was successful
        if rc == 0: test.compile_successful = True

        # copy the make.out into the web directory
        shutil.copy("{}/{}.make.out".format(output_dir, test.name),
                    suite.full_web_dir)

        if not test.compile_successful:
            error_msg = "ERROR: compilation failed"
            report.report_single_test(suite,
                                      test,
                                      test_list,
                                      failure_msg=error_msg)
            continue

        if test.compileTest:
            suite.log.log("creating problem test report ...")
            report.report_single_test(suite, test, test_list)
            continue

        #----------------------------------------------------------------------
        # copy the necessary files over to the run directory
        #----------------------------------------------------------------------
        suite.log.log("copying files to run directory...")

        needed_files = []
        if executable is not None:
            needed_files.append((executable, "move"))

        needed_files.append((test.inputFile, "copy"))
        # strip out any sub-directory from the build dir
        test.inputFile = os.path.basename(test.inputFile)

        if test.probinFile != "":
            needed_files.append((test.probinFile, "copy"))
            # strip out any sub-directory from the build dir
            test.probinFile = os.path.basename(test.probinFile)

        for auxf in test.auxFiles:
            needed_files.append((auxf, "copy"))

        # if any copy/move fail, we move onto the next test
        skip_to_next_test = 0
        for nfile, action in needed_files:
            if action == "copy":
                act = shutil.copy
            elif action == "move":
                act = shutil.move
            else:
                suite.log.fail("invalid action")

            try:
                act(nfile, output_dir)
            except IOError:
                error_msg = "ERROR: unable to {} file {}".format(action, nfile)
                report.report_single_test(suite,
                                          test,
                                          test_list,
                                          failure_msg=error_msg)
                skip_to_next_test = 1
                break

        if skip_to_next_test: continue

        skip_to_next_test = 0
        for lfile in test.linkFiles:
            if not os.path.exists(lfile):
                error_msg = "ERROR: link file {} does not exist".format(lfile)
                report.report_single_test(suite,
                                          test,
                                          test_list,
                                          failure_msg=error_msg)
                skip_to_next_test = 1
                break

            else:
                link_source = os.path.abspath(lfile)
                link_name = os.path.join(output_dir, os.path.basename(lfile))
                try:
                    os.symlink(link_source, link_name)
                except IOError:
                    error_msg = "ERROR: unable to symlink link file: {}".format(
                        lfile)
                    report.report_single_test(suite,
                                              test,
                                              test_list,
                                              failure_msg=error_msg)
                    skip_to_next_test = 1
                    break

        if skip_to_next_test: continue

        #----------------------------------------------------------------------
        # run the test
        #----------------------------------------------------------------------
        suite.log.log("running the test...")

        os.chdir(output_dir)

        test.wall_time = time.time()

        if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":

            base_cmd = "./{} {} amr.plot_file={}_plt amr.check_file={}_chk".format(
                executable, test.inputFile, test.name, test.name)

            # keep around the checkpoint files only for the restart runs
            if test.restartTest:
                base_cmd += " amr.checkpoint_files_output=1 amr.check_int=%d" % \
                                (test.restartFileNum)
            else:
                base_cmd += " amr.checkpoint_files_output=0"

            base_cmd += " {} {}".format(suite.globalAddToExecString,
                                        test.runtime_params)
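            # for illustration only -- with hypothetical names, the assembled
            # command looks roughly like:
            #   ./MyTest2d.ex inputs-2d amr.plot_file=MyTest_plt \
            #       amr.check_file=MyTest_chk amr.checkpoint_files_output=0 ...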

        elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":

            base_cmd = "./{} {} --plot_base_name {}_plt --check_base_name {}_chk ".format(
                executable, test.inputFile, test.name, test.name)

            # keep around the checkpoint files only for the restart runs
            if not test.restartTest: base_cmd += " --chk_int 0 "

            base_cmd += "{} {}".format(suite.globalAddToExecString,
                                       test.runtime_params)

        if args.with_valgrind:
            base_cmd = "valgrind " + args.valgrind_options + " " + base_cmd

        if test.customRunCmd is not None:
            base_cmd = test.customRunCmd

        suite.run_test(test, base_cmd)

        # if it is a restart test, then rename the final output file and
        # restart the test
        if test.restartTest:
            skip_restart = False

            last_file = test.get_last_plotfile(output_dir=output_dir)

            if last_file == "":
                error_msg = "ERROR: test did not produce output.  Restart test not possible"
                skip_restart = True

            if len(test.find_backtrace()) > 0:
                error_msg = "ERROR: test produced backtraces.  Restart test not possible"
                skip_restart = True

            if skip_restart:
                # copy what we can
                test.wall_time = time.time() - test.wall_time
                shutil.copy("{}.run.out".format(test.name), suite.full_web_dir)
                if os.path.isfile("{}.err.out".format(test.name)):
                    shutil.copy("{}.err.out".format(test.name),
                                suite.full_web_dir)
                    test.has_stderr = True
                suite.copy_backtrace(test)
                report.report_single_test(suite,
                                          test,
                                          test_list,
                                          failure_msg=error_msg)
                continue
            orig_last_file = "orig_{}".format(last_file)
            shutil.move(last_file, orig_last_file)

            if test.diffDir:
                orig_diff_dir = "orig_{}".format(test.diffDir)
                shutil.move(test.diffDir, orig_diff_dir)

            # get the file number to restart from
            restart_file = "%s_chk%5.5d" % (test.name, test.restartFileNum)
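            # "%5.5d" zero-pads to five digits, e.g. restartFileNum = 10
            # yields "<test name>_chk00010"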

            suite.log.log("restarting from {} ... ".format(restart_file))

            if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":

                base_cmd = "./{} {} amr.plot_file={}_plt amr.check_file={}_chk amr.checkpoint_files_output=0 amr.restart={}".format(
                    executable, test.inputFile, test.name, test.name,
                    restart_file)

            elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":

                base_cmd = "./{} {} --plot_base_name {}_plt --check_base_name {}_chk --chk_int 0 --restart {} {}".format(
                    executable, test.inputFile, test.name, test.name,
                    test.restartFileNum, suite.globalAddToExecString)

            suite.run_test(test, base_cmd)

        test.wall_time = time.time() - test.wall_time

        #----------------------------------------------------------------------
        # do the comparison
        #----------------------------------------------------------------------
        if not test.selfTest:

            if test.outputFile == "":
                if test.compareFile == "":
                    compare_file = test.get_last_plotfile(
                        output_dir=output_dir)
                else:
                    # we specified the name of the file we want to
                    # compare to -- make sure it exists
                    compare_file = test.compareFile
                    if not os.path.isdir(compare_file):
                        compare_file = ""

                output_file = compare_file
            else:
                output_file = test.outputFile
                compare_file = test.name + '_' + output_file

            # get the number of levels for reporting
            prog = "{} -l {}".format(suite.tools["fboxinfo"], output_file)
            stdout0, stderr0, rc = test_util.run(prog)
            test.nlevels = stdout0.rstrip('\n')
            if type(params.convert_type(test.nlevels)) is not int:
                test.nlevels = ""

            if args.make_benchmarks is None:

                suite.log.log("doing the comparison...")
                suite.log.indent()
                suite.log.log("comparison file: {}".format(output_file))

                test.compare_file_used = output_file

                if not test.restartTest:
                    bench_file = bench_dir + compare_file
                else:
                    bench_file = orig_last_file

                # see if it exists
                # note, with AMReX, the plotfiles are actually directories

                if not os.path.isdir(bench_file):
                    suite.log.warn("no corresponding benchmark found")
                    bench_file = ""

                    with open("{}.compare.out".format(test.name), 'w') as cf:
                        cf.write("WARNING: no corresponding benchmark found\n")
                        cf.write("         unable to do a comparison\n")

                else:
                    if not compare_file == "":

                        suite.log.log("benchmark file: {}".format(bench_file))

                        command = "{} -n 0 {} {}".format(
                            suite.tools["fcompare"], bench_file, output_file)

                        sout, serr, ierr = test_util.run(
                            command,
                            outfile="{}.compare.out".format(test.name),
                            store_command=True)

                        if ierr == 0:
                            test.compare_successful = True

                        if test.compareParticles:
                            for ptype in test.particleTypes.strip().split():
                                command = "{} {} {} {}".format(
                                    suite.tools["particle_compare"],
                                    bench_file, output_file, ptype)

                                sout, serr, ierr = test_util.run(
                                    command,
                                    outfile="{}.compare.out".format(test.name),
                                    store_command=True)

                                test.compare_successful = test.compare_successful and not ierr

                    else:
                        suite.log.warn("unable to do a comparison")

                        with open("{}.compare.out".format(test.name),
                                  'w') as cf:
                            cf.write(
                                "WARNING: run did not produce any output\n")
                            cf.write("         unable to do a comparison\n")

                suite.log.outdent()

                if not test.diffDir == "":
                    if not test.restartTest:
                        diff_dir_bench = bench_dir + '/' + test.name + '_' + test.diffDir
                    else:
                        diff_dir_bench = orig_diff_dir

                    suite.log.log("doing the diff...")
                    suite.log.log("diff dir: {}".format(test.diffDir))

                    command = "diff {} -r {} {}".format(
                        test.diffOpts, diff_dir_bench, test.diffDir)

                    outfile = "{}.compare.out".format(test.name)
                    sout, serr, diff_status = test_util.run(command,
                                                            outfile=outfile,
                                                            store_command=True)

                    if diff_status == 0:
                        diff_successful = True
                        with open("{}.compare.out".format(test.name),
                                  'a') as cf:
                            cf.write("\ndiff was SUCCESSFUL\n")
                    else:
                        diff_successful = False

                    test.compare_successful = test.compare_successful and diff_successful

            else:  # make_benchmarks

                suite.log.log(
                    "storing output of {} as the new benchmark...".format(
                        test.name))
                suite.log.indent()
                suite.log.warn("new benchmark file: {}".format(compare_file))
                suite.log.outdent()

                if not compare_file == "":
                    if not output_file == compare_file:
                        source_file = output_file
                    else:
                        source_file = compare_file

                    try:
                        shutil.rmtree("{}/{}".format(bench_dir, compare_file))
                    except:
                        pass
                    shutil.copytree(source_file,
                                    "{}/{}".format(bench_dir, compare_file))

                    with open("{}.status".format(test.name), 'w') as cf:
                        cf.write("benchmarks updated.  New file:  {}\n".format(
                            compare_file))

                else:
                    with open("{}.status".format(test.name), 'w') as cf:
                        cf.write("benchmarks failed")

                    # copy what we can
                    shutil.copy("{}.run.out".format(test.name),
                                suite.full_web_dir)
                    if os.path.isfile("{}.err.out".format(test.name)):
                        shutil.copy("{}.err.out".format(test.name),
                                    suite.full_web_dir)
                        test.has_stderr = True
                    suite.copy_backtrace(test)
                    error_msg = "ERROR: runtime failure during benchmark creation"
                    report.report_single_test(suite,
                                              test,
                                              test_list,
                                              failure_msg=error_msg)

                if not test.diffDir == "":
                    diff_dir_bench = "{}/{}_{}".format(bench_dir, test.name,
                                                       test.diffDir)
                    if os.path.isdir(diff_dir_bench):
                        shutil.rmtree(diff_dir_bench)
                    if os.path.isdir(test.diffDir):
                        shutil.copytree(test.diffDir, diff_dir_bench)
                    else:
                        shutil.copy(test.diffDir, diff_dir_bench)
                    suite.log.log("new diffDir: {}_{}".format(
                        test.name, test.diffDir))

        else:  # selfTest

            if args.make_benchmarks is None:

                suite.log.log(
                    "looking for selfTest success string: {} ...".format(
                        test.stSuccessString))

                try:
                    of = open("{}.run.out".format(test.name), 'r')
                except IOError:
                    suite.log.warn("no output file found")
                    out_lines = ['']
                else:
                    out_lines = of.readlines()

                    # successful comparison is indicated by presence
                    # of success string
                    for line in out_lines:
                        if line.find(test.stSuccessString) >= 0:
                            test.compare_successful = True
                            break

                    of.close()

                with open("{}.compare.out".format(test.name), 'w') as cf:
                    if test.compare_successful:
                        cf.write("SELF TEST SUCCESSFUL\n")
                    else:
                        cf.write("SELF TEST FAILED\n")

        #----------------------------------------------------------------------
        # do any requested visualization (2- and 3-d only) and analysis
        #----------------------------------------------------------------------
        if not test.selfTest:
            if output_file != "":
                if args.make_benchmarks is None:

                    # get any parameters for the summary table
                    job_info_file = "{}/job_info".format(output_file)
                    if os.path.isfile(job_info_file):
                        test.has_jobinfo = 1

                    try:
                        jif = open(job_info_file, "r")
                    except:
                        suite.log.warn("unable to open the job_info file")
                    else:
                        job_file_lines = jif.readlines()
                        jif.close()

                        if suite.summary_job_info_field1 != "":
                            for l in job_file_lines:
                                if l.startswith(suite.summary_job_info_field1.
                                                strip()) and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field1 = _tmp[idx:]
                                    break

                        if suite.summary_job_info_field2 != "":
                            for l in job_file_lines:
                                if l.startswith(suite.summary_job_info_field2.
                                                strip()) and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field2 = _tmp[idx:]
                                    break

                        if suite.summary_job_info_field3 != "":
                            for l in job_file_lines:
                                if l.startswith(suite.summary_job_info_field3.
                                                strip()) and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field3 = _tmp[idx:]
                                    break
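                        # e.g. a (hypothetical) job_info line
                        # "output dir: /scratch/runs/run0042" sets the field
                        # to "run0042" -- only the basename after the last
                        # "/" is kept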

                    # visualization
                    if test.doVis:

                        if test.dim == 1:
                            suite.log.log(
                                "Visualization not supported for dim = {}".
                                format(test.dim))
                        else:
                            suite.log.log("doing the visualization...")
                            tool = suite.tools["fsnapshot{}d".format(test.dim)]
                            test_util.run(
                                '{} --palette {}/Palette -cname "{}" -p "{}"'.
                                format(tool, suite.f_compare_tool_dir,
                                       test.visVar, output_file))

                            # convert the .ppm files into .png files
                            ppm_file = test_util.get_recent_filename(
                                output_dir, "", ".ppm")
                            if ppm_file is not None:
                                png_file = ppm_file.replace(".ppm", ".png")
                                test_util.run("convert {} {}".format(
                                    ppm_file, png_file))
                                test.png_file = png_file

                    # analysis
                    if not test.analysisRoutine == "":

                        suite.log.log("doing the analysis...")
                        if not test.extra_build_dir == "":
                            tool = "{}/{}".format(
                                suite.repos[test.extra_build_dir].dir,
                                test.analysisRoutine)
                        else:
                            tool = "{}/{}".format(suite.source_dir,
                                                  test.analysisRoutine)

                        shutil.copy(tool, os.getcwd())

                        if test.analysisMainArgs == "":
                            option = ""
                        else:
                            option = eval("suite.{}".format(
                                test.analysisMainArgs))

                        cmd_name = os.path.basename(test.analysisRoutine)
                        cmd_string = "./{} {} {}".format(
                            cmd_name, option, output_file)
                        outfile = "{}.analysis.out".format(test.name)
                        _, _, rc = test_util.run(cmd_string,
                                                 outfile=outfile,
                                                 store_command=True)

                        if rc == 0:
                            analysis_successful = True
                        else:
                            analysis_successful = False
                            suite.log.warn("analysis failed...")

                        test.compare_successful = test.compare_successful and analysis_successful

            else:
                if test.doVis or test.analysisRoutine != "":
                    suite.log.warn("no output file.  Skipping visualization")

        #----------------------------------------------------------------------
        # move the output files into the web directory
        #----------------------------------------------------------------------
        if args.make_benchmarks is None:
            shutil.copy("{}.run.out".format(test.name), suite.full_web_dir)
            if os.path.isfile("{}.err.out".format(test.name)):
                shutil.copy("{}.err.out".format(test.name), suite.full_web_dir)
                test.has_stderr = True
            shutil.copy("{}.compare.out".format(test.name), suite.full_web_dir)
            try:
                shutil.copy("{}.analysis.out".format(test.name),
                            suite.full_web_dir)
            except:
                pass

            shutil.copy(
                test.inputFile, "{}/{}.{}".format(suite.full_web_dir,
                                                  test.name, test.inputFile))

            if test.has_jobinfo:
                shutil.copy(
                    job_info_file,
                    "{}/{}.job_info".format(suite.full_web_dir, test.name))

            if suite.sourceTree == "C_Src" and test.probinFile != "":
                shutil.copy(
                    test.probinFile,
                    "{}/{}.{}".format(suite.full_web_dir, test.name,
                                      test.probinFile))

            for af in test.auxFiles:

                # strip out any sub-directory under build dir for the aux file
                # when copying
                shutil.copy(
                    os.path.basename(af),
                    "{}/{}.{}".format(suite.full_web_dir, test.name,
                                      os.path.basename(af)))

            if test.png_file is not None:
                try:
                    shutil.copy(test.png_file, suite.full_web_dir)
                except IOError:
                    # visualization was not successful.  Reset image
                    test.png_file = None

            if not test.analysisRoutine == "":
                try:
                    shutil.copy(test.analysisOutputImage, suite.full_web_dir)
                except IOError:
                    # analysis was not successful.  Reset the output image
                    test.analysisOutputImage = ""

            # were any Backtrace files output (indicating a crash)
            suite.copy_backtrace(test)

        else:
            shutil.copy("{}.status".format(test.name), suite.full_web_dir)

        #----------------------------------------------------------------------
        # archive (or delete) the output
        #----------------------------------------------------------------------
        suite.log.log("archiving the output...")
        for pfile in os.listdir(output_dir):
            if (os.path.isdir(pfile)
                    and (pfile.startswith("{}_plt".format(test.name))
                         or pfile.startswith("{}_chk".format(test.name)))):

                if suite.purge_output == 1 and not pfile == output_file:
                    # delete the plt/chk file
                    if os.path.isdir(pfile):
                        try:
                            shutil.rmtree(pfile)
                        except:
                            suite.log.warn("unable to remove {}".format(pfile))

                else:
                    # tar it up
                    try:
                        tar = tarfile.open("{}.tgz".format(pfile), "w:gz")
                        tar.add("{}".format(pfile))
                        tar.close()

                    except:
                        suite.log.warn(
                            "unable to tar output file {}".format(pfile))

                    else:
                        try:
                            shutil.rmtree(pfile)
                        except OSError:
                            suite.log.warn("unable to remove {}".format(pfile))

        #----------------------------------------------------------------------
        # write the report for this test
        #----------------------------------------------------------------------
        if args.make_benchmarks is None:
            suite.log.log("creating problem test report ...")
            report.report_single_test(suite, test, test_list)

    #--------------------------------------------------------------------------
    # Clean Cmake build and install directories if needed
    #--------------------------------------------------------------------------
    if (suite.useCmake):
        suite.cmake_clean("AMReX", suite.amrex_dir)
        suite.cmake_clean(suite.suiteName, suite.source_dir)

    #--------------------------------------------------------------------------
    # write the report for this instance of the test suite
    #--------------------------------------------------------------------------
    suite.log.outdent()
    suite.log.skip()
    suite.log.bold("creating new test report...")
    num_failed = report.report_this_test_run(suite, args.make_benchmarks,
                                             args.note, update_time, test_list,
                                             args.input_file[0])

    # make sure that all of the files in the web directory are world readable
    for file in os.listdir(suite.full_web_dir):
        current_file = suite.full_web_dir + file

        if os.path.isfile(current_file):
            os.chmod(current_file, 0o644)

    # reset the branch to what it was originally
    suite.log.skip()
    suite.log.bold("reverting git branches/hashes")
    suite.log.indent()

    for k in suite.repos:
        if suite.repos[k].update or suite.repos[k].hash_wanted:
            suite.repos[k].git_back()

    suite.log.outdent()

    # For a temporary run, return now without creating the suite report.
    if args.do_temp_run:
        return num_failed

    # store an output file in the web directory that can be parsed easily by
    # external program
    name = "source"
    if suite.sourceTree in ["AMReX", "amrex"]: name = "AMReX"
    branch = ''
    if suite.repos[name].branch_wanted:
        branch = suite.repos[name].branch_wanted.strip("\"")

    with open("{}/suite.{}.status".format(suite.webTopDir, branch), "w") as f:
        f.write("{}; num failed: {}; source hash: {}".format(
            suite.repos[name].name, num_failed,
            suite.repos[name].hash_current))

    #--------------------------------------------------------------------------
    # generate the master report for all test instances
    #--------------------------------------------------------------------------
    suite.log.skip()
    suite.log.bold("creating suite report...")
    report.report_all_runs(suite, active_test_list)

    def email_developers():
        msg = email.message_from_string(suite.emailBody)
        msg['From'] = suite.emailFrom
        msg['To'] = ",".join(suite.emailTo)
        msg['Subject'] = suite.emailSubject

        server = smtplib.SMTP('localhost')
        server.sendmail(suite.emailFrom, suite.emailTo, msg.as_string())
        server.quit()

    if num_failed > 0 and suite.sendEmailWhenFail and not args.send_no_email:
        suite.log.skip()
        suite.log.bold("sending email...")
        email_developers()

    if suite.slack_post:
        suite.slack_post_it("> test complete, num failed = {}\n{}".format(
            num_failed, suite.emailBody))

    return num_failed
    def Get(self,
            collection,
            uri_at,
            sort,
            view,
            page,
            partial_match,
            flatList,
            logging_me=True,
            debugging_me=False):
        """
    Get a list of maps that contain host metadata, or a list of maps that
    contain file and directory metadata from the urltracker. The last map in
    the list has the number of list chunks that match the given uri_at and view.

    collection - collection to get results from.  Collections are restricts
    defined by good and bad URL patterns.

    uri_at - URI that is the parent of files and directories in the returned list.
    Leave blank to get a list of hosts.

    sort - *_ASC or *_DESC

    view - ALL, SUCCESSFUL, ERRORS, or EXCLUDED.  If not ALL, restrict files to
    this status and directories to having at least one descendant file with
    this status.

    page - which chunk of results to return.  urltracker returns at most 100
    hosts or 100 files and 100 directories at a time.  page starts at 1.
    The term is page and not chunk because the end user sees the results on a
    web page with a Google navigational bar if there is more than one page.

    partial_match - whether to allow partial match when urltracker_server
    searches for files or directories

    flatList - whether the result should be a flat list of complete URLs, else
               a hierarchical list of directory/file names (the default).
    """

        if logging_me:
            logging.info("[urltracker_client:Get] Sitemap flatList = " +
                         str(flatList) + " [" + str(flatList == 0) + "]")

        this_many_results = 100

        command = self.BuildBrowseCommand(collection, uri_at, sort, view, page,
                                          partial_match, this_many_results,
                                          flatList)

        response = self.SendCommand(command)
        if response is None: return None

        # davidw TODO: why not just return response?
        contents = []
        uri_path = response.searchedpath()  # Don't clobber formal params, please.
        path_len = len(uri_path)  # `path_len' = current node's URI len.
        first_slash_index = string.find(uri_path, '/')
        # if this is a partial search and
        # the search prefix is not a hostname, nor a directory search,
        # then the file name is just everything after the last slash (`/') char.
        if partial_match == 1 and first_slash_index != -1 and \
           uri_path[-1] != '/':
            head_index = string.rfind(uri_path, '/') + 1  # 1st after last `/'.
        # else it's an exhaustive search, or slashless, or a directory/folder URI,
        else:
            head_index = path_len  # ...so elide the entire node's URI.
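        # worked example (hypothetical): a partial-match search for
        # uri_path = 'host.com/docs/rep' puts head_index just past the last
        # '/', so names are reported relative to 'host.com/docs/'; an
        # exhaustive, slashless, or directory search elides the whole prefix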
        for dir in response.dirs_list():
            content = {
                'numCrawledURLs': 0,
                'numRetrievalErrors': 0,
                'numExcludedURLs': 0
            }
            if first_slash_index == -1:  # If no `/'s, must've been a host name.
                content['type'] = 'HostContentData'
                path = dir.path()
                if path[-1] == '/':
                    path = path[:-1]
                content['name'] = path
                content['uri'] = path
            else:
                content['type'] = 'DirectoryContentData'
                content['name'] = dir.path()[head_index:]
                if content['name'] == '': continue
                content['uri'] = dir.path()
            if flatList:  # For flatList mode perusal,
                content['name'] = content['uri']  # ...we want complete URIs.
            for count in dir.data().counts_list():
                if count.state() in self.CrawledURLStates:
                    content['numCrawledURLs'] += int(count.count())
                elif count.state() in self.RetrievalErrorStates:
                    content['numRetrievalErrors'] += int(count.count())
                elif count.state() in self.ExcludedURLStates:
                    content['numExcludedURLs'] += int(count.count())
            if debugging_me:
                if flatList == 0:
                    content['name'] = '[Tree=' + str(
                        flatList) + '] ' + content['name']
                #content['uri' ] = '[Tree=' + str(flatList) + '] ' + content['uri' ]
                else:
                    content['name'] = '[Flat=' + str(
                        flatList) + '] ' + content['name']
                #content['uri' ] = '[Flat=' + str(flatList) + '] ' + content['uri' ]
            contents.append(content)
        for url in response.urls_list():
            content = {
                'name': url.path()[head_index:],
                'uri': url.path(),
                'state': url.data().states_list()[-1].state(),
                'timestamp': url.data().states_list()[-1].timestamp(),
                'isCookieServerError':
                    url.data().states_list()[-1].iscookieservererror(),
                'type': 'FileContentData'
            }
            if content['name'] == '':
                content['name'] = '/'  # This really should be `.', yes?  Grr.
            if flatList:  # For flatList mode perusal,
                content['name'] = content['uri']  # ...we want complete URIs.
            if debugging_me:
                if flatList == 0:
                    content['name'] = '[Tree=' + str(
                        flatList) + '] ' + content['name']
                #content['uri' ] = '[Tree=' + str(flatList) + '] ' + content['uri' ]
                else:
                    content['name'] = '[Flat=' + str(
                        flatList) + '] ' + content['name']
                #content['uri' ] = '[Flat=' + str(flatList) + '] ' + content['uri' ]
            contents.append(content)
        contents.append({
            'uriAt': uri_path,  # _Not_ necessarily == `uri_at' (?)
            'numPages': UrlTrackerServerResponseNumPages(response),
        })

        return contents
 def get_safe_logs(self, get):
     pythonV = sys.version_info[0]
     if 'drop_ip' in get:
         path = '/www/server/btwaf/drop_ip.log'
         num = 14
     else:
         path = '/www/wwwlogs/btwaf/' + get.siteName + '_' + get.toDate + '.log'
         num = 10
     if not os.path.exists(path): return []
     p = 1
     if 'p' in get:
         p = int(get.p)
     import cgi
     start_line = (p - 1) * num
     count = start_line + num
     fp = open(path, 'rb')
     buf = ""
     try:
         fp.seek(-1, 2)
     except:
         return []
     # the file was opened in binary mode, so compare bytes on Python 3
     if fp.read(1) in ("\n", b"\n"): fp.seek(-1, 2)
     data = []
     b = True
     n = 0
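     # read the log backwards in chunks of up to 4096 bytes, peeling one
     # line per iteration off the end of `buf`; only the lines numbered
     # start_line .. count-1, counting from the end of the file, are parsed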
     for i in range(count):
         while True:
             newline_pos = buf.rfind("\n")  # works on Python 2 and 3
             pos = fp.tell()
             if newline_pos != -1:
                 if n >= start_line:
                     line = buf[newline_pos + 1:]
                     try:
                         data.append(json.loads(cgi.escape(line)))
                     except:
                         pass
                 buf = buf[:newline_pos]
                 n += 1
                 break
             else:
                 if pos == 0:
                     b = False
                     break
                 to_read = min(4096, pos)
                 fp.seek(-to_read, 1)
                 t_buf = fp.read(to_read)
                 if pythonV == 3: t_buf = t_buf.decode('utf-8')
                 buf = t_buf + buf
                 fp.seek(-to_read, 1)
                 if pos - to_read == 0:
                     buf = "\n" + buf
         if not b: break
     fp.close()
     if 'drop_ip' in get:
         stime = time.time()
         for i in range(len(data)):
             if (stime - data[i][0]) < data[i][4]:
                 get.ip = data[i][1]
                 data[i].append(self.get_waf_drop_ip(get))
             else:
                 data[i].append(False)
     return data
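     # a hedged usage sketch (`waf` is a hypothetical instance of this class;
     # `get` is assumed to be an attribute-style dict with siteName, toDate,
     # and optionally p and drop_ip set):
     #   logs = waf.get_safe_logs(get)  # newest `num` entries for page get.p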
Esempio n. 40
0
  # now go thru the urls
  #
  for url in urls:

    # pause before starting song to give my.mp3.com 
    # a chance at authorizing the url before we access it
    time.sleep(1)

    # strip leading and trailing whitespace
    url = string.lstrip(url)
    url = string.rstrip(url)

    # if the url consists only of a host treat it like
    # a playlist (ie. icecast m3u playlists)
    p1 = string.rfind(url, "/")
    if p1 == -1 or p1 > len(url) - 5 or opt_plsfile == 1:
      opt_plsfile = 1

      if url[-1] == "/":
        url = url[:-1]
      logline = "Trying stream " + url
      logger.logit(logline)
    else:
      logline = "Getting file " + url[p1+1:]
      logger.logit(logline)

    #
    # Open the URL using urllib for m3u and icy for pls
    #
    bytesread = 0
Esempio n. 41
0
        cprnc_out = cprnc_out_file.read()
        cprnc_out_file.close()
    else:
        # Set the arguments to CPRNC, run, and catch output
        cprnc_args[2:] = [old_results_path, new_results_path]
        cprnc_proc = Popen(cprnc_args, stdout=PIPE, stderr=STDOUT)
        cprnc_out = cprnc_proc.communicate()[0]

        # Write the output file
        cprnc_out_file = open(cprnc_out_path, 'w')
        cprnc_out_file.write(cprnc_out)
        cprnc_out_file.close()

    # Add to the list of log file output
    log_result = ' GOOD: '
    if (string.rfind(cprnc_out, ident_str) < 0):
        num_failures[full_test_name] += 1
        log_result = '* BAD: '
    log_str = log_result + file_name + os.linesep
    log_output[full_test_name].append(log_str)

    print '   ', log_result + file_name

# Now wait for all files to be processed
if (not options.serial):
    MPI.COMM_WORLD.Barrier()

# Gather all of the statistics and log files from all processors
all_log_output = log_output
all_num_failures = num_failures
if (not options.serial):
Esempio n. 42
0
def do_plot(plotfile, component, component2, outFile, log, 
            minval, maxval, minval2, maxval2, eps, dpi, origin, annotation, 
            xmin_pass, ymin_pass, zmin_pass, xmax_pass, ymax_pass, zmax_pass):

    pylab.rc("font", size=9)


    #--------------------------------------------------------------------------
    # construct the output file name
    #--------------------------------------------------------------------------
    if (outFile == ""):
        outFile = os.path.normpath(plotfile) + "_" + component
        if (not component2 == ""): outFile += "_" + component2

        if (not eps):
            outFile += ".png"

        else:
            outFile += ".eps"

    else:
        # make sure the proper extension is used
        if (not eps):
            # string.rfind() returns -1 when the suffix is absent
            if (string.rfind(outFile, ".png") <= 0):
                outFile = outFile + ".png"

        else:
            if (string.rfind(outFile, ".eps") <= 0):
                outFile = outFile + ".eps"


    #--------------------------------------------------------------------------
    # read in the meta-data from the plotfile
    #--------------------------------------------------------------------------
    (nx, ny, nz) = fsnapshot.fplotfile_get_size(plotfile)

    time = fsnapshot.fplotfile_get_time(plotfile)

    (xmin, xmax, ymin, ymax, zmin, zmax) = \
        fsnapshot.fplotfile_get_limits(plotfile)

    dx = (xmax - xmin)/nx
    x = xmin + numpy.arange( (nx), dtype=numpy.float64 )*dx

    dy = (ymax - ymin)/ny
    y = ymin + numpy.arange( (ny), dtype=numpy.float64 )*dy

    if (nz > 0):
        dz = (zmax - zmin)/nz
        z = zmin + numpy.arange( (nz), dtype=numpy.float64 )*dz


    if (nz == -1):

        #----------------------------------------------------------------------
        # 2-d plots
        #----------------------------------------------------------------------
        extent = [xmin, xmax, ymin, ymax]

        ix0 = 0
        ix = nx
        iy0 = 0
        iy = ny

        if (not xmin_pass == None):
            extent[0] = xmin_pass
            ix0 = int((xmin_pass - xmin)/dx)

        if (not ymin_pass == None):
            extent[2] = ymin_pass
            iy0 = int((ymin_pass - ymin)/dy)

        if (not xmax_pass == None):
            extent[1] = xmax_pass
            ix = int((xmax_pass - xmin)/dx)

        if (not ymax_pass == None):
            extent[3] = ymax_pass
            iy = int((ymax_pass - ymin)/dy)


        sparseX = 0
        if (ny >= 3*nx):
            sparseX = 1

        # read in the main component
        data = numpy.zeros( (nx, ny), dtype=numpy.float64)

        (data, err) = fsnapshot.fplotfile_get_data_2d(plotfile, component, data)
        if (not err == 0):
            sys.exit(2)

        data = numpy.transpose(data)


        # read in the component #2, if present
        if (not component2 == ""):
            data2 = numpy.zeros( (nx, ny), dtype=numpy.float64)

            (data2, err) = fsnapshot.fplotfile_get_data_2d(plotfile, component2, data2)
            if (not err == 0):
                sys.exit(2)

            data2 = numpy.transpose(data2)

        # log?
        if log:
            if (numpy.min(data) < 0):
                data = numpy.log10(numpy.abs(data))
            else:
                data = numpy.log10(data)

            if (not component2 == ""): 
                if (numpy.min(data2) < 0.0):
                    data2 = numpy.log10(numpy.abs(data2))
                else:
                    data2 = numpy.log10(data2)
                
            if (not minval == None): minval = math.log10(minval)
            if (not maxval == None): maxval = math.log10(maxval)

            if (not minval2 == None): minval2 = math.log10(minval2)
            if (not maxval2 == None): maxval2 = math.log10(maxval2)


        #----------------------------------------------------------------------
        # plot main component
        #----------------------------------------------------------------------
        if (not component2 == ""):
            ax = pylab.subplot(1,2,1)
            pylab.subplots_adjust(wspace=0.5)

        else:
            ax = pylab.subplot(1,1,1)
    

        divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)    

        im=pylab.imshow(data[iy0:iy,ix0:ix],origin='lower', extent=extent, vmin=minval, vmax=maxval)

        pylab.title(component)
        pylab.xlabel("x")
        pylab.ylabel("y")

        # axis labels in scientific notation with LaTeX
        ax = pylab.gca()
        ax.xaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))
        ax.yaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))

        if (sparseX):
            ax.xaxis.set_major_locator(pylab.MaxNLocator(3))


        # make space for a colorbar -- getting it the same size as the
        # vertical extent of the plot is surprisingly tricky.  See
        # http://matplotlib.sourceforge.net/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
        ax_cb = divider.new_horizontal(size="10%", pad=0.1)

        fig1 = ax.get_figure()
        fig1.add_axes(ax_cb)

        formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
        cb = pylab.colorbar(im, format=formatter, cax=ax_cb)

        # make the font size for the colorbar axis labels small.  Note
        # the offsetText is the 10^N that appears at the top of the
        # y-axis.
        cl = pylab.getp(cb.ax, 'ymajorticklabels')
        pylab.setp(cl, fontsize=10)
        
        cb.ax.yaxis.offsetText.set_fontsize("small")


        # do a fixed offset in pixels from the (xmin,ymin) data point
        trans=matplotlib.transforms.offset_copy(ax.transData, x=0, y=-0.5, 
                                                fig=fig1, units='inches')

        pylab.text(xmin, ymin, "time = %7.3g s" % (time), 
                   verticalalignment="bottom", transform = trans, 
                   clip_on=False, fontsize=10)

        if (not annotation == ""):
            trans=matplotlib.transforms.offset_copy(ax.transData, x=0, y=-0.65,
                                                    fig=fig1, units='inches')

            pylab.text(xmin, ymin, "%s" % (annotation), 
                       verticalalignment="bottom", transform = trans, 
                       clip_on=False, fontsize=10)            


        #----------------------------------------------------------------------
        # plot component #2
        #----------------------------------------------------------------------
        if (not component2 == ""):
            ax = pylab.subplot(1,2,2)

            divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)

            im = pylab.imshow(data2[iy0:iy,ix0:ix], origin='lower', extent=extent, 
                              vmin=minval2, vmax=maxval2)

            pylab.title(component2)
            pylab.xlabel("x")
            pylab.ylabel("y")

            # axis labels in scientific notation with LaTeX
            ax.xaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))
            ax.yaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))

            if (sparseX):
                ax.xaxis.set_major_locator(pylab.MaxNLocator(3))

            # make space for a colorbar -- getting it the same size as
            # the vertical extent of the plot is surprisingly tricky.
            # See
            # http://matplotlib.sourceforge.net/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
            ax_cb = divider.new_horizontal(size="10%", pad=0.1)

            fig1 = ax.get_figure()
            fig1.add_axes(ax_cb)

            formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
            cb = pylab.colorbar(im, format=formatter, cax=ax_cb)

            # make the font size for the colorbar axis labels small.  Note the
            # offsetText is the 10^N that appears at the top of the y-axis.
            cl = pylab.getp(cb.ax, 'ymajorticklabels')
            pylab.setp(cl, fontsize=10)
        
            cb.ax.yaxis.offsetText.set_fontsize("small")

            #ax_cb.yaxis.tick_right()


    else:

        #----------------------------------------------------------------------
        # 3-d plot
        #----------------------------------------------------------------------

        # starting points for the figure positions

        # assume that the width of the plotting area is 0.05 to 0.95,
        # leaving 0.9 to be split amongst the 3 plots.  So each has a
        # width of 0.3

        # for the height, we will assume that the colorbar at the
        # bottom gets 0.15, and that we go until 0.95, leaving 0.8 of
        # height for the plots.
            
        pos1 = [0.05, 0.15, 0.3, 0.8]
        pos2 = [0.35, 0.15, 0.3, 0.8]
        pos3 = [0.65, 0.15, 0.3, 0.8]

        fig = pylab.figure()


        # read in the slices
        # x-y
        data_xy = numpy.zeros( (nx, ny), dtype=numpy.float64)

        indir = 3
        (data_xy, err) = \
            fsnapshot.fplotfile_get_data_3d(plotfile, component, indir, 
                                            origin, data_xy)
        if (not err == 0):
            sys.exit(2)

        data_xy = numpy.transpose(data_xy)

        if log:
            if (numpy.min(data_xy) < 0):
                data_xy = numpy.log10(numpy.abs(data_xy))
            else:
                data_xy = numpy.log10(data_xy)


        # x-z
        data_xz = numpy.zeros( (nx, nz), dtype=numpy.float64)
        (data_xz, err) = \
            fsnapshot.fplotfile_get_data_3d(plotfile, component, 2, 
                                            origin, data_xz)
        if (not err == 0):
            sys.exit(2)

        data_xz = numpy.transpose(data_xz)

        if log:
            if (numpy.min(data_xz) < 0):
                data_xz = numpy.log10(numpy.abs(data_xz))
            else:
                data_xz = numpy.log10(data_xz)
                

        # y-z
        data_yz = numpy.zeros( (ny, nz), dtype=numpy.float64)
        (data_yz, err) = \
            fsnapshot.fplotfile_get_data_3d(plotfile, component, 1, 
                                            origin, data_yz)
        if (not err == 0):
            sys.exit(2)

        data_yz = numpy.transpose(data_yz)

        if log:
            if (numpy.min(data_yz) < 0):
                data_yz = numpy.log10(numpy.abs(data_yz))
            else:
                data_yz = numpy.log10(data_yz)
                


                
        if (not minval == None): 
            if (log):
                minval = math.log10(minval)
        else:
            minval = numpy.min(data_xy)
            minval = min(minval,numpy.min(data_xz))
            minval = min(minval,numpy.min(data_yz))


        if (not maxval == None): 
            if (log):
                maxval = math.log10(maxval)
        else:
            maxval = numpy.max(data_xy)
            maxval = max(maxval,numpy.max(data_xz))
            maxval = max(maxval,numpy.max(data_yz))
            



        # x-y
        extent = [xmin, xmax, ymin, ymax]

        ix0 = 0
        ix = nx
        iy0 = 0
        iy = ny

        if (not xmin_pass == None):
            extent[0] = xmin_pass
            ix0 = int((xmin_pass - xmin)/dx)

        if (not ymin_pass == None):
            extent[2] = ymin_pass
            iy0 = int((ymin_pass - ymin)/dy)

        if (not xmax_pass == None):
            extent[1] = xmax_pass
            ix = int((xmax_pass - xmin)/dx)

        if (not ymax_pass == None):
            extent[3] = ymax_pass
            iy = int((ymax_pass - ymin)/dy)



        ax = pylab.subplot(1,3,1)
        pylab.subplots_adjust(wspace=0.4)
        #fig.add_axes(pos1)

        im=pylab.imshow(data_xy[iy0:iy,ix0:ix],origin='lower', extent=extent, 
                        vmin=minval, vmax=maxval, axes=pos1)

        pylab.xlabel("x")
        pylab.ylabel("y")

        # axis labels in scientific notation with LaTeX
        ax = pylab.gca()
        ax.xaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))
        ax.yaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))

        ax.xaxis.offsetText.set_fontsize("small")
        ax.yaxis.offsetText.set_fontsize("small")

        cl = pylab.getp(ax, 'ymajorticklabels')
        pylab.setp(cl, fontsize=10)
        cl = pylab.getp(ax, 'xmajorticklabels')
        pylab.setp(cl, fontsize=10)

        # do a fixed offset in pixels from the (xmin,ymin) data point
        fig1 = ax.get_figure()
        trans=matplotlib.transforms.offset_copy(ax.transData, x=0, y=-0.5, 
                                                fig=fig1, units='inches')

        # pylab.text(xmin_pass, ymin_pass, "time = %7.3g s" % (time), 
        #            verticalalignment="bottom", transform = trans, 
        #            clip_on=False, fontsize=10)


        # x-z
        extent = [xmin, xmax, zmin, zmax]

        ix0 = 0
        ix = nx
        iz0 = 0
        iz = nz

        if (not xmin_pass == None):
            extent[0] = xmin_pass
            ix0 = int((xmin_pass - xmin)/dx)

        if (not zmin_pass == None):
            extent[2] = zmin_pass
            iz0 = int((zmin_pass - zmin)/dz)

        if (not xmax_pass == None):
            extent[1] = xmax_pass
            ix = int((xmax_pass - xmin)/dx)

        if (not zmax_pass == None):
            extent[3] = zmax_pass
            iz = int((zmax_pass - zmin)/dz)



        ax = pylab.subplot(1,3,2)
        #fig.add_axes(pos2)

        im=pylab.imshow(data_xz[iz0:iz,ix0:ix],origin='lower', extent=extent, 
                        vmin=minval, vmax=maxval, axes=pos2)

        pylab.xlabel("x")
        pylab.ylabel("z")

        # axis labels in scientific notation with LaTeX
        ax = pylab.gca()
        ax.xaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))
        ax.yaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))

        ax.xaxis.offsetText.set_fontsize("small")
        ax.yaxis.offsetText.set_fontsize("small")

        cl = pylab.getp(ax, 'ymajorticklabels')
        pylab.setp(cl, fontsize=10)
        cl = pylab.getp(ax, 'xmajorticklabels')
        pylab.setp(cl, fontsize=10)


        # y-z
        extent = [ymin, ymax, zmin, zmax]

        iy0 = 0
        iy = ny
        iz0 = 0
        iz = nz

        if (not ymin_pass == None):
            extent[0] = ymin_pass
            iy0 = int((ymin_pass - ymin)/dy)

        if (not zmin_pass == None):
            extent[2] = zmin_pass
            iz0 = int((zmin_pass - zmin)/dz)

        if (not ymax_pass == None):
            extent[1] = ymax_pass
            iy = int((ymax_pass - ymin)/dy)

        if (not zmax_pass == None):
            extent[3] = zmax_pass
            iz = int((zmax_pass - zmin)/dz)



        ax = pylab.subplot(1,3,3)
        #fig.add_axes(pos3)

        im=pylab.imshow(data_yz[iz0:iz,iy0:iy],origin='lower', extent=extent, 
                        vmin=minval, vmax=maxval, axes=pos3)

        pylab.xlabel("y")
        pylab.ylabel("z")

        # axis labels in scientific notation with LaTeX
        ax = pylab.gca()
        ax.xaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))
        ax.yaxis.set_major_formatter(pylab.ScalarFormatter(useMathText=True))

        ax.xaxis.offsetText.set_fontsize("small")
        ax.yaxis.offsetText.set_fontsize("small")

        cl = pylab.getp(ax, 'ymajorticklabels')
        pylab.setp(cl, fontsize=10)
        cl = pylab.getp(ax, 'xmajorticklabels')
        pylab.setp(cl, fontsize=10)


        # colorbar
        pylab.subplots_adjust(bottom=0.1, left=0.05, right=0.95)

        formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
        
        cax = pylab.axes([0.05, 0.06, 0.9, 0.04])
        pylab.colorbar(orientation="horizontal", cax=cax, format=formatter)

        pylab.title(component)


    #--------------------------------------------------------------------------
    # save the figure
    #--------------------------------------------------------------------------
    # try: pylab.tight_layout()  # requires matplotlib >= 1.1
    # except:
    #     pass

    if (not eps):
        pylab.savefig(outFile, bbox_inches='tight', dpi=dpi, pad_inches=0.33)
    else:
        pylab.savefig(outFile)#, bbox_inches='tight', pad_inches=0.33)
Esempio n. 43
0
def rfindFirstJSChars(string):
    b = [string.rfind(k) for k in jsChars]
    return max(b)
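
A minimal usage sketch of the helper above; jsChars is a module-level list
defined elsewhere, so the value below is only a hypothetical stand-in:

jsChars = ['}', ';', ')']
print rfindFirstJSChars('foo(); bar()')   # 11 -- rightmost of the candidate hits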
Esempio n. 44
0
 def __parseCTypedefMemberdef(self, node):
     name = node.find('./name').text
     definition = node.find('./definition').text
     if string.find(definition, 'typedef ') == 0:
         definition = definition[8:]
     if string.rfind(name, 'Cb') == len(name) - 2:
         pos = string.find(definition, "(*")
         if pos == -1:
             return None
         returntype = definition[0:pos].strip()
         if returntype != "void":
             return None
         returnarg = CArgument(returntype,
                               enums=self.enums,
                               structs=self.__structs)
         definition = definition[pos + 2:]
         pos = string.find(definition, "(")
         definition = definition[pos + 1:-1]
         argslist = CArgumentsList()
         for argdef in definition.split(', '):
             argType = ''
             starPos = string.rfind(argdef, '*')
             spacePos = string.rfind(argdef, ' ')
             if starPos != -1:
                 argType = argdef[0:starPos + 1]
                 argName = argdef[starPos + 1:]
             elif spacePos != -1:
                 argType = argdef[0:spacePos]
                 argName = argdef[spacePos + 1:]
             argslist.addArgument(
                 CArgument(argType, argName, self.enums, self.__structs))
         if len(argslist) > 0:
             paramdescs = node.findall(
                 "detaileddescription/para/parameterlist[@kind='param']/parameteritem"
             )
             if paramdescs:
                 for arg in argslist.arguments:
                     for paramdesc in paramdescs:
                         if arg.name == paramdesc.find(
                                 './parameternamelist').find(
                                     './parametername').text:
                             arg.description = self.__cleanDescription(
                                 paramdesc.find('./parameterdescription'))
                 missingDocWarning = ''
                 for arg in argslist.arguments:
                     if arg.description == None:
                         missingDocWarning += "\t'" + arg.name + "' parameter not documented\n"
                 if missingDocWarning != '':
                     print(name + ":\n" + missingDocWarning)
         f = CEvent(name, returnarg, argslist)
         deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
         if deprecatedNode is not None:
             f.deprecated = True
         f.briefDescription = ''.join(
             node.find('./briefdescription').itertext()).strip()
         f.detailedDescription = self.__cleanDescription(
             node.find('./detaileddescription'))
         return f
     else:
         pos = string.rfind(definition, " " + name)
         if pos != -1:
             definition = definition[0:pos]
         td = CTypedef(name, definition)
         deprecatedNode = node.find(".//xrefsect[xreftitle='Deprecated']")
         if deprecatedNode is not None:
             td.deprecated = True
         td.briefDescription = ''.join(
             node.find('./briefdescription').itertext()).strip()
         td.detailedDescription = self.__cleanDescription(
             node.find('./detaileddescription'))
         return td
     return None
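
The argument-splitting logic above can be traced on a made-up callback
parameter (not taken from any real header):

argdef = 'LinphoneCore *lc'
starPos = string.rfind(argdef, '*')   # 13
argType = argdef[0:starPos + 1]       # 'LinphoneCore *'
argName = argdef[starPos + 1:]        # 'lc'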
Esempio n. 45
0
import string
import math
import time
import os
import sys

import cv2
import numpy as np

path = sys.argv[1]
t1 = float(sys.argv[2])
t2 = float(sys.argv[3])

heightcount = 1
valuecount = 1

pointcount = 0
linecount = 0
layercount = 0

pointmesh = open(path[0:string.rfind(path, "\\")] + 'xyzmesh.xyz', 'w')

for imageslice in os.listdir(path):

    if math.sqrt(heightcount) >= valuecount:

        RGBslice = cv2.imread(path + imageslice)
        grayslice = cv2.cvtColor(RGBslice, cv2.COLOR_BGR2GRAY)
        edgeslice = cv2.Canny(grayslice, t1, t2, apertureSize=5)

        baseslice = RGBslice.copy()
        baseslice = np.uint8(baseslice / 2.)
        baseslice[edgeslice != 0] = (0, 255, 0)

        for line in edgeslice:
            for point in line:
Esempio n. 46
0
def parse(tokens, customize):
    #
    #  Special handling for opcodes that take a variable number
    #  of arguments -- we add a new rule for each:
    #
    #    expr ::= {expr}^n BUILD_LIST_n
    #    expr ::= {expr}^n BUILD_TUPLE_n
    #    unpack_list ::= UNPACK_LIST {expr}^n
    #    unpack ::= UNPACK_TUPLE {expr}^n
    #    unpack ::= UNPACK_SEQUENCE {expr}^n
    #    mkfunc ::= {expr}^n LOAD_CONST MAKE_FUNCTION_n
    #    mkfunc ::= {expr}^n load_closure LOAD_CONST MAKE_FUNCTION_n
    #    expr ::= expr {expr}^n CALL_FUNCTION_n
    #    expr ::= expr {expr}^n CALL_FUNCTION_VAR_n POP_TOP
    #    expr ::= expr {expr}^n CALL_FUNCTION_VAR_KW_n POP_TOP
    #    expr ::= expr {expr}^n CALL_FUNCTION_KW_n POP_TOP
    #
    global p
    for k, v in customize.items():
        # avoid adding the same rule twice to this parser
        if p.customized.has_key(k):
            continue
        p.customized[k] = None

        # no-op reduction callback passed to addRule() below
        nop = lambda self, args: None
        op = k[:string.rfind(k, '_')]
        if op in ('BUILD_LIST', 'BUILD_TUPLE', 'BUILD_SET'):
            rule = 'build_list ::= ' + 'expr ' * v + k
        elif op in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
            rule = 'unpack ::= ' + k + ' designator' * v
        elif op == 'UNPACK_LIST':
            rule = 'unpack_list ::= ' + k + ' designator' * v
        elif op == 'DUP_TOPX':
            # no need to add a rule
            continue
            #rule = 'dup_topx ::= ' + 'expr '*v + k
        elif op == 'MAKE_FUNCTION':
            p.addRule('mklambda ::= %s LOAD_LAMBDA %s' % ('expr ' * v, k), nop)
            rule = 'mkfunc ::= %s LOAD_CONST %s' % ('expr ' * v, k)
        elif op == 'MAKE_CLOSURE':
            p.addRule(
                'mklambda ::= %s load_closure LOAD_LAMBDA %s' %
                ('expr ' * v, k), nop)
            p.addRule(
                'genexpr ::= %s load_closure LOAD_GENEXPR %s expr GET_ITER CALL_FUNCTION_1'
                % ('expr ' * v, k), nop)
            p.addRule(
                'setcomp ::= %s load_closure LOAD_SETCOMP %s expr GET_ITER CALL_FUNCTION_1'
                % ('expr ' * v, k), nop)
            p.addRule(
                'dictcomp ::= %s load_closure LOAD_DICTCOMP %s expr GET_ITER CALL_FUNCTION_1'
                % ('expr ' * v, k), nop)
            rule = 'mkfunc ::= %s load_closure LOAD_CONST %s' % ('expr ' * v,
                                                                 k)
#            rule = 'mkfunc ::= %s closure_list LOAD_CONST %s' % ('expr '*v, k)
        elif op in ('CALL_FUNCTION', 'CALL_FUNCTION_VAR',
                    'CALL_FUNCTION_VAR_KW', 'CALL_FUNCTION_KW'):
            na = (v & 0xff)  # positional parameters
            nk = (v >> 8) & 0xff  # keyword parameters
            # number of apply equiv arguments:
            nak = (len(op) - len('CALL_FUNCTION')) / 3
            rule = 'call_function ::= expr ' + 'expr '*na + 'kwarg '*nk \
                   + 'expr ' * nak + k
        else:
            raise Exception('unknown customize token %s' % k)
        p.addRule(rule, nop)
    ast = p.parse(tokens)
    #    p.cleanup()
    return ast
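
For a concrete customization entry, say k = 'BUILD_LIST_3' with v = 3
(hypothetical values), the expansion above produces:

op = 'BUILD_LIST_3'[:string.rfind('BUILD_LIST_3', '_')]   # 'BUILD_LIST'
rule = 'build_list ::= ' + 'expr ' * 3 + 'BUILD_LIST_3'
# 'build_list ::= expr expr expr BUILD_LIST_3'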
Esempio n. 47
0
def runPDB2PQR(pdblist, ff, options):
    """
        Run the PDB2PQR Suite

        Parameters
            pdblist: The list of objects that was read from the PDB file
                     given as input (list)
            ff:      The name of the forcefield (string)
            options: A dictionary of PDB2PQR options, including:
                     verbose: When 1, script will print information to stdout
                              When 0, no detailed information will be printed (int)
                     debump:  When 1, debump heavy atoms (int)
                     opt:     When 1, run hydrogen optimization (int)
                     ph:      The desired pH of the system (float)
                     outname: The name of the desired output file
        Returns
            header:  The PQR file header (string)
            lines:   The PQR file atoms (list)
            missedligandresidues:  A list of ligand residue names whose charges could
                     not be assigned (ligand)
    """
    ph = None
    pkaname = ""
    outname = ""
    outroot = ""
    typemapname = ""
    neutraln = None
    neutralc = None
    lines = []
    Lig = None
    atomcount = 0  # Count the number of ATOM records in pdb

    # userff is CGI-based User Forcefield file object

    if "userff" in options: userff = options["userff"]
    else: userff = None

    if "usernames" in options: usernames = options["usernames"]
    else: usernames = None

    if "verbose" in options: verbose = 1
    else: verbose = 0

    if "opt" in options: optflag = 1
    else: optflag = 0

    if "typemap" in options: typemapflag = 1
    else: typemapflag = 0

    if "chain" in options: chainflag = 1
    else: chainflag = 0

    if "outname" not in options or options["outname"] == None:
        text = "Error: Output name not set!"
        raise ValueError, text
    else:
        outname = options["outname"]
        period = string.rfind(outname, ".")
        if period > 0: outroot = outname[0:period]
        else: outroot = outname

    if "ph" in options:
        pka = 1
        ph = options["ph"]
        pkaname = outroot + ".propka"
        if os.path.isfile(pkaname): os.remove(pkaname)
    else: pka = 0

    typemapname = "%s-typemap.html" % outroot

    extmap = options["extensions"]

    start = time.time()

    if verbose:
        print "Beginning PDB2PQR...\n"

    myDefinition = Definition()
    if verbose:
        print "Parsed Amino Acid definition file."

    # Check for the presence of a ligand!  This code is taken from pdb2pka/pka.py

    if "ligand" in options:
        from pdb2pka.ligandclean import ligff
        myProtein, myDefinition, Lig = ligff.initialize(
            myDefinition, options["ligand"], pdblist, verbose)
        for atom in myProtein.getAtoms():
            if atom.type == "ATOM":
                atomcount += 1
    else:
        myProtein = Protein(pdblist, myDefinition)

    if verbose:
        print "Created protein object -"
        print "\tNumber of residues in protein: %s" % myProtein.numResidues()
        print "\tNumber of atoms in protein   : %s" % myProtein.numAtoms()

    myRoutines = Routines(myProtein, verbose)

    for residue in myProtein.getResidues():
        multoccupancy = 0
        for atom in residue.getAtoms():
            if atom.altLoc != "":
                multoccupancy = 1
                txt = "Warning: multiple occupancies found: %s in %s\n" % (
                    atom.name, residue)
                sys.stderr.write(txt)
        if multoccupancy == 1:
            myRoutines.warnings.append(
                "WARNING: multiple occupancies found in %s,\n" % (residue))
            myRoutines.warnings.append(
                "         at least one of the instances is being ignored.\n")

    if "neutraln" in options: neutraln = 1
    if "neutralc" in options: neutralc = 1

    myRoutines.setTermini(neutraln, neutralc)
    myRoutines.updateBonds()

    if "clean" in options:
        header = ""
        lines = myProtein.printAtoms(myProtein.getAtoms(), chainflag)

        # Process the extensions
        for ext in extmap:
            module = extmap[ext]
            call = "module.%s(myRoutines, outroot)" % ext
            eval(call)

        if verbose:
            print "Total time taken: %.2f seconds\n" % (time.time() - start)
        return header, lines

    if not "assign-only" in options:
        # It is OK to process ligands with no ATOM records in the pdb
        if atomcount == 0 and Lig != None:
            pass
        else:
            myRoutines.findMissingHeavy()
        myRoutines.updateSSbridges()

        if "debump" in options:
            myRoutines.debumpProtein()

        if pka:
            myRoutines.runPROPKA(ph, ff, pkaname)

        myRoutines.addHydrogens()

        myhydRoutines = hydrogenRoutines(myRoutines)

        if "debump" in options:
            myRoutines.debumpProtein()

        if optflag:
            myhydRoutines.setOptimizeableHydrogens()
            myhydRoutines.initializeFullOptimization()
            myhydRoutines.optimizeHydrogens()
        else:
            myhydRoutines = hydrogenRoutines(myRoutines)
            myhydRoutines.initializeWaterOptimization()
            myhydRoutines.optimizeHydrogens()

        # Special for GLH/ASH, since both conformations were added
        myhydRoutines.cleanup()

    else:  # Special case for HIS if using assign-only
        for residue in myProtein.getResidues():
            if isinstance(residue, HIS):
                myRoutines.applyPatch("HIP", residue)

    myRoutines.setStates()

    myForcefield = Forcefield(ff, myDefinition, userff, usernames)
    hitlist, misslist = myRoutines.applyForcefield(myForcefield)

    ligsuccess = 0
    if "ligand" in options:

        # If this is independent, we can assign charges and radii here

        for residue in myProtein.getResidues():
            if isinstance(residue, LIG):
                templist = []
                Lig.make_up2date(residue)
                for atom in residue.getAtoms():
                    atom.ffcharge = Lig.ligand_props[atom.name]["charge"]
                    atom.radius = Lig.ligand_props[atom.name]["radius"]
                    if atom in misslist:
                        misslist.pop(misslist.index(atom))
                        templist.append(atom)

                charge = residue.getCharge()
                if abs(charge - int(charge)) > 0.001:
                    # Ligand parameterization failed
                    myRoutines.warnings.append(
                        "WARNING: PDB2PQR could not successfully parameterize\n"
                    )
                    myRoutines.warnings.append(
                        "         the desired ligand; it has been left out of\n"
                    )
                    myRoutines.warnings.append("         the PQR file.\n")
                    myRoutines.warnings.append("\n")

                    # remove the ligand
                    myProtein.residues.remove(residue)
                    for chain in myProtein.chains:
                        if residue in chain.residues:
                            chain.residues.remove(residue)
                else:
                    ligsuccess = 1
                    # Mark these atoms as hits
                    hitlist = hitlist + templist

    # Temporary fix; if ligand was successful, pull all ligands from misslist
    if ligsuccess:
        templist = misslist[:]
        for atom in templist:
            if isinstance(atom.residue, Amino) or isinstance(
                    atom.residue, Nucleic):
                continue
            misslist.remove(atom)

    # Create the Typemap
    if typemapflag:
        myProtein.createHTMLTypeMap(myDefinition, typemapname)

    # Grab the protein charge

    reslist, charge = myProtein.getCharge()

    # If we want a different naming scheme, use that

    if "ffout" in options:
        scheme = options["ffout"]
        userff = None  # Currently not supported
        if scheme != ff:
            myNameScheme = Forcefield(scheme, myDefinition, userff)
        else:
            myNameScheme = myForcefield
        myRoutines.applyNameScheme(myNameScheme)

    header = printPQRHeader(misslist, reslist, charge, ff,
                            myRoutines.getWarnings(), options)
    lines = myProtein.printAtoms(hitlist, chainflag)

    # Determine if any of the atoms in misslist were ligands
    missedligandresidues = []
    for atom in misslist:
        if isinstance(atom.residue, Amino) or isinstance(
                atom.residue, Nucleic):
            continue
        if atom.resName not in missedligandresidues:
            missedligandresidues.append(atom.resName)

    # Process the extensions

    for ext in extmap:
        module = extmap[ext]
        call = "module.%s(myRoutines, outroot)" % ext
        eval(call)

    if verbose:
        print "Total time taken: %.2f seconds\n" % (time.time() - start)

    return header, lines, missedligandresidues
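
A minimal calling sketch, assuming pdblist came from the suite's PDB reader
and that 'amber' names an available forcefield (both assumptions, not shown
in this snippet):

options = {"outname": "mymol.pqr", "verbose": 1, "debump": 1, "opt": 1,
           "extensions": {}}
header, lines, missed = runPDB2PQR(pdblist, 'amber', options)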
Esempio n. 48
0
def load(fname, lines_to_ignore=4, type='i'):
    """
Load in huge, flat, 2D text files.  Can handle differing line-lengths AND
can strip #/% on UNIX (or with a better NT grep).  Requires wc, grep, and
mmapfile.lib/.pyd. Type can be 'i', 'f' or 'd', for ints, floats or doubles,
respectively.  Lines_to_ignore determines how many lines at the start of the
file to ignore (required for non-working grep).

Usage:   load(fname,lines_to_ignore=4,type='i')
Returns: numpy array of specified type
"""
    start = time.time()  ## START TIMER
    if type == 'i':
        intype = int
    elif type in ['f', 'd']:
        intype = float
    else:
        raise ValueError, "type can be 'i', 'f' or 'd' in load()"

    ## STRIP OUT % AND # LINES
    tmpname = tempfile.mktemp()
    if sys.platform == 'win32':
        # NT VERSION OF GREP DOESN'T DO THE STRIPPING ... SIGH
        cmd = "grep.exe -v \'%\' " + fname + " > " + tmpname
        print cmd
        os.system(cmd)
    else:
        # UNIX SIDE SHOULD WORK
        cmd = "cat " + fname + " | grep -v \'%\' |grep -v \'#\' > " + tmpname
        print cmd
        os.system(cmd)

    ## GET NUMBER OF ROWS, COLUMNS AND LINE-LENGTH, USING WC
    wc = string.split(os.popen("wc " + tmpname).read())
    numlines = int(wc[0]) - lines_to_ignore
    tfp = open(tmpname)
    if lines_to_ignore != 0:
        for i in range(lines_to_ignore):
            junk = tfp.readline()
    numcols = len(string.split(tfp.readline()))  #int(float(wc[1])/numlines)
    tfp.close()

    ## PREPARE INPUT SPACE
    a = np.zeros((numlines * numcols), type)
    block = 65536  # chunk to read, in bytes
    data = mmapfile.mmapfile(tmpname, '', 0)
    if lines_to_ignore != 0 and sys.platform == 'win32':
        for i in range(lines_to_ignore):
            junk = data.readline()
    i = 0
    d = ' '
    carryover = ''
    while len(d) != 0:
        d = carryover + data.read(block)
        cutindex = string.rfind(d, '\n')
        carryover = d[cutindex + 1:]
        d = d[:cutindex + 1]
        d = map(intype, string.split(d))
        a[i:i + len(d)] = d
        i = i + len(d)
    end = time.time()
    print "%d sec" % round(end - start, 2)
    data.close()
    os.remove(tmpname)
    return np.reshape(a, [numlines, numcols])
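
The carryover logic above keeps partial lines intact across block reads; a
tiny buffer shows the split:

d = '7 8 9\n10 1'
cutindex = string.rfind(d, '\n')   # 5
carryover = d[cutindex + 1:]       # '10 1' -- prepended to the next read
d = d[:cutindex + 1]               # '7 8 9\n' -- safe to parse now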
Esempio n. 49
0
def rewrap_line(longline):
    """
  Break a given long line "longline" up into 72 character long chunks that
  conform the Fortran77 standard. The chunks are written to standard output.
  In addition we do not want to break the line in the middle of numbers.
  """
    pattern = re.compile("\S")
    i = (pattern.search(longline)).start()
    indent = longline[:i]
    indent = indent[:5] + "+" + indent[7:] + "   "
    while len(longline) > 72:
        i = -1
        # wrap before * / ( ) + or -
        i = max(i, string.rfind(longline, ",", 0, 70) + 1)
        i = max(i, string.rfind(longline, "*", 0, 71))
        i = max(i, string.rfind(longline, "/", 0, 71))
        i = max(i, string.rfind(longline, "(", 0, 71))
        i = max(i, string.rfind(longline, ")", 0, 71))
        # wrap before + but not in the middle of a numerical constant...
        j = string.rfind(longline, "+", 0, 71)
        k = string.rfind(longline, "d+", 0, 71)
        if j - 1 == k:
            j = string.rfind(longline, "+", 0, k)
        i = max(i, j)
        # wrap before - but not in the middle of a numerical constant...
        j = string.rfind(longline, "-", 0, 71)
        k = string.rfind(longline, "d-", 0, 71)
        if j - 1 == k:
            j = string.rfind(longline, "-", 0, k)
        i = max(i, j)
        if i == -1:
            sys.stderr.write("No sensible break point found in:\n")
            sys.stderr.write(longline)
            sys.exit(1)
        elif i == 6:
            sys.stderr.write("Same break point found repeatedly in:\n")
            sys.stderr.write(longline)
            sys.exit(1)
        sys.stdout.write(longline[:i] + "\n")
        longline = indent + longline[i:]
    sys.stdout.write(longline + "\n")
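
A usage sketch on a synthetic over-long assignment (made-up input); the tail
is emitted on a continuation line with a '+' marker in column 6:

rewrap_line('      result = alpha*beta + gamma*delta + epsilon*zeta'
            ' + eta*theta + iota*kappa')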
Esempio n. 50
0
def main():
    print "cominciamo!!"
    outdir = ciop.tmp_dir
    #input = sys.stdin.readlines()
    #input_file = input[0][string.find(input[0], "'")+1:string.rfind(input[0],"'")]
    #print input_file
    #print "sys.stdin ", input

    date_list = []

    image_list = {}
    for input in sys.stdin:
        #print "sys.stdin ", input
        print "input: ", input
        input_file = input[string.find(input, "'") +
                           1:string.rfind(input, "'")].strip()
        print "input_file: ", input_file
        #image_list.append(input_file)
        #input_file = input[string.find(input, "'")+1:string.rfind(input,"'")]
        #print input_file
        #print 'just doing a test'
        #print input_file[-38:-34], input_file[-34:-32], input_file[-32:-30]
        #input_file_basename = os.path.basename(input_file)
        print "vai con la data!"
        print input_file[-38:-34], input_file[-34:-32], input_file[-32:-30]
        date_img = datetime.date(int(input_file[-38:-34]),
                                 int(input_file[-34:-32]),
                                 int(input_file[-32:-30]))
        print "date_img: ", date_img
        image_list[date_img] = input_file
        date_list.append(date_img)
        print "il file di input e': ", input_file

    date_list.sort()

    ## Build the pairs
    gap = datetime.timedelta(
        days=int(ciop.getparam('long_coherence_interval')))
    print "gap: ", gap
    cohe_list = []
    for date1 in date_list:
        for date2 in date_list:
            print date2 - date1
            if (date2 - date1) == gap:
                cohe_list.append((date1, date2))

    print cohe_list

    master_coreg_date = date_list[len(date_list) / 2]
    # NOW PUBLISH THE RESULT
    # REBUILD THE IMAGE PAIRS
    image_pairs = []
    for i in cohe_list:
        image_pairs.append(
            str((image_list[i[0]], image_list[i[1]], 'coherence')))

    print "image_pairs: ", image_pairs
    print "master_coreg_date: ", master_coreg_date
    print "master_coreg: ", image_list[master_coreg_date]
    #now build the pairs to coregister on
    for i in date_list:
        #image_pairs.append(str((image_list[master_coreg_date],image_list[i], 'coregistration')))
        image_pairs.append(
            str((image_list[i], image_list[i], 'coregistration')))

    print image_pairs
    res = ciop.publish(image_pairs, mode='silent', metalink=True)
    print 'result from publish string: ', res
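
The date parsing above relies on fixed offsets from the end of the product
name; a placeholder string that only mimics those offsets makes the slicing
visible:

fn = 'x' * 10 + '20090403' + 'y' * 30
print fn[-38:-34], fn[-34:-32], fn[-32:-30]   # 2009 04 03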
Esempio n. 51
0
    appname = None
    try:
        appname = subprocess.check_output([BIN_GETPACKNAME, fnapk])
    except Exception, e:
        print >> sys.stderr, "error occurred when executing getpackage.sh " + fnapk
    ret = string.split(appname.lstrip().rstrip(), '\t')
    if len(ret) < 2:
        print >> sys.stderr, "error in getting package name of %s: %s" % (
            fnapk, appname)
        sys.exit(-1)

    if not prefix:
        return ret[1]

    napk = fnapk
    ri = string.rfind(fnapk, '/')
    if ri != -1:
        napk = fnapk[ri + 1:]
    #napk = napk[0: string.rfind(napk, ".apk")]
    return napk + '.' + ret[1]


def getsha256(fnapk):
    try:
        sha = subprocess.check_output(['sha256sum', fnapk])
    except Exception, e:
        print >> sys.stderr, "error occurred when executing sha256sum " + fnapk
    ret = string.split(sha.lstrip().rstrip())
    if len(ret) < 2:
        print >> sys.stderr, "error in sha256sum of %s: %s" % (fnapk, sha)
        sys.exit(-1)
Esempio n. 52
0
 def SetLogDirectory(self, newDirectory):
     "Sets the list of directories to a single entry for the currently selected sampler."
     #self.directories = [ newDirectory ]
     self.logName = newDirectory[string.rfind(newDirectory, "/")+1:]
     self.FindDirectories(self.logName)
Esempio n. 53
0
def AddComponent(platform, project, component, dirs, files, dlls):

    temp_dir = os.path.join(project.src_root_path, project.module_dir)
    copydir = temp_dir
    temp_dir = os.path.join(temp_dir, temp_dir_name)

    ## Create directories for files to copy
    if sysinfo.host_type == 'mac':
        global g_rzt_apple_script
        g_rzt_apple_script.Append('verifypath("%s")' % (temp_dir))
    else:
        project.writeln('\t' + platform.mkdir.execute(temp_dir_name))

    for dir in dirs:
        if sysinfo.host_type == 'mac':
            global g_rzt_apple_script
            g_rzt_apple_script.Append('verifypath("%s")' %
                                      (os.path.join(temp_dir, dir)))
        else:
            project.writeln(
                '\t' +
                platform.mkdir.execute(os.path.join(temp_dir_name, dir)))
            AddRebaseDirectory(dir)

    str_copycommand = '\t' + platform.copy.cmd + ' '
    if (platform.name == 'win16'):
        str_copycommand = '\tdrpog /noerr_copy '
        project.writeln('\tdel CopyLogFile.txt')

    specfile_path = os.path.join(project.src_root_path, component[2])
    if platform.type == 'unix':
        specfile_path = string.translate(specfile_path,
                                         unix_path_translation_table)
        index = string.rfind(specfile_path, '/')
    else:
        index = string.rfind(specfile_path, '\\')

    specfile_name = specfile_path[index + 1:]
    specfile_out_path = os.path.join(temp_dir, specfile_name)
    if project.target_name != project.module_dir:
        specfile_name = specfile_name + '_' + project.target_name

    ## Copy spec file of a component
    if sysinfo.host_type == 'mac':
        spec_path = os.path.join(project.src_root_path, component[2])
        index = string.rfind(temp_dir, ':')
        spec_path = temp_dir[:index]
        spec_path = os.path.join(spec_path, specfile_name)
        corrected_file = string.translate(spec_path,
                                          mac_path_translation_table)
        global g_rzt_apple_script
        g_rzt_apple_script.Append(
            '-- Copy Files and Folders into Temporary Directory',
            'tell application "Finder"', 'with timeout of 99999 seconds',
            'Duplicate file "%s" to folder "%s" with replacing' %
            (corrected_file, temp_dir))
    else:
        project.writeln(str_copycommand + specfile_name + ' ' +
                        specfile_out_path)

    for dll in dlls:
        platform.versioning.version = ''
        dll_name = platform.versioning.create_dll_name(dll[0], dll[2])
        dll_location = os.path.join(dll[2], project.output_dir)
        dll_location = os.path.join(dll_location, dll_name)
        if dll[1] == '':
            dest = temp_dir
        else:
            dest = os.path.join(temp_dir, dll[1])
        if sysinfo.host_type == 'mac':
            global g_rzt_apple_script
            g_rzt_apple_script.Append(
                'Duplicate file "%s" to folder "%s" with replacing' %
                (os.path.join(project.src_root_path, dll_location), dest))
        else:
            project.writeln(str_copycommand +
                            os.path.join(project.src_root_path, dll_location) +
                            ' ' + dest)

    for file in files:
        if file[1] == '':
            dest = temp_dir
        else:
            dest = os.path.join(temp_dir, file[1])

        if sysinfo.host_type == 'mac':
            srcfile = string.translate(file[0], mac_path_translation_table)
            srcfile = project.src_root_path + ':' + srcfile
            corrected_dest = string.translate(dest, mac_path_translation_table)
            if os.path.isdir(srcfile):  # copy entire folder
                global g_rzt_apple_script
                g_rzt_apple_script.Append('Copy folder "%s" to folder "%s"' %
                                          (srcfile, corrected_dest))
            else:
                global g_rzt_apple_script
                g_rzt_apple_script.Append(
                    'Duplicate file "%s" to folder "%s" with replacing' %
                    (srcfile, corrected_dest))
        else:
            if platform.type == 'unix':
                srcfile = string.translate(file[0],
                                           unix_path_translation_table)
                temp = str_copycommand + '"' + os.path.join(
                    project.src_root_path, srcfile) + '" "' + dest + '"'
                project.writeln(string.replace(temp, '*', '"*"'))
            else:
                project.writeln(str_copycommand + '"' +
                                os.path.join(project.src_root_path, file[0]) +
                                '"  "' + dest + '"')

    if sysinfo.host_type == 'mac':
        global g_rzt_apple_script
        g_rzt_apple_script.Append('end timeout', 'end tell')
Esempio n. 54
0
 def GetLogFileName(self, logFilePath):
     "Given a full path to a log file, returns the name to the log file."
     return logFilePath[string.rfind(logFilePath, "/")+1:]
Esempio n. 55
0
default_supports = ['0.0025', '0.003', '0.0035', '0.0045', '0.0075']
datadir = '../data/'
f = open(datadir + 'datafiles', 'r')
datafiles = f.readlines()

d = shelve.open('space.dbm')

sum = 0
n = 0
for x in datafiles:
    xl = string.split(x)
    datafile = xl[0]
    numtxn = int(xl[1])
    path = string.split(datafile, '/')
    filename = path[len(path) - 1]
    dataname = filename[0:string.rfind(filename, '.')]
    if len(xl) > 2:
        supports = xl[2:]
    else:
        supports = default_supports
    for epsilon in supports:
        (d0, space1, d1) = d['fpgrowth-tiny:' + datafile + ':' + epsilon]
        (d0, space2, d2) = d['fpgrowth:' + datafile + ':' + epsilon]
        use = (float(space1) / float(space2)) * 100.0
        print 'memory use ', datafile, epsilon, 'is', use, '%'
        sum = sum + use
        n = n + 1
d.close()
print 'average memory use = ', sum / n

#        print 'memory saving for ', datafile, epsilon, 'is', (float(space2)/float(space1)-1) * 100.0 , '%'
Esempio n. 56
0
def api(path_info, repo_name, version, start_response, environ):

    slash = path_info.find('/')
    if (slash == -1):
        return cvmfs_api.bad_request(start_response, 'no slash in geo path')

    caching_string = path_info[0:slash]
    servers = string.split(path_info[slash + 1:], ",")

    # TODO(jblomer): Can this be switched to monotonic time?
    now = int(time.time())

    gir_rem = None
    if caching_string.find('.') != -1:
        # might be a FQDN, use it if it resolves to a geo record
        gir_rem = name_geoinfo(now, caching_string)

    if gir_rem is None:
        if 'HTTP_X_FORWARDED_FOR' in environ:
            forwarded_for = environ['HTTP_X_FORWARDED_FOR']
            start = string.rfind(forwarded_for, ' ') + 1
            if (start == 0):
                start = string.rfind(forwarded_for, ',') + 1
            gir_rem = addr_geoinfo(forwarded_for[start:])
        if gir_rem is None and 'REMOTE_ADDR' in environ:
            gir_rem = addr_geoinfo(environ['REMOTE_ADDR'])

    if gir_rem is None:
        return cvmfs_api.bad_request(start_response,
                                     'remote addr not found in database')

    if '+PXYSEP+' in servers:
        # first geosort the proxies after the separator and if at least one
        # is good, sort the hosts before the separator relative to that
        # proxy rather than the client
        pxysep = servers.index('+PXYSEP+')
        onegood, pxyindexes = geosort_servers(now, gir_rem,
                                              servers[pxysep + 1:])
        if onegood:
            gir_pxy = name_geoinfo(now, servers[pxysep + 1 + pxyindexes[0]])
            if not gir_pxy is None:
                gir_rem = gir_pxy
        onegood, hostindexes = geosort_servers(now, gir_rem, servers[0:pxysep])
        indexes = hostindexes + list(pxysep + 1 + i for i in pxyindexes)
        # Append the index of the separator for backward compatibility,
        # so the client can always expect the same number of indexes as
        # the number of elements in the request.
        indexes.append(pxysep)
    else:
        onegood, indexes = geosort_servers(now, gir_rem, servers)

    if not onegood:
        # return a bad request only if all the server names were bad
        return cvmfs_api.bad_request(start_response,
                                     'no server addr found in database')

    response_body = string.join((str(i + 1) for i in indexes), ',') + '\n'

    status = '200 OK'
    response_headers = [('Content-Type', 'text/plain'),
                        ('Cache-Control',
                         'max-age=' + str(positive_expire_secs)),
                        ('Content-Length', str(len(response_body)))]
    start_response(status, response_headers)

    return [response_body]
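
A worked example of the '+PXYSEP+' handling (hypothetical request): for
servers = ['hostA', 'hostB', '+PXYSEP+', 'proxy1', 'proxy2'], pxysep is 2;
if geosort_servers returns hostindexes = [1, 0] and pxyindexes = [1, 0],
then indexes becomes [1, 0, 4, 3, 2] and the response body is '2,1,5,4,3\n'
(1-based, with the separator's own index appended last).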
Esempio n. 57
0
def LogReader(filename,tier,outfile):
	file = open(filename)
	raw = file.readline()
	file.close()
	
	if raw=='"log"': #https://github.com/Zarel/Pokemon-Showdown/commit/92a4f85e0abe9d3a9febb0e6417a7710cabdc303
		return
	log = json.loads(raw)

	#determine log type
	spacelog = True
	if 'log' in log.keys():
		if log['log'][0][0:2] != '| ':
			spacelog = False

	#check for log length
	longEnough = False
	if 'log' not in log.keys():
		if int(log['turns']) > 5: 
			longEnough = True
	else:
		for line in log['log']:
			if (spacelog and line[2:10] == 'turn | 6') or (not spacelog and line[1:7] == 'turn|6'):
				longEnough = True
				break
	if not longEnough:
		return

	#get info on the trainers & pokes involved
	ts = []
	teams = {}

	#get pokemon info
	for team in ['p1team','p2team']:

		if team == 'p1team':
			trainer = log['p1']
		else:
			trainer = log['p2']

		teams[team]=[]

		for i in range(len(log[team])):
			if 'species' in log[team][i].keys():
				species = log[team][i]['species']
			else: #apparently randbats usually don't contain the species field?
				species = log[team][i]['name']

			#very odd that these are needed--I've seen ".Species", "(Species)", "species", "Species)", "SPECIES"...
			if species[0] not in string.lowercase + string.uppercase:
				species=species[1:]
			while species[len(species)-1] in ')". ':
				species=species[:len(species)-1]
			if species[0] in string.lowercase or species[1] in string.uppercase:
				species = species.title()
			if species in replacements.keys():
				species = replacements[species]

			for s in aliases: #combine appearance-only variations and weird PS quirks
				if species in aliases[s]:
					species = s
					break
		
			ts.append([trainer,species])

			if 'item' in log[team][i].keys():
				item = keyify(log[team][i]['item'])
				if item == '':
					item = 'nothing'
			else:
				item = 'nothing'
			if 'nature' in log[team][i].keys():
				nature = keyify(log[team][i]['nature'])
				if nature not in nmod.keys(): #zarel said this is what PS does
					nature = 'hardy'
			else:
				nature = 'hardy'
			evs = {'hp': 0, 'atk': 0, 'def': 0, 'spa': 0, 'spd': 0, 'spe': 0}
			if 'evs' in log[team][i].keys():
				for stat in log[team][i]['evs']:
					evs[stat]=int(log[team][i]['evs'][stat])	
			ivs = {'hp': 31, 'atk': 31, 'def': 31, 'spa': 31, 'spd': 31, 'spe': 31}
			if 'ivs' in log[team][i].keys():
				for stat in log[team][i]['ivs']:
					ivs[stat]=int(log[team][i]['ivs'][stat])
			if 'moves' in log[team][i].keys():
				moves = log[team][i]['moves']
			else:
				moves = []
			while len(moves)<4:
				moves.append('')
			for j in range(len(moves)): #make all moves lower-case and space-free
				moves[j] = keyify(moves[j])
			#figure out Hidden Power from IVs
			if 'ability' in log[team][i].keys():
				ability = keyify(log[team][i]['ability'])
			else:
				ability = 'unknown'
			if 'level' in log[team][i].keys():
				level = int(log[team][i]['level'])
			else:
				level = 100
			teams[team].append({
				'species': keyify(species),
				'nature': nature,
				'item': item,
				'evs': {},
				'moves': [],
				'ability': ability,
				'level': level,
				'ivs': {}})
			for stat in evs:
				teams[team][len(teams[team])-1]['evs'][stat] = evs[stat]
				teams[team][len(teams[team])-1]['ivs'][stat] = ivs[stat]
			for move in moves:
				teams[team][len(teams[team])-1]['moves'].append(move)

			#write to moveset file
			outname = "Raw/moveset/"+tier+"/"+keyify(species)+".txt"
			d = os.path.dirname(outname)
			if not os.path.exists(d):
				os.makedirs(d)
			msfile=open(outname,'a')
			msfile.write(str(level)+'\t'+ability+'\t'+item+'\t'+nature+'\t')
			for iv in ivs:
				msfile.write(str(iv)+'\t')
			for ev in evs:
				msfile.write(str(ev)+'\t')
			for move in moves:
				msfile.write(str(move)+'\t')
			msfile.write('\n')
			msfile.close()

		if len(log[team]) < 6:
			for i in range(6-len(log[team])):
				ts.append([trainer,'empty'])
		analysis = analyzeTeam(teams[team])
		teams[team].append({'bias': analysis['bias'], 'stalliness' : analysis['stalliness'], 'tags' : analysis['tags']})


	#nicknames
	nicks = []
	for i in range(0,6):
		if len(log['p1team'])>i:
			if 'name' in log['p1team'][i].keys():
				nicks.append("p1: "+log['p1team'][i]['name'])
			else:
				nicks.append("p1: "+log['p1team'][i]['species'])
		else:
			nicks.append("p1: empty")
		if len(log['p2team'])>i:
			if 'name' in log['p2team'][i].keys():
				nicks.append("p2: "+log['p2team'][i]['name'])
			else:
				nicks.append("p2: "+log['p2team'][i]['species'])
		else:
			nicks.append("p2: empty")

	if ts[0][0] == ts[11][0]: #trainer battling him/herself? WTF?
		return


	#metrics get declared here
	turnsOut = [] #turns out on the field (a measure of stall)
	KOs = [] #number of KOs in the battle
	matchups = [] #poke1, poke2, what happened

	for i in range(0,12):
		turnsOut.append(0)
		KOs.append(0)

	if 'log' in log.keys():
		#determine initial pokemon
		active = [-1,-1]
		for line in log['log']:
			if (spacelog and line[0:14] == "| switch | p1:") or (not spacelog and line[0:11] == "|switch|p1:"):
				end = string.rfind(line,'|')-1*spacelog
				species = line[string.rfind(line,'|',12+3*spacelog,end-1)+1+1*spacelog:end]
				while ',' in species:
					species = species[0:string.rfind(species,',')]
				for s in aliases: #combine appearance-only variations and weird PS quirks
					if species in aliases[s]:
						species = s
						break
				active[0]=ts.index([ts[0][0],species])
			if (spacelog and line[0:14] == "| switch | p2:") or (not spacelog and line[0:11] == "|switch|p2:"):
				end = string.rfind(line,'|')-1*spacelog
				species = line[string.rfind(line,'|',12+3*spacelog,end-1)+1+1*spacelog:end]
				while ',' in species:
					species = species[0:string.rfind(species,',')]
				for s in aliases: #combine appearance-only variations and weird PS quirks
					if species in aliases[s]:
						species = s
						break
				active[1]=ts.index([ts[11][0],species])
				break
		start=log['log'].index(line)+1
		
		#metrics get declared here
		turnsOut = [] #turns out on the field (a measure of stall)
		KOs = [] #number of KOs in the battle
		matchups = [] #poke1, poke2, what happened

		for i in range(0,12):
			turnsOut.append(0)
			KOs.append(0)

		#parse the damn log

		#flags
		roar = False
		uturn = False
		ko = [False,False]
		switch = [False,False]
		uturnko = False
		mtemp = []

		for line in log['log'][start:]:
			#print line
			#identify what kind of message is on this line
			linetype = line[1+1*spacelog:string.find(line,'|',1+1*spacelog)-1*spacelog]

			if linetype == "turn":
				matchups = matchups + mtemp
				mtemp = []

				#reset for start of turn
				roar = uturn = uturnko = False
				ko = [False,False]
				switch = [False,False]

				#Mark each poke as having been out for an additional turn
				turnsOut[active[0]]=turnsOut[active[0]]+1
				turnsOut[active[1]]=turnsOut[active[1]]+1

			elif linetype == "win": 
				#close out last matchup
				if ko[0] or ko[1]: #if neither poke was KOed, match ended in forfeit, and we don't care
					pokes = [ts[active[0]][1],ts[active[1]][1]]
					pokes=sorted(pokes, key=lambda pokes:pokes)
					matchup=pokes[0]+' vs. '+pokes[1]+': '
					if ko[0] and ko[1]:
						KOs[active[0]] = KOs[active[0]]+1
						KOs[active[1]] = KOs[active[1]]+1
						matchup = matchup + "double down"
					else:
						KOs[active[ko[0]]] = KOs[active[ko[0]]]+1
						matchup = matchup + ts[active[ko[1]]][1] + " was "
						if uturnko: #would rather not use this flag...
							mtemp=mtemp[:len(mtemp)-1]
							matchup = matchup + "u-turn "
						matchup = matchup + "KOed"
					mtemp.append(matchup)
				matchups=matchups+mtemp
			

			elif linetype == "move": #check for Roar, etc.; U-Turn, etc.
				#identify attacker and skip its name
				found = False
				for nick in nicks:
					if line[6+3*spacelog:].startswith(nick):
						if found: #the trainer was a d-bag
							if len(nick) < len(found):
								continue	
						found = nick
				tempnicks = copy.copy(nicks)
				while not found: #PS f****d up the names. We fix by shaving a character at a time off the nicknames
					foundidx=-1	
					for i in range(len(tempnicks)):
						if len(tempnicks[i])>1:
							tempnicks[i]=tempnicks[i][:len(tempnicks[i])-1]
						if line[6+3*spacelog:].startswith(tempnicks[i]):
							if found:
								if len(tempnicks[i]) < len(found):
									continue
							found = tempnicks[i]
							foundidx = i
					if found:
						nicks[i]=found
					else:
						tryAgain = False
						for i in range(len(tempnicks)):
							if len(tempnicks[i])>1:
								tryAgain = True
								break
						if not tryAgain:
							sys.stderr.write("Nick not found.\n")
							sys.stderr.write("In file: "+sys.argv[1]+"\n")
							sys.stderr.write(line[6+3*spacelog:]+"\n")
							sys.stderr.write(str(nicks)+"\n")
							return
						
					
		
				move = line[7+5*spacelog+len(found):string.find(line,"|",7+5*spacelog+len(found))-1*spacelog]
				if move in ["Roar","Whirlwind","Circle Throw","Dragon Tail"]:
					roar = True
				elif move in ["U-Turn","U-turn","Volt Switch","Baton Pass"]:
					uturn = True

			elif linetype == "-enditem": #check for Red Card, Eject Button
				#search for relevant items
				if string.rfind(line,"Red Card") > -1:
					roar = True
				elif string.rfind(line,"Eject Button") > -1:
					uturn = True

			elif linetype == "faint": #KO
				#who fainted?
				ko[int(line[8+3*spacelog])-1]=1

				if uturn:
					uturn=False
					uturnko=True

			elif linetype in ["switch","drag"]: #switch out: new matchup!
				if linetype == "switch":
					p=9+3*spacelog
				else:
					p=7+3*spacelog	
				switch[int(line[p])-1]=True

				if switch[0] and switch[1]: #need to revise previous matchup
					matchup=mtemp[len(mtemp)-1][:string.find(mtemp[len(mtemp)-1],':')+2]
					if (not ko[0]) and (not ko[1]): #double switch
						matchup = matchup + "double switch"
					elif ko[0] and ko[1]: #double down
						KOs[active[ko[0]]] = KOs[active[ko[0]]]+1
						matchup = matchup + "double down"
					else: #u-turn KO (note that this includes hit-by-red-card-and-dies and roar-then-die-by-residual-dmg)
						KOs[active[ko[0]]] = KOs[active[ko[0]]]+1
						matchup = matchup + ts[active[ko[1]]][1]+" was u-turn KOed"
					mtemp[len(mtemp)-1]=matchup
				else:
					#close out old matchup
					pokes = [ts[active[0]][1],ts[active[1]][1]]
					pokes=sorted(pokes, key=lambda pokes:pokes)
					matchup=pokes[0]+' vs. '+pokes[1]+': '
					#if ko[0] and ko[1]: #double down
					if ko[0] or ko[1]:
						KOs[active[ko[0]]] = KOs[active[ko[0]]]+1
						matchup = matchup + ts[active[ko[1]]][1]+" was KOed"
					else:
						matchup = matchup + ts[active[switch[1]]][1]
						if roar:
							matchup = matchup + " was forced out"
						else:
							matchup = matchup + " was switched out"
					mtemp.append(matchup)
		
				#new matchup!
				uturn = roar = False
				#it matters whether the poke is nicknamed or not
				end = string.rfind(line,'|')-1*spacelog
				species = line[string.rfind(line,'|',0,end-1)+1+1*spacelog:end]
				while ',' in species:
					species = species[0:string.rfind(species,',')]
				for s in aliases: #combine appearance-only variations and weird PS quirks
					if species in aliases[s]:
						species = s
						break
				active[int(line[p])-1]=ts.index([ts[11*(int(line[p])-1)][0],species])

	#totalTurns = log['turns']
	#totalKOs = sum(KOs)

	teamtags = teams['p1team'][len(teams['p1team'])-1]

	outfile.write(ts[0][0].encode('ascii','replace'))
	outfile.write(' (bias:'+str(teamtags['bias'])+', stalliness:'+str(teamtags['stalliness'])+', tags:'+','.join(teamtags['tags'])+')')
	outfile.write("\n")
	i=0
	while (ts[i][0] == ts[0][0]):
		outfile.write(ts[i][1]+" ("+str(KOs[i])+","+str(turnsOut[i])+")\n")
		i = i + 1
		if i>=len(ts):
			sys.stderr.write("Something's wrong here.\n")
			sys.stderr.write("In file: "+sys.argv[1]+"\n")
			sys.stderr.write(str(ts)+"\n")
			return	
	outfile.write("***\n")
	teamtags = teams['p2team'][len(teams['p2team'])-1]
	outfile.write(ts[len(ts)-1][0].encode('ascii','replace'))
	outfile.write(' (bias:'+str(teamtags['bias'])+', stalliness:'+str(teamtags['stalliness'])+', tags:'+','.join(teamtags['tags'])+')')
	outfile.write("\n")
	for j in range(i,len(ts)):
		outfile.write(ts[j][1]+" ("+str(KOs[j])+","+str(turnsOut[j])+")\n")
	outfile.write("@@@\n")
	for line in matchups:
		outfile.write(line+"\n")
	outfile.write("---\n")
Esempio n. 58
0
def removeExtension(filename):
        p = string.rfind(filename,'.')
        if p != -1:
                return filename[:p]
        else:
                return filename
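
Note that only the last extension is stripped:

print removeExtension('archive.tar.gz')   # 'archive.tar'
print removeExtension('README')           # 'README' (no dot: returned unchanged)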
Esempio n. 59
0
    def initialize(self):
        global foreigndirs

        os.chdir(self.makefileDir)
        self.printname = string.replace(self.makefile, topdir, "")
        self.makefile = os.path.basename(self.makefile)

        if not posixpath.exists("Makefile.am"):
            raise self.NoMakefileAmFound, self.makefileDir

        for dir in foreigndirs:
            if dir.match(self.makefileDir):
                print 'leaving ' + self.makefileDir
                return 0

        f = open(self.makefile)
        self.lines = []

        while 1:
            line = f.readline()
            if not line: break
            self.lines.append(string.rstrip(line))

        f.close()

        # take out the
        self.restore()

        optionline = re.compile('^\s*(\w+)\s*=\s*([^\n]*)$')
        linecontinued = re.compile('\\\s*\n')
        lastline = ''

        index = 0
        while index < len(self.lines):
            line = self.lines[index]
            if linecontinued.search(line):
                self.lines[index] = linecontinued.sub(
                    ' ', line) + self.lines[index + 1]
                continue
            else:
                index = index + 1

            match = optionline.search(line)
            if match:
                self.options[match.group(1)] = match.group(2)

        if self.options.has_key('KDE_OPTIONS'):
            options = string.split(self.options['KDE_OPTIONS'])
            if 'foreign' in options:
                foreigndirs.append(re.compile(self.makefileDir + "/.*"))
                return 0

        self.cxxsuffix = ""
        suffixes = re.compile('^\.SUFFIXES:(.*)$')

        for line in self.lines:
            match = suffixes.match(line)
            if match:
                existing_suffixes = string.split(match.group(1))
                for suffix in existing_suffixes:
                    # leave out the .
                    if suffix[1:] in cppsuffixes:
                        self.cxxsuffix = suffix[1:]
                        break
                if self.cxxsuffix:
                    break

        search_real_programs = {}

        for option in self.options.keys():
            if string.rfind(option, '_OBJECTS') > 0:

                program = option[0:string.find(option, '_OBJECTS')]
                objs = self.options[option]

                variable_in_objects = 0

                objlist = string.split(objs)
                variable = re.compile('\$\((\w+)\)')
                for obj in objlist:
                    match = variable.match(obj)
                    if match and not match.group(1) == 'OBJEXT':
                        variable_in_objects = 1
                        break

                if variable_in_objects:
                    continue

                if len(program) > 3 and program[:3] == 'am_':
                    program = program[3:]

                if verbose:
                    print "found program " + program

                self.programs.append(program)
                self.realobjs[program] = objs

                if self.options.has_key(program + "_SOURCES"):
                    self.sources[program] = self.options[program + "_SOURCES"]
                else:
                    self.sources[program] = ""
                    sys.stderr.write("found program with no _SOURCES: " +
                                     program + '\n')

                # unmask to regexp
                realprogram = string.replace(program, '_', '.')
                search_real_programs[program] = re.compile(
                    '.*(' + realprogram + ')(\$\(EXEEXT\)?)?:.*\$\(' +
                    program + '_OBJECTS\).*')

                self.realname[program] = ""

        for line in self.lines:
            if string.find(
                    line, '_OBJECTS'
            ) > 0:  # just a random piece to not use at _every_ line
                for program in self.programs:
                    match = search_real_programs[program].match(line)
                    if match:
                        self.realname[program] = match.group(1)
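
For a typical automake variable (hypothetical Makefile.am content), the
_OBJECTS scan above derives the program name like this:

option = 'kapp_OBJECTS'
program = option[0:string.find(option, '_OBJECTS')]   # 'kapp'
# self.realobjs['kapp'] then holds e.g. 'main.$(OBJEXT) widget.$(OBJEXT)'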
Esempio n. 60
0
def webimport(url, kind='web', wantpackagename=None):
    print kind, 'import ', url
    if kind == 'web':
        if url[:6] == 'file:/':
            kind = 'fs'
            url = url[5:]
        else:
            if url[:7] != 'http://':
                sys.stderr.write('HTTP URL not recognised\n')
                sys.exit(1)
            endofhostname = 7 + string.find(url[7:], '/')
            hostname = url[7:7 + string.find(url[7:], '/')]
            path = url[endofhostname:string.rfind(url, '/')]
            indexname = url[string.rfind(url, '/') + 1:]
            print hostname, path, indexname
            content = webget(hostname, path + '/' + indexname)
    if kind == 'fs':
        o = open(url, 'r')
        content = o.read()
        o.close()
        (path, _) = os.path.split(url)
    spl = string.split(content, '\n')
    if len(spl) > 4 and len(spl[-3]) > 10 and spl[-3][:8] == 'summary ':
        info = eval(spl[-3][8:])
    else:
        sys.stderr.write('Patch description page not recognised\n')
        sys.stderr.write(content)
        sys.exit(1)
    for line in spl:
        if line[:5] == '<H2> ':
            splitpoint = string.find(line[5:], ':')
            packagename = line[5:splitpoint + 5]
            title = line[splitpoint + 6:] + '\n'
            #print packagename, title
            desc_map[packagename] = title
        if line[:5] == '<H1> ':
            print 'Project ', line[:5]
            desc_map['project'] = line[5:] + '\n'

    if not os.path.exists(patch_tree):
        init()

    print 'Description map now ' + ` desc_map `
    packagelist = read_archive()

    packmap = {}
    for (package, branchlist, firstbranch) in packagelist:
        packmap[package] = {}
        for (branch, datae, patchlevel) in branchlist:
            packmap[package][branch] = patchlevel
    print packmap
    for (package, branchlist, firstbranch) in info:
        if wantpackagename and package != wantpackagename:
            print 'Skipping package ', package, '; we are looking for', wantpackagename
            continue
        fromlevel = None
        if not packmap.has_key(package):
            print 'No package ', package, '; creating it'
            create_package(package, firstbranch)
            packmap[package] = {}
            packmap[package][firstbranch] = 0
        # XXX; for the moment only handle one branch called live
        actions = 1
        while actions:
            actions = 0
            for (branch, setup, pl) in branchlist:
                if setup != None:
                    print "Don't know how to handle setup", setup
                    continue
                if not packmap[package].has_key(branch):
                    print "Don't know how to create new branch", branch
                    sys.exit(1)
                else:
                    lpl = packmap[package][branch]
                    print package, branch, 'Local patchlevel ', lpl, 'Remote patchlevel ', pl

                    for i in range(lpl + 1, pl + 1):
                        if kind == 'web':
                            print 'Downloading ', package, branch, i
                            patch = webget(
                                hostname, path + '/pubpatch/' + package + '/' +
                                branch + '/' + ` i `)
                        else:
                            o = open(path + '/pubpatch/' + package + '/' +
                                     branch + '/' + ` i `)
                            patch = o.read()
                            o.close()
                        tempfilename = tempfile.mktemp()
                        o = open(tempfilename, 'w')
                        o.write(patch)
                        o.close()
                        print 'Committing ', i
                        commit(tempfilename, package, [(branch, None)])
                        print 'Done ', i
                        os.unlink(tempfilename)
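
The URL splitting at the top of webimport() can be traced on a made-up
address:

url = 'http://example.org/patches/index.html'
hostname = url[7:7 + string.find(url[7:], '/')]                    # 'example.org'
path = url[7 + string.find(url[7:], '/'):string.rfind(url, '/')]   # '/patches'
indexname = url[string.rfind(url, '/') + 1:]                       # 'index.html'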