Code Example #1
def readFiles(path, pNum):

	#Set path based on particle number
	pPath = path + "/particles/particleNum" + str(pNum) + ".mel"
	rManPath = path + "/rmanCurves/rManCurve"
	# Open the particle file and count its lines
	with open(pPath, 'r') as filePart:
		fileLength = len(filePart.readlines())
	
	# Initialize counter for getLine command
	line = 0
	
	# Initialize array for storing point positions
	readPart = []
	
	# Cycle through individual particle files
	# Read each line to get position information
	for n in range(fileLength):
		
		line = n + 1
		readPart.append(linecache.getline(pPath, line))
		
		# Remove end of line character from entry
		if readPart[n][-1] == '\n':
			readPart[n] = readPart[n][:-1]
		
		# Clear cache for future values
		linecache.clearcache()
	
	# Call on drawRmanCurves to generate curves, pass array
	drawRmanCurves(readPart, pNum, rManPath)
Code Example #2
File: test.py Project: huashan/scripts
def get_nth_line(f,n):
	# with open(f) as ff:
	line = linecache.getline(f,n)
	[key, gt, conf] = line.split("\t")
	if n % 10 == 0 and n > 10:
		linecache.clearcache()
	return key,gt,conf.strip()
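A minimal usage sketch for the helper above (labels.tsv is a hypothetical tab-separated file with one key/gt/conf triple per row):

	key, gt, conf = get_nth_line('labels.tsv', 42)  # fetch row 42 without loading the whole file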
Code Example #3
File: merge.py Project: zlxstc/Codes
def Merger(dir,oldname,newname):
    linecache.clearcache()
#    num_lines = file_len(dir+oldname)
#    print num_lines    
    lineNo = 1
    text=[]
    while(lineNo < 265000):
#        print lineNo   
        line1 = linecache.getline(dir+oldname, lineNo)        
        line2 = linecache.getline(dir+oldname, lineNo+1)
        
#        if len(line1)<2 and len(line2)<2 and len(line3)<2 and len(line4)<2:
#            break
    #        print line2        
        if len(line1.split(','))>3:            
            if len(line2.split(','))<3:            
                line1=line1.strip()+line2
                text.append(line1)
                lineNo=lineNo+2            
            else:
                text.append(line1)
    #            text.append(line2)
                lineNo=lineNo+1
        else:
#            print "1"+text[-1]
            text[-1]=(text[-1].strip())
#            print "2"+text[-1]          
            text[-1]=text[-1]+line1
#            print "3"+text[-1]
            lineNo=lineNo+1
    new_file = open(newname,'a+')
    for item in text:
        new_file.write(item)
    new_file.close()
Code Example #4
File: template.py Project: vuamitom/tornado
 def generate_async(self, **kwargs):
     namespace = self._get_namespace(**kwargs)
     exec_in(self.compiled, namespace)
     execute = gen.coroutine(namespace["_tt_execute"])
     linecache.clearcache()
     result = yield execute()
     return result
Code Example #5
File: split_texts.py Project: alexanderpanchenko/stc
def split_csv_texts(input_fpath, output1_fpath, output2_fpath, split_percent):
    # Check the parameters 
    if (not os.path.exists(input_fpath)) or (split_percent < 0) or (split_percent > 1):
        print "Error: wrong input arguments."
        return
	
    # Open the files
    input_file = open(input_fpath, "r")
    output1_file = open(output1_fpath,"w")
    output2_file = open(output2_fpath,"w")
    
    # Get number of lines 
    input_number = len(input_file.readlines())
    output1_number = int(input_number * split_percent)
    print input_fpath, ":", input_number, "texts"
    print output1_fpath, ":", output1_number, "texts"
    print output2_fpath, ":", input_number - output1_number, "texts"    

    # Get a random sample of line numbers
    input_lines = range(1, input_number + 1)
    output1_lines = random.sample(input_lines, output1_number)
    
    # Save the lines in two separate files
    for line in input_lines:
        if(line in output1_lines):
            output1_file.write(linecache.getline(input_fpath, line))
        else:
            output2_file.write(linecache.getline(input_fpath, line))
    
    linecache.clearcache()    
    input_file.close()
    output1_file.close()
    output2_file.close()
Code Example #6
File: winsvFuncs.py Project: Weber-JC/PythonEggs
def ReadTailLines(sFileName, iNum=5):
    # Read the last few lines of the file
    import linecache
    linecache.checkcache(sFileName)
    lsText = linecache.getlines(sFileName)[-iNum:]
    linecache.clearcache()
    return lsText
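A minimal usage sketch (service.log is a hypothetical path):

    # Print the last three lines of the file
    for sLine in ReadTailLines('service.log', iNum=3):
        print(sLine.rstrip())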
Code Example #7
def Get_nRxn(nRxn_file,Z,N,nFission_filename):
    # Goes through each neutron reaction file, IDs reaction types and daughter
    # products. nRxnType, fission products and yields are returned as
    # dictionaries; reactions that are not tracked are returned as a list.
    nRxnType = {}
    FissP_ID = {}
    FissP_Yield = {}
    Rxns_not_Tracked = []
    with open(nRxn_file,'r') as file1:
        tmp = linecache.getline(nRxn_file,5,None) # pulls line 5 of nRxn_file into the cache
        a = tmp.split()
        skip = a[4] # grabs the number of records of descriptive text (NWD in ENDF7.1 manual)
        for i in range(int(skip)+5): #skip over the descriptive records...
            file1.__next__()
        for line in file1: # ...and go straight to the (MF, MT, NC, MOD) descriptive lines
            a = line.split()
            if (a[0]=='3'): # ID's which neutron reactions are tabulated.
                if (a[1]=='18'): #or (a[1]=='19') or (a[1]=='20') or (a[1]=='21') or (a[1]=='38')
                    fissType, FissP_ID, FissP_Yield = trls.MT_fission(int(a[1]),nFission_filename)
                else:
                    Ztmp,Ntmp,RxnType,noTrack = trls.MT(int(a[1]),Z,N) # using base Z & N values of isotope, get new Z & N for isotope post neutron reaction
                    if noTrack == ' ':
                        pass
                    else:
                        Rxns_not_Tracked.append(noTrack) # append which reactions are tabulated but not tracked
                    if RxnType == ' ':
                        pass
                    else:
                        nRxnType[RxnType] = trls.NuclideIdentifier(Ztmp,Ntmp)

            elif ('1  099999' in line): # if you hit the end of the (MF, MT, NC, MOD) records, quit out of function and move onto next nuclide file
                linecache.clearcache()
                return(nRxnType,FissP_ID, FissP_Yield,Rxns_not_Tracked)
Code Example #8
 def __getitem__(self, key):
     if key in self.deprecation_messages:
         import warnings
         import linecache
         # DeprecationWarnings are ignored by default. Clear the filter so
         # they are not:
         previous_warning_filters = warnings.filters[:]
         try:
             warnings.resetwarnings()
             # Hacky stuff to get it to work from within execfile() with
             # correct line data:
             linecache.clearcache()
             caller = sys._getframe(1)
             globals = caller.f_globals
             lineno = caller.f_lineno
             module = globals['__name__']
             filename = globals.get('__file__')
             fnl = filename.lower()
             if fnl.endswith((".pyc", ".pyo")):
                 filename = filename[:-1]
             message = self.deprecation_messages[key]
             warnings.warn_explicit(message, DeprecationWarning, filename, lineno, module)
         finally:
             # Restore the warnings filter:
             warnings.filters[:] = previous_warning_filters
     return dict.__getitem__(self, key)
Code Example #9
File: BatchCells.py Project: smallwave/Noah-Tibet
 def RunNoahModel(self,strtxtForcePathIn):
    #1 run model
    fileset = formic.FileSet(include="**/*.txt", directory=strtxtForcePathIn)
    nFile   = 0 # print process file ID
    os.chdir('F:\\worktemp\\Permafrost(Change)\\Run(S)\\')
    outputPath = None
    for file_name in fileset:
        nFile+=1
        print "Current file is : " + file_name + "; It is the " + str(nFile)
        print "################################################################"
        # get array of lines
        if outputPath is None:
            with open(file_name, 'r') as txtFile:
                updateLine  =  5   # linecache line numbers start at 1, not 0
                lineTxt     =  linecache.getline(file_name, updateLine)
                lineTxtS    =  lineTxt.split("=")  
                outputPath  =  lineTxtS[1].strip()[1:-1]
                linecache.clearcache()   #very important 
        InputFileDir,InputFile   = os.path.split(file_name)
        filename, file_extension = os.path.splitext(InputFile)
        items  = [outputPath,filename,".nc"]
        outputFileName = ''.join(items)
        if os.path.isfile(outputFileName) and os.access(outputFileName, os.R_OK):
            continue
        #... updateOutputDirLine
        command   =  'simple_driver.exe '+ file_name
        subprocess.call(command, shell=True)  #call 7z command
Code Example #10
def check_words(files,text,counter,result_list):
	#three different scopes, 
	#1.pick a tv show and a season and search through them
	for file in files:
		fp = open (file, 'r')
		for i, line in enumerate(fp, 1):  # start at 1 so i matches linecache's 1-based line numbers
			if re.search(r'\b%s\b'%(text),line.lower()):
				#'\bfoo\b' matches 'foo', 'foo.', '(foo)', 'bar foo baz' but not 'foobar' or 'foo3'.
				counter = counter + 1
				filename = os.path.basename(fp.name)
				lines_presented = -5			
				result_list.append(
				"<Number %d match>\n<In line %d in the file>\n<In file '%s'>" % (counter,i,filename),
				)
				while lines_presented <= 5:
					if re.search(r'^\w',linecache.getline(file, i+lines_presented)):
						if re.search(r'^\D',linecache.getline(file, i+lines_presented)):
							result_list.append(linecache.getline(file, i+lines_presented))
					if re.search(r'-->',linecache.getline(file, i+lines_presented)):
						result_list.append(linecache.getline(file, i+lines_presented))
					lines_presented = lines_presented + 1
				result_list.append(
				"------------------------------------------------------\n"
				)
				linecache.clearcache()
		fp.close()	
	for item in result_list:
		print item
	print "Total %d matches for \"%s\"" % (counter,text)
	print_or_not(counter,text,result_list)
Code Example #11
File: project_module.py Project: 5aket/pyspider
    def load_module(self, fullname):
        if self.mod is None:
            mod = self.mod = imp.new_module(self.name)
        else:
            mod = self.mod

        log_buffer = []
        mod.logging = mod.logger = logging.Logger(self.name)
        mod.logger.addHandler(SaveLogHandler(log_buffer))
        mod.log_buffer = log_buffer
        mod.__file__ = '<%s>' % self.name
        mod.__loader__ = self
        mod.__project__ = self.project
        mod.__package__ = ''

        code = self.get_code(fullname)
        exec code in mod.__dict__
        linecache.clearcache()

        if '__handler_cls__' not in mod.__dict__:
            for each in mod.__dict__.values():
                if inspect.isclass(each) and each is not base_handler.BaseHandler \
                        and issubclass(each, base_handler.BaseHandler):
                    mod.__dict__['__handler_cls__'] = each

        return mod
Code Example #12
 def setUp(self):
     linecache.clearcache()
     zipimport._zip_directory_cache.clear()
     self.path = sys.path[:]
     self.meta_path = sys.meta_path[:]
     self.path_hooks = sys.path_hooks[:]
     sys.path_importer_cache.clear()
Code Example #13
File: InvertedIndex.py Project: wakewalker/eecs767
    def update(self):
        '''Write the in-memory tnodes to file.'''
        with open(self.tdict_path, 'r+') as tdict_file:
            tdata = tdict_file.readlines()
            line_num = len(tdata) + 1

        # Get updated / new terms
        terms = {}
        for term in self:
            if (term in self) and (self[term]['tnode'] is not None):
                terms[term] = self[term]

        # Modify existing term data in tdata
        for term in list(terms.keys()):  # copy the keys so entries can be deleted during iteration
            if self[term]['loc'] is not None:
                tdata[self[term]['loc']-1] = (
                        '%s\n' % self[term]['tnode'].serialize()
                )
                del terms[term]

        # Write changes to file
        with open(self.tdict_path, 'w+') as tdict_file:
            # Write modified
            tdict_file.writelines(tdata)
            # Add new
            for term in terms:
                tdict_file.write('%s\n' % self[term]['tnode'].serialize())
                self[term]['loc'] = line_num
                line_num += 1
        linecache.clearcache()
Code Example #14
File: gmsio.py Project: pengfeili1/pymsmt
def get_crds_from_gms(logfile):

    unit = 'angs' #Coordinates will use angs unit in default

    ln = 1
    fp = open(logfile, 'r')
    for line in fp:
        if ' ATOM      ATOMIC                      COORDINATES (BOHR)' in line:
            bln = ln + 2
            unit = 'bohr'    #Means using Bohr unit
        elif ' ATOM      ATOMIC                      COORDINATES (ANGS' in line:
            bln = ln + 2
            unit = 'angs'
        elif '          INTERNUCLEAR DISTANCES' in line:
            eln = ln - 2
        ln = ln + 1
    fp.close()

    crdl = []
    for i in range(bln, eln+1):
        line = linecache.getline(logfile, i)
        line = line.strip('\n')
        line = line.split()
        if unit == 'bohr':
            crdl.append(float(line[2]))
            crdl.append(float(line[3]))
            crdl.append(float(line[4]))
        elif unit == 'angs':
            crdl.append(float(line[2])/B_TO_A)
            crdl.append(float(line[3])/B_TO_A)
            crdl.append(float(line[4])/B_TO_A)
    linecache.clearcache()

    return crdl
Code Example #15
File: regrtest.py Project: alkorzt/pypy
def dash_R_cleanup(fs, ps, pic):
    import gc, copy_reg
    import _strptime, linecache, dircache
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()
    urllib2.install_opener(None)
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    struct._cache.clear()
    filecmp._cache.clear()
    doctest.master = None

    # Collect cyclic trash.
    gc.collect()
Code Example #16
File: template.py Project: hmax/tornado
 def generate(self, **kwargs):
     """Generate this template with the given arguments."""
     namespace = {
         "escape": escape.xhtml_escape,
         "xhtml_escape": escape.xhtml_escape,
         "url_escape": escape.url_escape,
         "json_encode": escape.json_encode,
         "squeeze": escape.squeeze,
         "linkify": escape.linkify,
         "datetime": datetime,
         "_utf8": escape.utf8,  # for internal use
         "_string_types": (unicode, bytes_type),
         # __name__ and __loader__ allow the traceback mechanism to find
         # the generated source code.
         "__name__": self.name.replace('.', '_'),
         "__loader__": ObjectDict(get_source=lambda name: self.code),
     }
     namespace.update(self.namespace)
     namespace.update(kwargs)
     exec self.compiled in namespace
     execute = namespace["_execute"]
     # Clear the traceback module's cache of source data now that
     # we've generated a new template (mainly for this module's
     # unittests, where different tests reuse the same name).
     linecache.clearcache()
     try:
         return execute()
     except Exception:
         formatted_code = _format_code(self.code).rstrip()
         logging.error("%s code:\n%s", self.name, formatted_code)
         raise
Code Example #17
File: CardinalBot.py Project: Shawn-Smith/Cardinal
    def _load_plugins(self, plugins, first_run=False):
        # A dictionary of loaded plugins
        loaded_plugins = {}

        # A list of plugins that failed to load
        failed_plugins = []

        # Turn this into a list if it isn't one
        if isinstance(plugins, basestring):
            plugins = [plugins]

        linecache.clearcache()

        for plugin in plugins:
            loaded_plugins[plugin] = {}

            # Import each plugin with a custom _import_module function.
            try:
                module = self._import_module(self.loaded_plugins[plugin]['module'] if plugin in self.loaded_plugins else plugin)
            except Exception, e:
                print >> sys.stderr, "ERROR: Could not load plugin module: %s (%s)" % (plugin, e)
                failed_plugins.append(plugin)

                continue

            # Import each config with the same _import_module function.
            try:
                self.config[plugin] = self._import_module(self.config[plugin] if plugin in self.config else plugin, config=True)
            except ImportError:
                self.config[plugin] = None
            except Exception, e:
                self.config[plugin] = None
                print >> sys.stderr, "WARNING: Could not load plugin config: %s (%s)" % (plugin, e)
Code Example #18
File: test_linecache.py Project: isaiah/jython3
 def test_lazycache_provide_after_failed_lookup(self):
     linecache.clearcache()
     lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
     linecache.clearcache()
     linecache.getlines(NONEXISTENT_FILENAME)
     linecache.lazycache(NONEXISTENT_FILENAME, globals())
     self.assertEqual(lines, linecache.updatecache(NONEXISTENT_FILENAME))
Code Example #19
File: tail.py Project: lijianwei123/python_diy
def litterRowsTail():
    # number of lines in the file
    fileRowNums = util.countFileRows(filepath)
    
    # starting line number: roughly 10 lines from the end
    start = max(fileRowNums - 10, 0) + 1
    
    while True:
        try:
            import linecache, time
            line_str = linecache.getline(filepath, start)
            if line_str:
                print line_str,
                start = start + 1
                time.sleep(0.01)
            else:
                linecache.clearcache()
            
        except ImportError:
            print ''
            print 'error!'
            sys.exit(1)
        except KeyboardInterrupt:
            print ''
            print 'quit!'
            sys.exit(1)
Code Example #20
File: test_linecache.py Project: isaiah/jython3
 def test_lazycache_already_cached(self):
     linecache.clearcache()
     lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
     self.assertEqual(
         False,
         linecache.lazycache(NONEXISTENT_FILENAME, globals()))
     self.assertEqual(4, len(linecache.cache[NONEXISTENT_FILENAME]))
Code Example #21
File: scriptutils.py Project: LPRD/build_tools
def GetActiveFileName(bAutoSave = 1):
	"""Gets the file name for the active frame, saving it if necessary.
	
	Returns None if it can't be found, or raises KeyboardInterrupt.
	"""
	pathName = None
	active = GetActiveView()
	if active is None:
		return None
	try:
		doc = active.GetDocument()
		pathName = doc.GetPathName()

		if bAutoSave and \
			(len(pathName)>0 or \
			doc.GetTitle()[:8]=="Untitled" or \
			doc.GetTitle()[:6]=="Script"): # if not a special purpose window
			if doc.IsModified():
				try:
					doc.OnSaveDocument(pathName)
					pathName = doc.GetPathName()
					
					# clear the linecache buffer
					linecache.clearcache()

				except win32ui.error:
					raise KeyboardInterrupt

	except (win32ui.error, AttributeError):
		pass
	if not pathName:
		return None
	return pathName
Code Example #22
File: HYSPLIT.py Project: duncanwp/cis_plugins
def get_file_metadata(fname):
    import linecache

    metadata = {}

    grid_metadata = linecache.getline(fname, 1).split()
    n_grids = int(grid_metadata[0])

    # Trajectory metadata present after grid metadata
    trajectory_metadata = linecache.getline(fname, n_grids+2).split()
    metadata['n_trajectories'] = int(trajectory_metadata[0])

    # Get starting lat/lon/alt of each trajectory
    metadata['trajectories'] = {}
    for t in range(metadata['n_trajectories']):
        tstart = linecache.getline(fname, n_grids+3+t).split()
        # Save trajectories according to numbering in file
        metadata['trajectories'][t+1] = (tstart[-3], tstart[-2], tstart[-1])

    metadata['data_start'] = n_grids + metadata['n_trajectories'] + 3

    # Get custom variable names
    variable_names = linecache.getline(fname, metadata['data_start']).split()[2:]
    metadata['labels'] = hysplit_default_var + variable_names
    metadata['custom_labels'] = variable_names

    linecache.clearcache()
    return metadata
Code Example #23
File: on_time.py Project: gogostart/ansible_release
 def check_line_exist(self, line_no):
     if linecache.getline(self.filename, line_no) == "":
         linecache.clearcache()
         return False
     else:
         linecache.clearcache()
         return True
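A short usage note (checker stands for a hypothetical instance whose filename attribute is set): linecache.getline() returns an empty string for line numbers past the end of the file, while an existing blank line comes back as '\n', so the empty-string test reliably detects missing lines.

    exists = checker.check_line_exist(10)  # True if the file has at least 10 lines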
Code Example #24
    def load_module(self, fullname):
        if self.mod is None:
            mod = self.mod = imp.new_module(self.name)
        else:
            mod = self.mod

        log_buffer = []
        mod.logging = mod.logger = logging.Logger(self.name)
        handler = SaveLogHandler(log_buffer)
        handler.setFormatter(LogFormatter(color=False))
        mod.logger.addHandler(handler)
        mod.log_buffer = log_buffer
        mod.__file__ = '<%s>' % self.name
        mod.__loader__ = self
        mod.__project__ = self.project
        mod.__package__ = ''

        code = self.get_code(fullname)
        six.exec_(code, mod.__dict__)
        linecache.clearcache()

        if '__handler_cls__' not in mod.__dict__:
            BaseHandler = mod.__dict__.get('BaseHandler', base_handler.BaseHandler)
            for each in list(six.itervalues(mod.__dict__)):
                if inspect.isclass(each) and each is not BaseHandler \
                        and issubclass(each, BaseHandler):
                    mod.__dict__['__handler_cls__'] = each

        return mod
Code Example #25
File: startQT.py Project: Arunav666/apkinspector
    def forwardon(self):
        import linecache
        import Global
        print "actionforward"
        if Global.NAV_P == Global.NAV_NO:
            print "no method forward!"
        else:
            linecache.clearcache()

            Global.NAV_P += 1
            i = 2*Global.NAV_P
            pathindex = linecache.getline('1.txt',i-1)
            pathindex = pathindex.strip()
            classname = linecache.getline('1.txt',i)
            classname = classname[:-1]
    
            print "get from 1.txt"
            print pathindex
            print classname
            method = self.path2method[pathindex]
            print "the type of method is %s, the type of classname is %s" %(type(method),type(classname))
            self.displayMethod(method,classname)
            Global.currentclass = classname
            QMessageBox.information(self ,'Current Class', classname)
            Global.currentmethod = method
            QMessageBox.information(self ,'Current method', method)
            print method
            print classname
Code Example #26
File: server.py Project: johngoodleaf/webalchemy
    def __dreload(mdl):
        """Recursively reload modules."""
        nonlocal _s
        nonlocal _base_path
        nonlocal _reloaded_files

        for name in dir(mdl):
            mm = getattr(mdl, name)
            if type(mm) is not ModuleType:
                if (hasattr(mm, '__module__') and
                        mm.__module__ is not None):
                    mm = sys.modules[mm.__module__]

            if (not hasattr(mm, '__file__') or
                    not os.path.realpath(mm.__file__).startswith(_base_path) or
                    mm.__name__[0] == '_' or
                    '._' in mm.__name__ or
                    mm.__name__ in _s or
                    any(mm.__name__.startswith(bln) for bln in dreload_blacklist_starting_with)):
                continue

            _s.add(mm.__name__)
            __dreload(mm)
        _reloaded_files.append(os.path.realpath(mdl.__file__))
        if not just_visit:
            log.info('reloading: ' + str(mdl.__name__))
            linecache.clearcache()
            imp.reload(mdl)
        else:
            log.info('visiting: ' + str(mdl.__name__))
Code Example #27
File: module.py Project: fdr/pg-python
def _pl_eox():
	try:
		import linecache
		linecache.clearcache()
	except (ImportError, AttributeError):
		# ignore if linecache doesn't exist
		pass
Code Example #28
File: template.py Project: justzx2011/tornado
 def generate(self, **kwargs):
     """Generate this template with the given arguments."""
     namespace = {
         "escape": escape.xhtml_escape,
         "xhtml_escape": escape.xhtml_escape,
         "url_escape": escape.url_escape,
         "json_encode": escape.json_encode,
         "squeeze": escape.squeeze,
         "linkify": escape.linkify,
         "datetime": datetime,
         "_tt_utf8": escape.utf8,  # for internal use
         "_tt_string_types": (unicode_type, bytes_type),
         # __name__ and __loader__ allow the traceback mechanism to find
         # the generated source code.
         "__name__": self.name.replace(".", "_"),
         "__loader__": ObjectDict(get_source=lambda name: self.code),
     }
     namespace.update(self.namespace)
     namespace.update(kwargs)
     exec_in(self.compiled, namespace)
     execute = namespace["_tt_execute"]
     # Clear the traceback module's cache of source data now that
     # we've generated a new template (mainly for this module's
     # unittests, where different tests reuse the same name).
     linecache.clearcache()
     return execute()
Code Example #29
File: CSData.py Project: caglars/VoroModel
 def readWellData(self, dataType):
     #print("readWellRates")
     start = self.readDataFor("WELLS")
     end = self.readDataFor("ENDWELLS")
     #print ("start %s end %s" % (start, end))
     wellData = numpy.zeros(self.particles)
     for lineCounter in range(start+2, end+1):
         #print("here")
         line = linecache.getline(self.myDataFile, lineCounter)
         #print("line %s start %s end %s lineCounter %s" % (line, start, end, lineCounter))
         propertyList = line.split()
         if dataType=="FLOWRATE":
             if propertyList[1] == "FLOWRATE":
                 wellData[int(propertyList[0])] = propertyList[2]
         if dataType=="SKIN":
             if propertyList[1] == "SKIN":
                 wellData[int(propertyList[0])] = propertyList[2]
         if dataType=="RW":
             if propertyList[1] == "RW":
                 wellData[int(propertyList[0])] = propertyList[2]
         if dataType=="PERFTHICK":
             if propertyList[1] == "PERFTHICK":
                 wellData[int(propertyList[0])] = propertyList[2]
         linecache.clearcache()
     return wellData
Code Example #30
File: startQT.py Project: Arunav666/apkinspector
    def backon(self):
        import linecache
        import Global
        print "actionbackactionback" 
        if Global.NAV_P == 0 or Global.NAV_P == 1:
            print "no history!"
            QMessageBox.warning(self ,'warning', 'no history!')
        else:
            linecache.clearcache()
            Global.NAV_P -= 1
            i = 2*Global.NAV_P
            print "NAV_P="
            print Global.NAV_P
            print "NAV_NO="
            print Global.NAV_NO
#            method = self.path2method[1]
            pathindex = linecache.getline('1.txt',i-1)
            pathindex = pathindex.strip()
            classname = linecache.getline('1.txt',i)
            classname = classname[:-1]
            print "get from 1.txt"
            print pathindex
            print classname
            method = self.path2method[pathindex]
            Global.currentmethod = method
            QMessageBox.information(self ,'Current Method', method)
            print "the type of method is %s, the type of classname is %s" %(type(method),type(classname))
            self.displayMethod(method,classname)
            Global.currentclass = classname
            QMessageBox.information(self ,'Current Class', classname)
            Global.currentmethod = method
            QMessageBox.information(self ,'Current method', method)
            print method
            print classname
Code Example #31
 def test_lazy_lines(self):
     linecache.clearcache()
     f = traceback.FrameSummary("f", 1, "dummy", lookup_line=False)
     self.assertEqual(None, f._line)
     linecache.lazycache("f", globals())
     self.assertEqual('"""Test cases for traceback module"""', f.line)
Code Example #32
 def __del__(self):
     self.epicsfile.close()
     linecache.clearcache()
Code Example #33
def show_func(filename,
              start_lineno,
              func_name,
              timings,
              unit,
              output_unit=None,
              stream=None,
              stripzeros=False):
    """ Show results for a single function.
    """
    if stream is None:
        stream = sys.stdout

    template = '%6s %9s %12s %8s %8s  %-s'
    d = {}
    total_time = 0.0
    linenos = []
    for lineno, nhits, time in timings:
        total_time += time
        linenos.append(lineno)

    if stripzeros and total_time == 0:
        return

    if output_unit is None:
        output_unit = unit
    scalar = unit / output_unit

    stream.write("Total time: %g s\n" % (total_time * unit))
    if os.path.exists(filename) or filename.startswith("<ipython-input-"):
        stream.write("File: %s\n" % filename)
        stream.write("Function: %s at line %s\n" % (func_name, start_lineno))
        if os.path.exists(filename):
            # Clear the cache to ensure that we get up-to-date results.
            linecache.clearcache()
        all_lines = linecache.getlines(filename)
        sublines = inspect.getblock(all_lines[start_lineno - 1:])
    else:
        stream.write("\n")
        stream.write("Could not find file %s\n" % filename)
        stream.write(
            "Are you sure you are running this program from the same directory\n"
        )
        stream.write("that you ran the profiler from?\n")
        stream.write("Continuing without the function's contents.\n")
        # Fake empty lines so we can see the timings, if not the code.
        nlines = max(linenos) - min(min(linenos), start_lineno) + 1
        sublines = [''] * nlines
    for lineno, nhits, time in timings:
        d[lineno] = (nhits, '%5.1f' % (time * scalar),
                     '%5.1f' % (float(time) * scalar / nhits),
                     '%5.1f' % (100 * time / total_time))
    linenos = range(start_lineno, start_lineno + len(sublines))
    empty = ('', '', '', '')
    header = template % ('Line #', 'Hits', 'Time', 'Per Hit', '% Time',
                         'Line Contents')
    stream.write("\n")
    stream.write(header)
    stream.write("\n")
    stream.write('=' * len(header))
    stream.write("\n")
    for lineno, line in zip(linenos, sublines):
        nhits, time, per_hit, percent = d.get(lineno, empty)
        txt = template % (lineno, nhits, time, per_hit, percent,
                          line.rstrip('\n').rstrip('\r'))
        stream.write(txt)
        stream.write("\n")
    stream.write("\n")
Code Example #34
 def setUp(self):
     # We're reusing the zip archive path, so we must clear the
     # cached directory info and linecache
     linecache.clearcache()
     zipimport._zip_directory_cache.clear()
     ImportHooksBaseTestCase.setUp(self)
Code Example #35
    def show_func(self,
                  filename,
                  start_lineno,
                  func_name,
                  timings,
                  stream=None,
                  stripzeros=False):
        """ Show results for a single function.
        """
        if stream is None:
            stream = sys.stdout

        template = '%6s %8s %8s  %-s'
        d = {}
        total_hits = 0.0

        linenos = []
        for lineno, nhits in six.iteritems(timings):
            total_hits += nhits
            linenos.append(lineno)

        if stripzeros and total_hits == 0:
            return

        stream.write("Total hits: %g s\n" % total_hits)
        if os.path.exists(filename) or filename.startswith("<ipython-input-"):
            stream.write("File: %s\n" % filename)
            stream.write("Function: %s at line %s\n" %
                         (func_name, start_lineno))
            if os.path.exists(filename):
                # Clear the cache to ensure that we get up-to-date results.
                linecache.clearcache()
            all_lines = linecache.getlines(filename)
            try:
                sublines = inspect.getblock(all_lines[start_lineno - 1:])
            except tokenize.TokenError:
                # inspect.getblock fails on multi line dictionary comprehensions
                sublines = all_lines[start_lineno - 1:max(linenos)]
        else:
            stream.write("\n")
            stream.write("Could not find file %s\n" % filename)
            stream.write(
                "Are you sure you are running this program from the same directory\n"
            )
            stream.write("that you ran the profiler from?\n")
            stream.write("Continuing without the function's contents.\n")
            # Fake empty lines so we can see the timings, if not the code.
            nlines = max(linenos) - min(min(linenos), start_lineno) + 1
            sublines = [''] * nlines
        for lineno, nhits in six.iteritems(timings):
            d[lineno] = (nhits, '%5.1f' % (100 * nhits / total_hits))
        linenos = range(start_lineno, start_lineno + len(sublines))
        empty = ('', '')
        header = template % ('Line #', 'Hits', '% Hits', 'Line Contents')
        stream.write("\n")
        stream.write(header)
        stream.write("\n")
        stream.write('=' * len(header))
        stream.write("\n")
        for lineno, line in zip(linenos, sublines):
            nhits, percent = d.get(lineno, empty)
            txt = template % (lineno, nhits, percent,
                              line.rstrip('\n').rstrip('\r'))
            stream.write(txt)
            stream.write("\n")
        stream.write("\n")
Code Example #36
def readCSV(File, Line):
    lines = linecache.getline(File, Line)
    linecache.clearcache()
    data = lines.strip().split(";")  
    return data
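A minimal usage sketch (data.csv is a hypothetical semicolon-separated file):

    row = readCSV('data.csv', 3)  # fields of line 3, e.g. ['a', 'b', 'c']
    print(row)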
Code Example #37
File: compile_shell.py Project: pu17/manim_project
import sys
import os
import linecache

file_relative_dir=sys.argv[1]
linenumber=int(sys.argv[2])


while linenumber:
    text=linecache.getline(file_relative_dir,linenumber)
    if 'class' in text:
        escape_index=text.find(' ')
        brackets_index=text.find('(')
        class_name=text[escape_index:brackets_index]
        linecache.clearcache()
        break
    else:
        linenumber-=1

commands=[
    '/Library/Frameworks/Python.framework/Versions/3.8/bin/python3',  # local Python 3 path; replace with your own
    'manim.py',
    file_relative_dir,
    class_name
]
os.system(' '.join(commands))
Code Example #38
 def test_lazycache_no_globals(self):
     lines = linecache.getlines(FILENAME)
     linecache.clearcache()
     self.assertEqual(False, linecache.lazycache(FILENAME, None))
     self.assertEqual(lines, linecache.getlines(FILENAME))
Code Example #39
 def setUp(self):
     linecache.clearcache()
     zipimport._zip_directory_cache.clear()
     ImportHooksBaseTestCase.setUp(self)
Code Example #40
File: refleak.py Project: marwahaha/ZZ-FORK-cpython
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    import gc, copyreg
    import _strptime, linecache
    import urllib.parse, urllib.request, mimetypes, doctest
    import struct, filecmp, collections.abc
    from distutils.dir_util import _path_created
    from weakref import WeakSet

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copyreg.dispatch_table.clear()
    copyreg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass  # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            obj._abc_registry = abcs.get(obj, WeakSet()).copy()
            obj._abc_cache.clear()
            obj._abc_negative_cache.clear()

    # Flush standard output, so that buffered data is sent to the OS and
    # associated Python objects are reclaimed.
    for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
        if stream is not None:
            stream.flush()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urllib.parse.clear_cache()
    urllib.request.urlcleanup()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None
    try:
        import ctypes
    except ImportError:
        # Don't worry about resetting the cache if ctypes is not supported
        pass
    else:
        ctypes._reset_cache()

    # Collect cyclic trash and read memory statistics immediately after.
    func1 = sys.getallocatedblocks
    func2 = sys.gettotalrefcount
    gc.collect()
    return func1(), func2(), fd_count()
Code Example #41
	async def processPacket(self, packetData):
		print('Processing packet')
		# Packet data comes in as hex; we need to convert it to binary to parse
		binaryDataLength = len(packetData) * 4
		print('bin data len' + str(binaryDataLength))
		binaryData = format(int(packetData,16), 'b').zfill(binaryDataLength)
		secretKey = b'SECRETKEY'

		if binaryData[0:8] == '00000000':
			# This is a TX Schedule packet.
			print("TX Schedule Packet")

			# Get window start delta T
			windowStartBinary = binaryData[8:40]
			windowStartDecimal = int(windowStartBinary,2)
			print("Window start in seconds: ", windowStartDecimal)

			# Get window duration
			windowDurationBinary = binaryData[40:56]
			windowDurationDecimal = int(windowDurationBinary,2)
			print("Window duration in seconds: ", windowDurationDecimal)

			# Get data type
			dataTypeBinary = binaryData[56:64]
			dataTypeDecimal = int(dataTypeBinary,2)
			print("Data type: ", dataTypeDecimal)

			# Get picture number
			pictureNumberBinary = binaryData[64:80]
			pictureNumberDecimal = int(pictureNumberBinary,2)
			print("Picture number: ", pictureNumberDecimal)

			#Get index
			print("index will be:", int(binaryData[80:112], 2))
			if int(binaryData[80:112], 2) == 0:
				index = -1
			else:
				index = int(binaryData[80:112], 2)
			print("Indexing to:", index)

			# Get the appended hash - it is a 16 byte (128 bit) value
			receivedHash = binaryData[112:]
			print("Received Hash: ", receivedHash)

			# Generated hash from received data
			generatedHash = hmac.new(secretKey, bytes(binaryData[0:112], 'utf-8'), digestmod=hashlib.md5)
			generatedHashHex = generatedHash.hexdigest()
			generatedHashLength = len(generatedHashHex) * 4
			generatedHashBinary = format(int(generatedHashHex,16), 'b').zfill(generatedHashLength)
			print("Generated hash: ", generatedHashBinary)
			if receivedHash == generatedHashBinary:
				print("Hashes match! Writing window")
				self.writeTXWindow(windowStartDecimal, windowDurationDecimal, dataTypeDecimal, pictureNumberDecimal, index)

			else:
				print("Hashes do not match, will not save window!")
		else:
			# This is a command packet
			print("Command packet")

			# Validate HMAC Hash
			# Note, hash is 16 bytes (128 bits). Command packet is 1 byte (8 bits)
			receivedHash = binaryData[-128:]
			print("Received Hash: ", receivedHash)

			# Generated hash from received data
			generatedHash = hmac.new(secretKey, bytes(binaryData[0:-128], 'utf-8'), digestmod=hashlib.md5)
			generatedHashHex = generatedHash.hexdigest()
			generatedHashLength = len(generatedHashHex) * 4
			generatedHashBinary = format(int(generatedHashHex,16), 'b').zfill(generatedHashLength)
			print("Generated hash: ", generatedHashBinary)
			if receivedHash == generatedHashBinary:
				print("Hashes match! Executing commands")

				if binaryData[8:16] == '00000000':
					# Turn off Transmitter
					print("Turn off Transmissions")
					self.disableTransmissions()
				else:
					#Turn on Transmitter
					print("Turn on Transmitter")
					self.enableTransmissions()

				if binaryData[16:24] == '00000000':
					# DO NOT Clear TX Schedule and Progress
					print("Do NOT Clear TX Schedule and Progress")
				else:
					# Clear TX Schedule & Progress
					print("Clear TX Schedule and Progress")
					self.clearTXFile()
					self.clearTXProgress()

				if binaryData[24:32] == '00000000':
					# Do not take picture
					print("Do not take picture")
				else:
					# Take picture
					print("Take picture")
					self.__cam.takePicture()

				if binaryData[32:40] == '00000000':
					# Do not deploy boom
					print("Do not deploy boom")
				else:
					# Deploy boom
					print("Deploy boom")
					deployer = boomDeployer.BoomDeployer()
					await deployer.deploy()

				if binaryData[40:48] == '00000000':
					# Do not reboot
					print("Do not reboot")
				else:
					#Send reboot command to Beetle
					print("Reboot")
					os.system("sudo reboot")

				fileChecker.checkFile(self.__bootRecordsPath)
				reboots = int(linecache.getline(self.__bootRecordsPath, 1))
				skip = int(linecache.getline(self.__bootRecordsPath, 3))
				linecache.clearcache()
				if skip != 4:
					if binaryData[56:64] == '00000000':
						#Chose whether or not to skip to post boom deploy
						print("Running flight logic normally.")
					else:
						print("Skipping to post boom deploy.")
						bootRecords = open(self.__bootRecordsPath, 'w+')
						bootRecords.write(str(reboots) + "\n1\n4\n")
						bootRecords.close()
						os.system("sudo reboot")

				if binaryData[64:72] == '00000000':
					# Keep Pictures
					print("Keeping Pictures")
				else:
					#Delete Pictures
					print("Deleting Pictures")
					self.deletePictures()

				if binaryData[72:80] == '00000000':
					# Keep Data
					print("Keeping Data")
				else:
					# Delete Data
					print("Deleting Data")
					self.deleteData()
				
				#Beacon commands have been removed and now send directly to the transceiver
					
			else:
				print("Hashes do not match, will not execute commands!")
 def calculateFitness(self, population):
     rraCMCTaskSetFilePath = 'H:\\Northwestern-RIC\\Modeling\\OpenSim\\GenericFiles\\gait2392_RRA_CMCTaskSet.xml'
     dom = parse(rraCMCTaskSetFilePath)
     cmcJointElements = dom.getElementsByTagName('CMC_Joint')
     fitnesses = []
     logReport = []
     for individual in population:
         logReportLine = [
             str(self.currentGen),
             str(population.index(individual) + 1)
         ]
         chromosomes = individual.split('-')
         # Write weights to XML
         for i in range(len(chromosomes)):
             index = self.decode(chromosomes[i])
             value = self.variableValues[index]
             name = self.variableNames[i]
             logReportLine.append(value)
             for elem in cmcJointElements:
                 if elem.getAttribute('name') == name:
                     elem.getElementsByTagName(
                         'weight')[0].firstChild.nodeValue = ' ' + value
                     break
         xmlString = dom.toxml('UTF-8')
         xmlFile = open(rraCMCTaskSetFilePath, 'w')
         xmlFile.write(xmlString)
         xmlFile.close()
         # Specify trial
         subDir = self.subDir
         trialName = self.trialName
         # Run simulation
         subprocess.Popen((subDir + 'Run.bat'), shell=True, cwd=subDir)
         startTime = time.time()
         while True:
             # Check for simulation result file
             if os.access(subDir + trialName + '_RRA_controls.xml',
                          os.F_OK):
                 break
             # Timeout after 2 minutes if file doesn't exist yet (simulation probably failed)
             elif (time.time() - startTime) > 120:
                 break
             # Wait
             else:
                 time.sleep(5)
         # Process simulation output
         try:
             weightSum = 0
             # Residuals
             txtline = linecache.getline(
                 subDir + trialName + '_RRA_Actuation_force.sto', 23)
             headerList = txtline.rstrip().split('\t')
             residualNames = headerList[1:7]
             linecache.clearcache()
             residuals = numpy.loadtxt(subDir + trialName +
                                       '_RRA_Actuation_force.sto',
                                       skiprows=23,
                                       usecols=(1, 2, 3, 4, 5, 6))
             maxResiduals = residuals.__abs__().max(0)
             rmsResiduals = numpy.sqrt(
                 numpy.sum(numpy.square(residuals), 0) /
                 numpy.size(residuals, 0))
             ###residuals = numpy.genfromtxt(subDir+trialName+'_RRA_Actuation_force.sto',dtype=float,skip_header=22,usecols=(1,2,3,4,5,6),names=True)
             ###residualNames = residuals.dtype.names
             ###residualsArray = residuals.view((float, len(residuals.dtype.names)))
             for k in range(len(residualNames)):
                 if residualNames[k] == 'FX' or residualNames[
                         k] == 'FY' or residualNames[k] == 'FZ':
                     # Max
                     if maxResiduals[k] <= 10:
                         weightSum += 0.02
                     elif maxResiduals[k] <= 25:
                         weightSum += 0.25
                     else:
                         weightSum += 10
                     # RMS
                     if rmsResiduals[k] <= 5:
                         weightSum += 0.02
                     elif rmsResiduals[k] <= 10:
                         weightSum += 0.25
                     else:
                         weightSum += 10
                 elif residualNames[k] == 'MX' or residualNames[
                         k] == 'MY' or residualNames[k] == 'MZ':
                     # Max
                     if maxResiduals[k] <= 50:
                         weightSum += 0.02
                     elif maxResiduals[k] <= 75:
                         weightSum += 0.25
                     else:
                         weightSum += 10
                     # RMS
                     if rmsResiduals[k] <= 30:
                         weightSum += 0.02
                     elif rmsResiduals[k] <= 50:
                         weightSum += 0.25
                     else:
                         weightSum += 10
             # Position Errors
             txtline = linecache.getline(
                 subDir + trialName + '_RRA_pErr.sto', 7)
             headerList = txtline.rstrip().split('\t')
             posErrNames = headerList[1:]
             linecache.clearcache()
             # (Column indices to remove -- after removing first column)
             removeIndices = [
                 posErrNames.index('subtalar_angle_r'),
                 posErrNames.index('mtp_angle_r'),
                 posErrNames.index('subtalar_angle_l'),
                 posErrNames.index('mtp_angle_l')
             ]
             removeIndices.reverse()
             for k in removeIndices:
                 del posErrNames[k]
             posErrors = numpy.loadtxt(subDir + trialName + '_RRA_pErr.sto',
                                       skiprows=7)
             posErrors = numpy.delete(posErrors, 0, 1)
             for k in removeIndices:
                 posErrors = numpy.delete(posErrors, k, 1)
             maxPosErr = posErrors.__abs__().max(0)
             rmsPosErr = numpy.sqrt(
                 numpy.sum(numpy.square(posErrors), 0) /
                 numpy.size(posErrors, 0))
             for k in range(len(posErrNames)):
                 if posErrNames[k] == 'pelvis_tx' or posErrNames[
                         k] == 'pelvis_ty' or posErrNames[k] == 'pelvis_tz':
                     # Convert from m to cm
                     maxPosErr[k] *= 100
                     rmsPosErr[k] *= 100
                     # Max
                     if maxPosErr[k] <= 2:
                         weightSum += 0.02
                     elif maxPosErr[k] <= 5:
                         weightSum += 0.25
                     elif maxPosErr[k] <= 15:
                         weightSum += 10
                     else:
                         weightSum += 25
                     # RMS
                     if rmsPosErr[k] <= 2:
                         weightSum += 0.02
                     elif rmsPosErr[k] <= 4:
                         weightSum += 0.25
                     else:
                         weightSum += 10
                 else:
                     # Convert from rad to deg
                     maxPosErr[k] *= 180 / numpy.pi
                     rmsPosErr[k] *= 180 / numpy.pi
                     # Max
                     if maxPosErr[k] <= 2:
                         weightSum += 0.02
                     elif maxPosErr[k] <= 5:
                         weightSum += 0.25
                     else:
                         weightSum += 10
                     # RMS
                     if rmsPosErr[k] <= 2:
                         weightSum += 0.02
                     elif rmsPosErr[k] <= 5:
                         weightSum += 0.25
                     else:
                         weightSum += 10
             # Update log
             maxResiduals = maxResiduals.tolist()
             rmsResiduals = rmsResiduals.tolist()
             maxPosErr = maxPosErr.tolist()
             rmsPosErr = rmsPosErr.tolist()
             # FX, FY, FZ
             for k in range(3):
                 logReportLine.append(str(maxResiduals[k]))
             for k in range(3):
                 logReportLine.append(str(rmsResiduals[k]))
             # MX, MY, MZ
             for k in range(3, 6):
                 logReportLine.append(str(maxResiduals[k]))
             for k in range(3, 6):
                 logReportLine.append(str(rmsResiduals[k]))
             # Translations
             for k in range(3):
                 logReportLine.append(str(maxPosErr[k]))
             for k in range(3):
                 logReportLine.append(str(rmsPosErr[k]))
             # Angles
             for k in range(3, len(maxPosErr)):
                 logReportLine.append(str(maxPosErr[k]))
             for k in range(3, len(rmsPosErr)):
                 logReportLine.append(str(rmsPosErr[k]))
             # Individual fitness
             fitnesses.append(1 / weightSum)
             logReportLine.append(str(1 / weightSum))
             # Remove simulation output files
             try:
                 os.remove(subDir + trialName + '_RRA.log')
                 os.remove(subDir + trialName + '_AdjustedCOM.osim')
                 os.remove(subDir + 'err.log')
                 os.remove(subDir + 'out.log')
             except:
                 pass
             rraSpecifiers = ('Actuation_force.sto', 'Actuation_power.sto',
                              'Actuation_speed.sto',
                              'BodyKinematics_acc_global.sto',
                              'BodyKinematics_pos_global.sto',
                              'BodyKinematics_vel_global.sto',
                              'Kinematics_dudt.sto', 'Kinematics_q.sto',
                              'Kinematics_u.sto', 'avgResiduals.txt',
                              'controls.sto', 'controls.xml', 'pErr.sto',
                              'states.sto')
             for fspec in rraSpecifiers:
                 try:
                     os.remove(subDir + trialName + '_RRA_' + fspec)
                 except:
                     break
         # If simulation failed
         except:
             fitnesses.append(0.0000000001)
         # Append to log
         logReport.append('\t'.join(logReportLine) + '\n')
     # Write to output file
     logFile = open(self.log, 'a')
     logFile.writelines(logReport)
     logFile.close()
     # Summary
     summaryFile = open(self.summary, 'a')
     summaryFile.write('\t'.join([
         str(self.currentGen),
         str(numpy.mean(fitnesses)),
         str(numpy.max(fitnesses)),
         time.strftime('%H:%M:%S', time.localtime())
     ]) + '\n')
     summaryFile.close()
     # Print to screen
     print('Generation ' + str(self.currentGen) +
           ' is complete. Max fitness is ' + str(numpy.max(fitnesses)) +
           '.')
     # Return
     return fitnesses
Code Example #43
def fuse_duplicate_clients_to_file(file_paths):

    ###EXPERIMENT#####----->
    ip_alla = list()
    ip_list = list()
    for clients_file in file_paths:
        ip_list.append(set())

        file = open(clients_file, 'r')
        for f in file:
            f_json = json.loads(f)
            ip_list[-1].add(f_json['ipaddress'])
        for ip in ip_list[-1]:
            ip_alla.append(ip)

    ip_alla.sort()

    ip_dublicates = set()
    last_ip = ''
    for ip in ip_alla:
        if ip == last_ip:  # exact comparison; 'in' would also match substrings
            ip_dublicates.add(ip)
        last_ip = ip

    for ip in ip_dublicates:
        print(ip)

    ####<-------EXPERIMENT

    for clients_file in file_paths:
        dub_list = list()
        # get all ip addresses of client_objects
        numbers_lines = 0

        file = open(clients_file, 'r')
        ip_list = list()

        for f in file:
            f_json = json.loads(f)
            ip_list.append(f_json['ipaddress'])
            numbers_lines += 1
        file.close()
        file = None

        added_list = list()
        for number in range(1, numbers_lines + 1):  # line numbers are 1-based; include the last line
            if number not in added_list:
                added_list.append(number)
                line = linecache.getline(clients_file, number)
                dub_list.append(list())
                dub_list[-1].append(number)
                line_json = json.loads(line)
                ip_main = line_json['ipaddress']

                count = 0

                for ip in ip_list:
                    count += 1
                    # If it matches, add that line number to the duplicate list.
                    if (ip_main == ip) and (count not in added_list):
                        dub_list[-1].append(count)
                        added_list.append(count)

        #fuse all cow_client dublicates and send to file
        file = open(clients_file + '.clog', 'w')
        file.write('')
        file.close()

        for dub in dub_list:
            if len(dub) != 0:
                count = 0
                while count < len(dub):
                    if count == 0:
                        line = linecache.getline(clients_file, dub[count])
                        client_new = make_client_from_text(line)

                    elif count > 0:
                        line = linecache.getline(clients_file, dub[count])
                        client_tmp = make_client_from_text(line)
                        sessions = client_tmp.get_sessions()
                        for ses in sessions:
                            client_new.add_new_session(ses)
                    count += 1

                with open(clients_file + '.clog', 'a') as file:
                    jdump = json.dumps(client_new,
                                       default=lambda x: x.__dict__)
                    file.write(jdump + '\n')
                file.close()
                file = None
        linecache.clearcache()
        #delete tmp file
        os.remove(clients_file)
Code Example #44
File: lib.py Project: zhuoqinyu/pymsmt
def read_frcmod_file(frcmodf):

    #Get range of each parameter part in the frcmodf
    rfrcmodf = open(frcmodf, 'r')
    cardlist = ['MASS', 'BOND', 'ANGL', 'DIHE', 'IMPR', 'NONB']
    lnlist1 = []
    lnlist2 = []
    ln = 1
    for line in rfrcmodf:
      for card in cardlist:
        if line[0:len(card)] == card:
          lnlist1.append(card)
          lnlist2.append(ln)
      ln = ln + 1
    tln = ln - 1
    rfrcmodf.close()

    lndict = {}
    for i in range(0, len(lnlist1)-1):
      lndict[lnlist1[i]] = (lnlist2[i]+1, lnlist2[i+1])

    lndict[lnlist1[-1]] = (lnlist2[-1]+1, tln)

    #Read the parameter into dicts
    massparms = {}
    bondparms = {}
    angparms = {}
    dihparms = {}
    impparms = {}
    nbparms = {}

    for i in list(lndict.keys()):
      if i == "MASS":
        for j in range(lndict[i][0],lndict[i][1]):
          line = linecache.getline(frcmodf, j)
          massparms = readmass(massparms, line)
      elif i == "BOND":
        for j in range(lndict[i][0], lndict[i][1]):
          line = linecache.getline(frcmodf, j)
          bondparms = readbond(bondparms, line)
      elif i == "ANGL":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(frcmodf, j)
           angparms = readang(angparms, line)
      elif i == "DIHE":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(frcmodf, j)
           dihparms = readdih(dihparms, line)
      elif i == "IMPR":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(frcmodf, j)
           impparms = readimp(impparms, line)
      elif i == "NONB":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(frcmodf, j)
           nbparms = readnb(nbparms, line)

    linecache.clearcache()
    parmdict = Parms(massparms, bondparms, angparms, dihparms, impparms, nbparms)

    return parmdict
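
Note: the header-scan-plus-linecache pattern in read_frcmod_file generalizes
to any sectioned text file. The helper below is an illustrative sketch, not
part of pymsmt; it assumes a non-empty file whose sections appear in order,
each introduced by a header keyword at the start of a line.

import linecache

def section_ranges(path, headers):
    # map each header keyword to the 1-based line range of its section
    names, starts, total = [], [], 0
    with open(path, 'r') as fh:
        for total, line in enumerate(fh, start=1):
            for h in headers:
                if line.startswith(h):
                    names.append(h)
                    starts.append(total)
    ranges = {}
    for k in range(len(names) - 1):
        ranges[names[k]] = (starts[k] + 1, starts[k + 1])
    ranges[names[-1]] = (starts[-1] + 1, total)
    return ranges

# e.g. replay one section through the line cache:
# for ln in range(*section_ranges('frcmod.ff14SB', ['MASS', 'BOND'])['BOND']):
#     line = linecache.getline('frcmod.ff14SB', ln)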
コード例 #45
0
'''
#---enter # of nodes in mesh
nnodes = 165290 

#----nodes of interest
node_list = [60643, 63290]

#---number of recordings 
tsteps = 289

#directories to loop through if desired (one only should work fine)
dirs = ['P:\\02\\LakeOntario\\Storm\\20080203','P:\\02\\LakeOntario\\Storm\\19701120',
        'P:\\02\\LakeOntario\\Storm\\19710210','P:\\02\\LakeOntario\\Storm\\19731103',
        'P:\\02\\LakeOntario\\Storm\\19710124']

clearcache()        
a = datetime.now()        
for i, d in enumerate(dirs):
    print d           
    os.chdir(d)   
    #----Read fort.63
    clearcache()
    
    for cur_node in node_list:
        name = str(cur_node)
        with open('fort'+str(i) +'_'+ name +'.txt', 'w') as f:
            for j in range(cur_node,(tsteps-1)*nnodes,nnodes+1):
                if j == cur_node:
                    f.write(getline('fort.63',j+3).rstrip() + '\n')
                else:
                    #print i, getline('fort.63',i+3).rstrip()
                    # NOTE: the snippet is truncated in the source; a plausible
                    # completion mirrors the branch above:
                    f.write(getline('fort.63', j + 3).rstrip() + '\n')
コード例 #46
0
ファイル: lib.py プロジェクト: zhuoqinyu/pymsmt
def get_parm_dict(ffchoice, gaff, frcmodfs):

    #-------------------------------------------------------------------------
    #1. Read the parm*.dat file
    #-------------------------------------------------------------------------

    global add

    if ffchoice == 'ff94': #parm94
      parmf = add + 'parm94.dat'
      lndict = {'MASS': (2, 57), 'BOND': (59, 142), 'ANGL': (143, 334),
                'DIHE': (335, 416), 'IMPR': (417, 448), 'EQUA': (451, 453),
                'NONB': (455, 489)}
    elif ffchoice in ['ff99', 'ff99SB', 'ff03', 'ff03.r1']: 
      parmf = add + 'parm99.dat'
      lndict = {'MASS': (2, 66), 'BOND': (68, 184), 'ANGL': (185, 466),
                'DIHE': (467, 631), 'IMPR': (632, 670), 'EQUA': (673, 675),
                'NONB': (677, 719)}
    elif ffchoice in ['ff10', 'ff12SB', 'ff14SB']:
      parmf = add + 'parm10.dat'
      lndict = {'MASS': (2, 65), 'BOND': (67, 218), 'ANGL': (219, 619),
                'DIHE': (620, 895), 'IMPR': (896, 955), 'EQUA': (958, 960),
                'NONB': (962, 1001)}
    else:
      # guard against a NameError on parmf/lndict further down
      raise ValueError('Unsupported force field choice: ' + str(ffchoice))

    #define the parameter dicts
    massparms = {}
    bondparms = {}
    angparms = {}
    dihparms = {}
    impparms = {}
    eqdict = {}
    nbparms = {}

    for i in list(lndict.keys()):
      if i == "MASS":
        for j in range(lndict[i][0],lndict[i][1]):
          line = linecache.getline(parmf, j)
          massparms = readmass(massparms, line)
      elif i == "BOND":
        for j in range(lndict[i][0], lndict[i][1]):
          line = linecache.getline(parmf, j)
          bondparms = readbond(bondparms, line)
      elif i == "ANGL":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(parmf, j)
           angparms = readang(angparms, line)
      elif i == "DIHE":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(parmf, j)
           dihparms = readdih(dihparms, line)
      elif i == "IMPR":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(parmf, j)
           impparms = readimp(impparms, line)
      elif i == "EQUA":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(parmf, j)
           eqdict = readeqnb(eqdict, line)
      elif i == "NONB":
         for j in range(lndict[i][0], lndict[i][1]):
           line = linecache.getline(parmf, j)
           nbparms = readnb(nbparms, line)

    linecache.clearcache()

    #Deal with the equil atoms
    for i in list(eqdict.keys()):
      for j in eqdict[i]:
        if len(i) == 1:
          nbparms[j] = nbparms[i + ' ']
        else:
          nbparms[j] = nbparms[i]

    parmdict = Parms(massparms, bondparms, angparms, dihparms, impparms, nbparms)

    #-------------------------------------------------------------------------
    #2. Read the frcmod file for each force field
    #-------------------------------------------------------------------------
    if ffchoice in ['ff03', 'ff03.r1', 'ff99SB', 'ff12SB', 'ff14SB']:
      if ffchoice in ['ff03', 'ff03.r1']: #Year: 2003
        parmf1 = add + 'frcmod.ff03'
      elif ffchoice == 'ff99SB': #Year: 2006
        parmf1 = add + 'frcmod.ff99SB'
      elif ffchoice == 'ff12SB': #Year: 2012
        parmf1 = add + 'frcmod.ff12SB'
      elif ffchoice == 'ff14SB': #Year: 2014
        parmf1 = add + 'frcmod.ff14SB'
      parmdict1 = read_frcmod_file(parmf1)
      parmdict.combine(parmdict1)

    #-------------------------------------------------------------------------
    #3. GAFF
    #-------------------------------------------------------------------------
    if gaff == 1:

      parmf2 = add + 'gaff.dat'
      gaff_vsinfo = linecache.getline(parmf2, 1)

      if 'Version 1.7, Nov 2013' in gaff_vsinfo:
        lndict2 = {'MASS': (2, 73), 'BOND': (75, 882), 'ANGL': (883, 5131),
                   'DIHE': (5132, 5848), 'IMPR': (5849, 5887),
                   'NONB': (5892, 5963)}
      elif 'Version 1.8, Mar 2015' in gaff_vsinfo:
        lndict2 = {'MASS': (2, 73), 'BOND': (75, 907), 'ANGL': (908, 5526),
                   'DIHE': (5527, 6267), 'IMPR': (6268, 6306),
                   'NONB': (6311, 6382)}
      else:
        # guard against a NameError on lndict2 for unknown gaff.dat versions
        raise ValueError('Unsupported gaff.dat version: ' + gaff_vsinfo.strip())

      massparms2 = {}
      bondparms2 = {}
      angparms2 = {}
      dihparms2 = {}
      impparms2 = {}
      nbparms2 = {}

      for i in list(lndict2.keys()):
        if i == "MASS":
          for j in range(lndict2[i][0], lndict2[i][1]):
            line = linecache.getline(parmf2, j)
            massparms2 = readmass(massparms2, line)
        elif i == "BOND":
          for j in range(lndict2[i][0], lndict2[i][1]):
            line = linecache.getline(parmf2, j)
            bondparms2 = readbond(bondparms2, line)
        elif i == "ANGL":
           for j in range(lndict2[i][0], lndict2[i][1]):
             line = linecache.getline(parmf2, j)
             angparms2 = readang(angparms2, line)
        elif i == "DIHE":
           for j in range(lndict2[i][0], lndict2[i][1]):
             line = linecache.getline(parmf2, j)
             dihparms2 = readgaffdih(dihparms2, line)
        elif i == "IMPR":
           for j in range(lndict2[i][0], lndict2[i][1]):
             line = linecache.getline(parmf2, j)
             impparms2 = readimp(impparms2, line)
        elif i == "NONB":
           for j in range(lndict2[i][0], lndict2[i][1]):
             line = linecache.getline(parmf2, j)
             nbparms2 = readnb(nbparms2, line)

      linecache.clearcache()
      parmdict2 = Parms(massparms2, bondparms2, angparms2, dihparms2,
                        impparms2, nbparms2)
      parmdict.combine(parmdict2)

    #-------------------------------------------------------------------------
    #4. Additional frcmod file
    #-------------------------------------------------------------------------

    for i in frcmodfs:
      parmdict3 = read_frcmod_file(i)
      parmdict.combine(parmdict3)

    return parmdict
コード例 #47
0
def data_processor(case):
    # First, we call the structure of the model:
    table1 = xlrd.open_workbook(
        "0_Model Structure/" +
        str(structural_list[case]))  # works for all scenarios
    print("$$$$$$")
    print(str(structural_list[case]))
    print("$$$$$$")
    sheet_sets_structure = table1.sheet_by_index(0)
    sheet_params_structure = table1.sheet_by_index(1)
    sheet_vars_structure = table1.sheet_by_index(2)
    #
    S_DICT_sets_structure = {
        'set': [],
        'initial': [],
        'number_of_elements': [],
        'elements_list': []
    }
    for col in range(1, 11 + 1):
        S_DICT_sets_structure['set'].append(
            sheet_sets_structure.cell_value(rowx=0, colx=col))
        S_DICT_sets_structure['initial'].append(
            sheet_sets_structure.cell_value(rowx=1, colx=col))
        S_DICT_sets_structure['number_of_elements'].append(
            int(sheet_sets_structure.cell_value(rowx=2, colx=col)))
        #
        element_number = int(sheet_sets_structure.cell_value(rowx=2, colx=col))
        this_elements_list = []
        if element_number > 0:
            for n in range(1, element_number + 1):
                this_elements_list.append(
                    sheet_sets_structure.cell_value(rowx=2 + n, colx=col))
        S_DICT_sets_structure['elements_list'].append(this_elements_list)
    #
    S_DICT_params_structure = {
        'category': [],
        'parameter': [],
        'number_of_elements': [],
        'index_list': []
    }
    param_category_list = []
    for col in range(1, 30 + 1):
        if str(sheet_params_structure.cell_value(rowx=0, colx=col)) != '':
            param_category_list.append(
                sheet_params_structure.cell_value(rowx=0, colx=col))
        S_DICT_params_structure['category'].append(param_category_list[-1])
        S_DICT_params_structure['parameter'].append(
            sheet_params_structure.cell_value(rowx=1, colx=col))
        S_DICT_params_structure['number_of_elements'].append(
            int(sheet_params_structure.cell_value(rowx=2, colx=col)))
        #
        index_number = int(sheet_params_structure.cell_value(rowx=2, colx=col))
        this_index_list = []
        for n in range(1, index_number + 1):
            this_index_list.append(
                sheet_params_structure.cell_value(rowx=2 + n, colx=col))
        S_DICT_params_structure['index_list'].append(this_index_list)
    #
    S_DICT_vars_structure = {
        'category': [],
        'variable': [],
        'number_of_elements': [],
        'index_list': []
    }
    var_category_list = []
    for col in range(1, 43 + 1):
        if str(sheet_vars_structure.cell_value(rowx=0, colx=col)) != '':
            var_category_list.append(
                sheet_vars_structure.cell_value(rowx=0, colx=col))
        S_DICT_vars_structure['category'].append(var_category_list[-1])
        S_DICT_vars_structure['variable'].append(
            sheet_vars_structure.cell_value(rowx=1, colx=col))
        S_DICT_vars_structure['number_of_elements'].append(
            int(sheet_vars_structure.cell_value(rowx=2, colx=col)))
        #
        index_number = int(sheet_vars_structure.cell_value(rowx=2, colx=col))
        this_index_list = []
        for n in range(1, index_number + 1):
            this_index_list.append(
                sheet_vars_structure.cell_value(rowx=2 + n, colx=col))
        S_DICT_vars_structure['index_list'].append(this_index_list)
    #-------------------------------------------#
    #
    all_vars = S_DICT_vars_structure['variable']
    #
    all_vars_output_dict = [{} for e in range(len(txt_list))]
    #
    output_header = [
        'Run.ID', 'Fuel', 'Fuel.DESCRIPTION', 'Technology',
        'Technology.DESCRIPTION', 'Emission', 'Emission.DESCRIPTION', 'Year'
    ]
    #-------------------------------------------------------#
    for v in range(len(all_vars)):
        output_header.append(all_vars[v])
    #-------------------------------------------------------#
    vars_as_appear = []
    case_name = txt_list[case].replace('.txt', '')
    #
    data_name = './2_Scenarios_Outputs/' + str(case_name) + '_output.txt'
    print(data_name)
    #
    ini_line = 5741012 + 2
    end_line = 11438802
    #
    for n in range(ini_line, end_line, 2):
        structure_line_raw = linecache.getline(data_name, n)
        structure_list_raw = structure_line_raw.split(' ')
        # print( structure_line_raw, data_name, n, ini_line, end_line )
        # time.sleep(20)
        structure_list_raw_2 = [
            s_line for s_line in structure_list_raw if s_line != ''
        ]
        structure_line = structure_list_raw_2[1]
        structure_list = structure_line.split('[')
        the_variable = structure_list[0]
        #
        if the_variable in all_vars:
            set_list = structure_list[1].replace(']',
                                                 '').replace('\n',
                                                             '').split(',')
            #--%
            index = S_DICT_vars_structure['variable'].index(the_variable)
            this_variable_indices = S_DICT_vars_structure['index_list'][index]
            #
            if 'y' in this_variable_indices:
                data_line = linecache.getline(data_name, n + 1)
                data_line_list_raw = data_line.split(' ')
                data_line_list = [
                    data_cell for data_cell in data_line_list_raw
                    if data_cell != ''
                ]
                useful_data_cell = data_line_list[1]
                #--%
                if useful_data_cell != '0':
                    #
                    if the_variable not in vars_as_appear:
                        vars_as_appear.append(the_variable)
                        all_vars_output_dict[case].update({the_variable: {}})
                        all_vars_output_dict[case][the_variable].update(
                            {the_variable: []})
                        #
                        # idx, not n: avoid shadowing the outer line counter
                        for idx in range(len(this_variable_indices)):
                            all_vars_output_dict[case][the_variable].update(
                                {this_variable_indices[idx]: []})
                    #--%
                    this_variable = vars_as_appear[-1]
                    all_vars_output_dict[case][this_variable][
                        this_variable].append(useful_data_cell)
                    for idx in range(len(this_variable_indices)):
                        all_vars_output_dict[case][the_variable][
                            this_variable_indices[idx]].append(set_list[idx])
                #
            #
            elif 'y' not in this_variable_indices:
                data_line = linecache.getline(data_name, n + 1)
                data_line_list_raw = data_line.split(' ')
                data_line_list = [
                    data_cell for data_cell in data_line_list_raw
                    if data_cell != ''
                ]
                useful_data_cell = data_line_list[1]
                #--%
                if useful_data_cell != '0':
                    #
                    if the_variable not in vars_as_appear:
                        vars_as_appear.append(the_variable)
                        all_vars_output_dict[case].update({the_variable: {}})
                        all_vars_output_dict[case][the_variable].update(
                            {the_variable: []})
                        #
                        for idx in range(len(this_variable_indices)):
                            all_vars_output_dict[case][the_variable].update(
                                {this_variable_indices[idx]: []})
                    #--%
                    this_variable = vars_as_appear[-1]
                    all_vars_output_dict[case][this_variable][
                        this_variable].append(useful_data_cell)
                    for idx in range(len(this_variable_indices)):
                        all_vars_output_dict[case][the_variable][
                            this_variable_indices[idx]].append(set_list[idx])
        #--%
        else:
            pass
    #
    linecache.clearcache()
    #%%
    output_address = './2_Scenarios_Outputs'
    combination_list = []  # [fuel, technology, emission, year]
    data_row_list = []
    for var in range(len(vars_as_appear)):
        this_variable = vars_as_appear[var]
        this_var_dict = all_vars_output_dict[case][this_variable]
        #--%
        index = S_DICT_vars_structure['variable'].index(this_variable)
        this_variable_indices = S_DICT_vars_structure['index_list'][index]
        #--------------------------------------#
        for k in range(len(this_var_dict[this_variable])):
            this_combination = []
            #
            if 'f' in this_variable_indices:
                this_combination.append(this_var_dict['f'][k])
            else:
                this_combination.append('')
            #
            if 't' in this_variable_indices:
                this_combination.append(this_var_dict['t'][k])
            else:
                this_combination.append('')
            #
            if 'e' in this_variable_indices:
                this_combination.append(this_var_dict['e'][k])
            else:
                this_combination.append('')
            #
            if 'l' in this_variable_indices:
                this_combination.append('')
            else:
                this_combination.append('')
            #
            if 'y' in this_variable_indices:
                this_combination.append(this_var_dict['y'][k])
            else:
                this_combination.append('')
            #
            if this_combination not in combination_list:
                combination_list.append(this_combination)
                data_row = ['' for _ in range(len(output_header))]
                # print('check', len(data_row), len(run_id) )
                data_row[0] = 0
                data_row[1] = this_combination[0]
                data_row[3] = this_combination[1]
                data_row[5] = this_combination[2]
                # data_row[7] = this_combination[3]
                data_row[7] = this_combination[4]
                #
                var_position_index = output_header.index(this_variable)
                data_row[var_position_index] = this_var_dict[this_variable][k]
                data_row_list.append(data_row)
            else:
                ref_index = combination_list.index(this_combination)
                this_data_row = deepcopy(data_row_list[ref_index])
                #
                var_position_index = output_header.index(this_variable)
                #
                if 'l' in this_variable_indices:
                    #
                    if str(this_data_row[var_position_index]) != '' and str(
                            this_var_dict[this_variable][k]) != '' and (
                                'Rate' not in this_variable):
                        this_data_row[var_position_index] = str(
                            float(this_data_row[var_position_index]) +
                            float(this_var_dict[this_variable][k]))
                    elif str(this_data_row[var_position_index]) == '' and str(
                            this_var_dict[this_variable][k]) != '':
                        this_data_row[var_position_index] = str(
                            float(this_var_dict[this_variable][k]))
                    elif str(this_data_row[var_position_index]) != '' and str(
                            this_var_dict[this_variable][k]) == '':
                        pass
                else:
                    this_data_row[var_position_index] = this_var_dict[
                        this_variable][k]
                #
                data_row_list[ref_index] = deepcopy(this_data_row)
    #
    non_year_combination_list = []
    non_year_combination_list_years = []
    for n in range(len(combination_list)):
        this_combination = combination_list[n]
        this_non_year_combination = [
            this_combination[0], this_combination[1], this_combination[2]
        ]
        if (this_combination[4] != ''
                and this_non_year_combination not in non_year_combination_list):
            non_year_combination_list.append(this_non_year_combination)
            non_year_combination_list_years.append([this_combination[4]])
        elif (this_combination[4] != ''
                and this_non_year_combination in non_year_combination_list):
            non_year_combination_list_years[non_year_combination_list.index(
                this_non_year_combination)].append(this_combination[4])
    #
    # complete_years = [ '2015', '2019', '2025', '2030', '2035', '2040', '2045', '2050' ]
    complete_years = [str(a_year) for a_year in range(2015, 2050 + 1, 1)]
    #
    for n in range(len(non_year_combination_list)):
        if len(non_year_combination_list_years[n]) != len(complete_years):
            #
            this_existing_combination = non_year_combination_list[n]
            # print('flag 1', this_existing_combination )
            this_existing_combination.append('')
            # print('flag 2', this_existing_combination )
            this_existing_combination.append(
                non_year_combination_list_years[n][0])
            # print('flag 3', this_existing_combination )
            ref_index = combination_list.index(this_existing_combination)
            this_existing_data_row = deepcopy(data_row_list[ref_index])
            #
            for n2 in range(len(complete_years)):
                #
                if complete_years[n2] not in non_year_combination_list_years[n]:
                    #
                    data_row = ['' for _ in range(len(output_header))]
                    data_row[0] = 0
                    data_row[1] = non_year_combination_list[n][0]
                    data_row[3] = non_year_combination_list[n][1]
                    data_row[5] = non_year_combination_list[n][2]
                    data_row[7] = complete_years[n2]
                    #
                    for n3 in range(len(vars_as_appear)):
                        this_variable = vars_as_appear[n3]
                        this_var_dict = all_vars_output_dict[case][
                            this_variable]
                        index = S_DICT_vars_structure['variable'].index(
                            this_variable)
                        this_variable_indices = S_DICT_vars_structure[
                            'index_list'][index]
                        #
                        var_position_index = output_header.index(this_variable)
                        #
                        print_true = False
                        #
                        if (
                                'f' in this_variable_indices
                                and str(non_year_combination_list[n][0]) != ''
                        ):  # or ( 'f' not in this_variable_indices and str(non_year_combination_list[n][0]) == '' ):
                            print_true = True
                        else:
                            pass
                        #
                        if (
                                't' in this_variable_indices
                                and str(non_year_combination_list[n][1]) != ''
                        ):  # or ( 't' not in this_variable_indices and str(non_year_combination_list[n][1]) == '' ):
                            print_true = True
                        else:
                            pass
                        #
                        if (
                                'e' in this_variable_indices
                                and str(non_year_combination_list[n][2]) != ''
                        ):  # or ( 'e' not in this_variable_indices and str(non_year_combination_list[n][2]) == '' ):
                            print_true = True
                        else:
                            pass
                        #
                        if 'y' in this_variable_indices and (
                                str(this_existing_data_row[var_position_index])
                                != '') and print_true == True:
                            data_row[var_position_index] = '0'
                            #
                        else:
                            pass
                    #
                    data_row_list.append(data_row)

    with open(output_address + '/' + case_name + '_Output' + '.csv',
              'w',
              newline='') as csvfile:
        csvwriter = csv.writer(csvfile,
                               delimiter=',',
                               quotechar='|',
                               quoting=csv.QUOTE_MINIMAL)
        csvwriter.writerow(output_header)
        for n in range(len(data_row_list)):
            csvwriter.writerow(data_row_list[n])

    gc.collect(generation=2)
    time.sleep(0.05)
コード例 #48
0
def readLine(path, line=1):
    # linecache line numbers are 1-based; the old default of line=0 always
    # returned '' because any out-of-range request yields an empty string
    re_Line = linecache.getline(path, line)
    linecache.clearcache()
    return re_Line
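
Note: linecache.getline is 1-based and returns an empty string for any
out-of-range request, which is why a line=0 default always produced ''. A
quick self-contained check:

import linecache

assert linecache.getline(__file__, 0) == ''  # 0 is out of range: empty string
first = linecache.getline(__file__, 1)       # 1 fetches the first line
linecache.clearcache()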
コード例 #49
0
def facesConstructor(facesFile,boundaryFile,ownerFile,neighbourFile,points):
	# construct an array of object faces
	faces = []
	
	# read how many faces
	n = int(linecache.getline(facesFile,19))
	print 'there are ' + str(n) + ' faces'

	for i in range(0,n):
		line = linecache.getline(facesFile,21+i)
		newFace = face(i) # construct new face object.
		for j in range(0,4):
			pointNumber = int(line[2:-2].split()[j])
			#print pointNumber
			newPoint = points[pointNumber]
			#print newPoint.x,newPoint.y,newPoint.z,newFace.faceID
			newFace.addPoint(newPoint)
		faces.append(newFace)
	
	linecache.clearcache()
	
	# read owner file
	for i in range(0,n):
		line = linecache.getline(ownerFile,22+i)
		cellID = int(line)				
		faces[i].ownerCell(cellID)
	
	# read neighbour file
	n = int(linecache.getline(neighbourFile,20))
	print 'there are ' + str(n) + ' faces in the neighbour file'
	for i in range(0,n):
		line = linecache.getline(neighbourFile,22+i)
		cellID = int(line)				
		faces[i].neighbourCell(cellID)
		
	# now, add boundary information into face in faces.
	n = int(linecache.getline(boundaryFile,18))
	print 'there are ' + str(n) + ' boundaries'

	# patches contains all patches as a patch object
	patches = []
	lineIndicator = 20
	# 
	for i in range(0,n):
		newPatch = patch(i)
		line = linecache.getline(boundaryFile,lineIndicator)
		# adding patch name
		newPatch.patchName = line.strip()
		
		lineIndicator = lineIndicator +2
		line = linecache.getline(boundaryFile,lineIndicator)
		# adding patch type
		newPatch.addPatchType(line[:-2].split()[1])
		
		lineIndicator = lineIndicator +1
		line = linecache.getline(boundaryFile,lineIndicator)
		newPatch.nFaces = int(line[:-2].split()[1])
		
		
		lineIndicator = lineIndicator +1
		line = linecache.getline(boundaryFile,lineIndicator)
		newPatch.startFace = int(line[:-2].split()[1])
	
		if(newPatch.ifCyclic):
			lineIndicator = lineIndicator +2
			line = linecache.getline(boundaryFile,lineIndicator)
			# adding neighbour patch name
			newPatch.neighbourPatchName = line[:-2].split()[1]
			print 'neighbourPatchName is ',newPatch.neighbourPatchName
		
		lineIndicator = lineIndicator +2
		patches.append(newPatch)
	
	# now, all patches have been added to patches.
	# find cyclic neighbour patch.
	for i in range(0,n):
		for j in range(0,n):
			if (patches[i].neighbourPatchName == patches[j].patchName):
				patches[i].neighbourPatch = patches[j]
	
	
	for i in range(0,n):
		nFaces = patches[i].nFaces
		startFace = patches[i].startFace
		
		for j in range(startFace,startFace+nFaces):
			faces[j].ifBoundary = True
			faces[j].addPatchType(patches[i].patchType) # forward patchType from patch to face
			
			if(faces[j].ifCyclic):	# if this face is cyclic, find its cyclic face ID
				faces[j].pFaceID = (j - startFace)+ patches[i].neighbourPatch.startFace
				
	linecache.clearcache()
	return faces	
コード例 #50
0
def __del__(self):
    linecache.clearcache()
コード例 #51
0
ファイル: AllinOne1py3.py プロジェクト: xjlnancy/RushCoupon
def AllinOneExit1():
    print("程序5秒后自动退出")
    linecache.clearcache()
    time.sleep(5)
    os._exit(0)
コード例 #52
0
    def __init__(self,
                 intervals_file,
                 fasta_file,
                 dnase_file,
                 cell_line=None,
                 mappability_file=None,
                 GENCODE_dir=None,
                 use_linecache=True):

        # intervals
        if use_linecache:
            linecache.clearcache()
            BT = BedToolLinecache
        else:
            BT = BedTool

        self.bt = BT(intervals_file)

        # Fasta
        self.fasta_file = fasta_file
        self.fasta_extractor = None  # initialize later
        # DNase
        self.dnase_file = dnase_file
        self.dnase_extractor = None
        # mappability
        if mappability_file is None:
            # download the mappability file if not existing
            mappability_file = os.path.join(
                this_dir, "../../template/dataloader_files",
                "wgEncodeDukeMapabilityUniqueness35bp.bigWig")
            if not os.path.exists(mappability_file):
                print("Downloading the mappability file")
                urlretrieve(
                    "http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/wgEncodeMapability/wgEncodeDukeMapabilityUniqueness35bp.bigWig",
                    mappability_file)
                print("Download complete")

        self.mappability_file = mappability_file
        self.mappability_extractor = None
        # Gencode features
        if GENCODE_dir is None:
            gp = os.path.join(this_dir, "dataloader_files/gencode_features/")
        else:
            gp = GENCODE_dir
        self.gencode_beds = [
            ("cpg", BedTool(gp + '/cpgisland.bed.gz')),
            ("cds",
             BedTool(gp + '/wgEncodeGencodeBasicV19.cds.merged.bed.gz')),
            ("intron",
             BedTool(gp + '/wgEncodeGencodeBasicV19.intron.merged.bed.gz')),
            ("promoter",
             BedTool(gp + '/wgEncodeGencodeBasicV19.promoter.merged.bed.gz')),
            ("utr5",
             BedTool(gp + '/wgEncodeGencodeBasicV19.utr5.merged.bed.gz')),
            ("utr3",
             BedTool(gp + '/wgEncodeGencodeBasicV19.utr3.merged.bed.gz')),
        ]
        # Overlap beds - could be done incrementally
        print("Overlapping all the bed-files")
        # The BT() and .fn are there in order to leverage BedToolLinecache
        self.overlap_beds = [(b, BT(self.bt.intersect(v, wa=True, c=True).fn))
                             for b, v in self.gencode_beds]
        print("Assesing the file")
        assert len(self.overlap_beds[1][1]) == len(self.bt)
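
Note: the wa=True, c=True keywords passed to intersect() map to bedtools'
-wa -c flags, so every interval of the left-hand file is echoed unchanged
with an appended overlap count and the row count is preserved, which is what
the assertion above relies on. A minimal sketch with illustrative file names:

from pybedtools import BedTool

peaks = BedTool('intervals.bed')                 # illustrative paths
cpg = BedTool('cpgisland.bed.gz')
counted = peaks.intersect(cpg, wa=True, c=True)  # bedtools intersect -wa -c
assert len(counted) == len(peaks)                # one output row per input row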
コード例 #53
0
ファイル: output.py プロジェクト: danielsylvinson/OverlApp
def vmd_network_creator(filename,
                        cube_files=None,
                        render=False,
                        iso=(-0.01, 0.01),
                        abspath=False,
                        **kwargs):
    '''Creates a VMD script file from a list of cube files provided.
  
  **Parameters:**
  
  filename : str
    Contains the base name of the output file.
  cube_files : None or list of str
    Specifies the cube files which serve as input for the VMD script.
    If None, searches the directory for '.cb' and '.cube' files.
  render : bool
    If True, the VMD script will automatically create '.tga' files for each 
    cube file.
  iso : tuple
    Specifies the isovalue for the blue and the red isosurface, respectively.
  abspath : bool
    If True, the paths of the cube files will be expanded to absolute file paths.
  '''
    from os import path, listdir
    import linecache
    from orbkit import vmd_network_draft
    if cube_files is None:
        display(
            'No list of cube (.cb or .cube) filenames provided. Checking the'
            ' directory of the output file...')
        cube_files = []
        for fid in listdir(path.dirname(filename)):
            if fid.endswith('.cb') or fid.endswith('.cube'):
                cube_files.append(fid)
        if cube_files == []:
            raise IOError('Could not find valid cube files in %s' %
                          path.dirname(filename))
    elif isinstance(cube_files, str):
        cube_files = [cube_files]
    elif not isinstance(cube_files, list):
        raise IOError('`cube_files` has to be a list of strings.')

    title = []
    mo = ''
    for i, f in enumerate(cube_files):
        title = linecache.getline(f, 2)
        if title.split() == []:
            title = path.splitext(path.basename(f))[0]
        else:
            title = title.replace('\n', '').replace(' ', '')
        linecache.clearcache()
        pid = path.abspath(f) if abspath else path.relpath(
            f, path.dirname(filename))
        mo += vmd_network_draft.mo_string % {
            'c': i,
            'n1': pid,
            'n2': title,
            'isored': iso[0],
            'isoblue': iso[1],
            'render': '' if render else '#'
        }

    f = open('%(f)s.vmd' % {'f': filename}, 'w')
    f.write(vmd_network_draft.vmd_string % {'mo': mo})
    f.close()
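
Note: the title lookup above leans on the Gaussian cube header layout, in
which the first two lines are free-text comments, so linecache.getline(f, 2)
fetches the second comment line. An illustrative header (placeholder values,
not from any real file):

 Cube file generated by orbkit           <- line 1: comment
 MO 12, isosurface data                  <- line 2: the title read above
    3   -5.0   -5.0   -5.0               <- line 3: atom count and grid origin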
コード例 #54
0
ファイル: rebuild.py プロジェクト: 18636800170/videoWebsie
def rebuild(module, doLog=1):
    """
    Reload a module and do as much as possible to replace its references.
    """
    global lastRebuild
    lastRebuild = time.time()
    if hasattr(module, 'ALLOW_TWISTED_REBUILD'):
        # Is this module allowed to be rebuilt?
        if not module.ALLOW_TWISTED_REBUILD:
            raise RuntimeError("I am not allowed to be rebuilt.")
    if doLog:
        log.msg('Rebuilding {}...'.format(str(module.__name__)))

    # Safely handle adapter re-registration
    from twisted.python import components
    components.ALLOW_DUPLICATES = True

    d = module.__dict__
    _modDictIDMap[id(d)] = module
    newclasses = {}
    classes = {}
    functions = {}
    values = {}
    if doLog:
        log.msg('  (scanning {}): '.format(str(module.__name__)))
    for k, v in d.items():
        if _isClassType(type(v)):
            # ClassType exists on Python 2.x and earlier.
            # Failure condition -- instances of classes with buggy
            # __hash__/__cmp__ methods referenced at the module level...
            if v.__module__ == module.__name__:
                classes[v] = 1
                if doLog:
                    log.logfile.write("c")
                    log.logfile.flush()
        elif type(v) == types.FunctionType:
            if v.__globals__ is module.__dict__:
                functions[v] = 1
                if doLog:
                    log.logfile.write("f")
                    log.logfile.flush()
        elif isinstance(v, type):
            if v.__module__ == module.__name__:
                newclasses[v] = 1
                if doLog:
                    log.logfile.write("o")
                    log.logfile.flush()

    values.update(classes)
    values.update(functions)
    fromOldModule = values.__contains__
    newclasses = newclasses.keys()
    classes = classes.keys()
    functions = functions.keys()

    if doLog:
        log.msg('')
        log.msg('  (reload   {})'.format(str(module.__name__)))

    # Boom.
    reload(module)
    # Make sure that my traceback printing will at least be recent...
    linecache.clearcache()

    if doLog:
        log.msg('  (cleaning {}): '.format(str(module.__name__)))

    for clazz in classes:
        if getattr(module, clazz.__name__) is clazz:
            log.msg("WARNING: class {} not replaced by reload!".format(
                reflect.qual(clazz)))
        else:
            if doLog:
                log.logfile.write("x")
                log.logfile.flush()
            clazz.__bases__ = ()
            clazz.__dict__.clear()
            clazz.__getattr__ = __getattr__
            clazz.__module__ = module.__name__
    if newclasses:
        import gc
    for nclass in newclasses:
        ga = getattr(module, nclass.__name__)
        if ga is nclass:
            log.msg("WARNING: new-class {} not replaced by reload!".format(
                reflect.qual(nclass)))
        else:
            for r in gc.get_referrers(nclass):
                if getattr(r, '__class__', None) is nclass:
                    r.__class__ = ga
    if doLog:
        log.msg('')
        log.msg('  (fixing   {}): '.format(str(module.__name__)))
    modcount = 0
    for mk, mod in sys.modules.items():
        modcount = modcount + 1
        if mod == module or mod is None:
            continue

        if not hasattr(mod, '__file__'):
            # It's a builtin module; nothing to replace here.
            continue

        if hasattr(mod, '__bundle__'):
            # PyObjC has a few buggy objects which segfault if you hash() them.
            # It doesn't make sense to try rebuilding extension modules like
            # this anyway, so don't try.
            continue

        changed = 0

        for k, v in mod.__dict__.items():
            try:
                hash(v)
            except Exception:
                continue
            if fromOldModule(v):
                if _isClassType(type(v)):
                    if doLog:
                        log.logfile.write("c")
                        log.logfile.flush()
                    nv = latestClass(v)
                else:
                    if doLog:
                        log.logfile.write("f")
                        log.logfile.flush()
                    nv = latestFunction(v)
                changed = 1
                setattr(mod, k, nv)
            else:
                # Replace bases of non-module classes just to be sure.
                if _isClassType(type(v)):
                    for base in v.__bases__:
                        if fromOldModule(base):
                            latestClass(v)
        if doLog and not changed and ((modcount % 10) == 0):
            log.logfile.write(".")
            log.logfile.flush()

    components.ALLOW_DUPLICATES = False
    if doLog:
        log.msg('')
        log.msg('   Rebuilt {}.'.format(str(module.__name__)))
    return module
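
Note: typical interactive use of this Twisted utility is a one-liner; a usage
sketch with a hypothetical application module:

from twisted.python.rebuild import rebuild

import myapp.handlers as handlers  # hypothetical module name
handlers = rebuild(handlers)       # reload it and patch live references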
コード例 #55
0
def test_lazycache_check(self):
    linecache.clearcache()
    linecache.lazycache(NONEXISTENT_FILENAME, globals())
    linecache.checkcache()
コード例 #56
0
ファイル: dataloader.py プロジェクト: yynst2/models-1
    def __init__(self,
                 intervals_file,
                 fasta_file,
                 dnase_file,
                 cell_line=None,
                 RNAseq_PC_file=None,
                 mappability_file=None,
                 use_linecache=True):

        # intervals
        if use_linecache:
            linecache.clearcache()
            BT = BedToolLinecache
        else:
            BT = BedTool

        self.bt = BT(intervals_file)

        # Fasta
        self.fasta_file = fasta_file
        self.fasta_extractor = None  # initialize later
        # DNase
        self.dnase_file = dnase_file
        self.dnase_extractor = None
        # mappability
        if mappability_file is None:
            # download the mappability file if not existing
            common_dl_dir = os.path.join(
                this_dir, "../../template/downloaded/dataloader_files")
            makedir_exist_ok(common_dl_dir)
            rf = RemoteFile(
                url=
                "http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/wgEncodeMapability/wgEncodeDukeMapabilityUniqueness35bp.bigWig",
                md5="1d15ddafe2c8df51cf08495db96679e7")
            mappability_file = os.path.join(
                common_dl_dir, "wgEncodeDukeMapabilityUniqueness35bp.bigWig")
            if not os.path.exists(mappability_file) or not rf.validate(
                    mappability_file):
                # download the path
                rf.get_file(mappability_file)
        self.mappability_file = mappability_file
        self.mappability_extractor = None
        # Get the metadata features
        if cell_line is None:
            if RNAseq_PC_file is None:
                raise ValueError(
                    "RNAseq_PC_file has to be specified when cell_line=None")
            assert os.path.exists(RNAseq_PC_file)
        else:
            # Using the pre-defined cell-line
            output_dir = os.path.join(
                this_dir,
                "../../template/downloaded/dataloader_files/RNAseq_features/")
            makedir_exist_ok(output_dir)
            RNAseq_PC_file = os.path.join(output_dir, cell_line, "meta.txt")
            url_template = (
                'https://s3.eu-central-1.amazonaws.com/kipoi-models/dataloader_files/'
                'FactorNet/dataloader_files/RNAseq_features/{}/meta.txt')
            # rf = RemoteFile(url=url_template.format(cell_line))
            if not os.path.exists(
                    RNAseq_PC_file):  # or not rf.validate(mappability_file):
                # download the path
                download_url(url_template.format(cell_line),
                             os.path.join(output_dir, cell_line), "meta.txt")
                # rf.get_file(RNAseq_PC_file)

        self.meta_feat = pd.read_csv(RNAseq_PC_file, sep="\t",
                                     header=None)[0].values
コード例 #57
0
def test_lazycache_bad_filename(self):
    linecache.clearcache()
    self.assertEqual(False, linecache.lazycache('', globals()))
    self.assertEqual(False, linecache.lazycache('<foo>', globals()))
コード例 #58
0
def test_lazycache_already_cached(self):
    linecache.clearcache()
    lines = linecache.getlines(NONEXISTENT_FILENAME, globals())
    self.assertEqual(False,
                     linecache.lazycache(NONEXISTENT_FILENAME, globals()))
    self.assertEqual(4, len(linecache.cache[NONEXISTENT_FILENAME]))
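
Note: these tests exercise linecache's lazy entries. lazycache() stores only
a loader callback (in CPython, a 1-tuple in linecache.cache), and the first
getlines() call expands it into the usual (size, mtime, lines, fullname)
4-tuple, which is what the length assertion above checks. A standalone
sketch, assuming it runs as a script where the interpreter has set
__loader__ in globals():

import linecache

linecache.clearcache()
if linecache.lazycache(__file__, globals()):  # True if a usable loader exists
    print(len(linecache.cache[__file__]))     # 1: lazy entry, just a callback
linecache.getlines(__file__)                  # first access populates it
print(len(linecache.cache[__file__]))         # 4: (size, mtime, lines, fullname)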
コード例 #59
0
ファイル: MESAoutput1.py プロジェクト: michael-wise/MESAplot
    def __init__(self, aPath):
        ##    ''' Purging routines '''
        def prune():
            print "prune iteration"
            self.model_numbers = self.original_history[
                :, self.history_labels.index("model_number")]
            i = len(self.model_numbers) - 1
            while (self.model_numbers[i] > self.model_numbers[i - 1]
                   and i > 0):
                i = i - 1
            # print i
            # Reference line numbers as log output
            print "Model#", self.model_numbers[
                i], "is greater than or equal to model#", self.model_numbers[i
                                                                             -
                                                                             1]
            if i == 0:
                return 0
            Drop2 = i - 1
            # print "Drop2 " + str(Drop2)
            FindDrop1 = self.model_numbers[i]
            i = i - 1
            while self.model_numbers[i] != FindDrop1:
                i = i - 1
            Drop1 = i - 1
            # print "Drop1 " + str(Drop1)
            print "Model Cleaning: Dropping model number #" + str(Drop2)
            piece1 = self.original_history[0:Drop1]
            piece2 = self.original_history[Drop2 + 1:-1]
            self.original_history = np.concatenate([piece1, piece2])
            return 1
##        ''' END Purging routines '''

        def get_columns_names(filename):
            return linecache.getline(filename, 6).split()

        def get_GlobalInfo_names(filename):
            return linecache.getline(filename, 2).split()

        def get_GlobalInfo_values(filename):
            return linecache.getline(filename, 3).split()

        """Collects the output from a MESA run"""
        print aPath
        ## Checks if load_all_profiles is true or false in default_settings.py, then handles situation appropriately
        self.num_profiles = len(
            np.loadtxt(aPath + os.path.sep + 'profiles.index', skiprows=1))
        if ds.load_all_profiles:
            print "Loading all profiles (" + str(
                self.num_profiles
            ) + ") (load_all_profiles=True in default_settings.py)."
        else:
            if ds.num_profiles_to_load <= self.num_profiles:
                self.num_profiles = ds.num_profiles_to_load
                print "Loading " + str(
                    self.num_profiles
                ) + " profiles (load_all_profiles=False in default_settings.py)."
            else:
                print "num_profiles_to_load greater than actual number of profiles, loading all profiles (" + str(
                    self.num_profiles) + ")."

#        profile=np.loadtxt(aPath+os.path.sep+'profiles.index',skiprows=1)

# if os.path.isfile(FM.path_folder + os.path.sep + 'star.log'):
        if os.path.isfile(aPath + os.path.sep + 'star.log'):
            self.history_path = aPath + os.path.sep + 'star.log'
            profileversion = 'log'
        # elif os.path.isfile(FM.path_folder + os.path.sep + 'history.data'):
        elif os.path.isfile(aPath + os.path.sep + 'history.data'):
            self.history_path = aPath + os.path.sep + 'history.data'
            profileversion = 'profile'
        else:
            print 'Non-standard MESA data names. We will eventually support this, so please let us know there\'s a need'
        self.profile_labels = get_columns_names(aPath + os.path.sep +
                                                profileversion + '1.data')
        self.profile_GlobalInfo_names = get_GlobalInfo_names(aPath +
                                                             os.path.sep +
                                                             profileversion +
                                                             '1.data')
        self.history_size = len(np.loadtxt(self.history_path, skiprows=6))
        self.history_labels = get_columns_names(self.history_path)

        self.history_GlobalInfo_names = get_GlobalInfo_names(self.history_path)
        self.star_age_index = self.profile_GlobalInfo_names.index('star_age')
        ##History_star_data = np.loadtxt(history_path,skiprows=6, usecols=(history_labels.index('star_age'),)).tolist()
        History_star_data = np.loadtxt(
            self.history_path,
            skiprows=6,
            usecols=(self.history_labels.index('star_age'), ))
        self.original_history = np.loadtxt(
            self.history_path,
            skiprows=6)  #this is the history file before purging
        original_history_length = len(self.original_history)
        self.model_numbers = self.original_history[
            :, self.history_labels.index("model_number")]

        # print type(self.model_numbers)

        #Builds an array called profile with
        #the hystory file as its zero entry and profile_n as its nth entry.

        while prune():
            pass
        final_history_length = len(self.original_history)
        N_deleted_lines = original_history_length - final_history_length

        self.profile = [self.original_history]
        self.profile_GlobalInfo_values = [
            get_GlobalInfo_values(self.history_path)
        ]
        self.profileAge = [0]
        self.listAges = [[0, 0]]
        self.profile_information = [""]
        ## profile_age_in_History is a list of the approximated values of the profile ages as they appear in the history file
        ## the nth position in the list corresponds to the the age (as it appear in the history file) of the nth profile
        ## profile_age_index_in_History is a list of positions in the history file of the profile ages.
        ##the nth element of the list corresponds to the position in history where the age of the profile n appears.

        self.profile_age_in_History = [0]
        self.profile_age_index_in_History = [0]

        profile_size = [self.history_size]
        print 'Imported history file'

        ##Loads the content of the profiles
        for i in range(1, self.num_profiles + 1):
            newPath = aPath + os.path.sep + profileversion + str(i) + '.data'
            self.profile.append(np.loadtxt(newPath, skiprows=6))
            profile_size.append(len(np.loadtxt(newPath, skiprows=6)))
            self.profile_GlobalInfo_values.append(
                get_GlobalInfo_values(newPath))
            self.profileAge.append(
                self.profile_GlobalInfo_values[i][self.star_age_index])
            self.listAges.append([
                float(self.profile_GlobalInfo_values[i][self.star_age_index]),
                i
            ])
            self.profile_age_in_History.append(
                self.find_nearest(History_star_data,
                                  float(self.profileAge[i])))
            self.profile_age_index_in_History.append(
                History_star_data.tolist().index(
                    self.profile_age_in_History[i]))
            self.new_profile_information = ""
            for j in range(len(self.profile_GlobalInfo_names)):
                self.new_profile_information = self.new_profile_information + str(
                    self.profile_GlobalInfo_names[j]) + ":  " + str(
                        self.profile_GlobalInfo_values[i][j]) + "\n"
            self.profile_information.append(self.new_profile_information)
            print 'Imported profile #' + str(i) + ' / ' + str(
                self.num_profiles)

        ##the function sorted_profile_number takes n and returns the number of the nth profie in order of age.
        ##Example, sorted_profile_number(1) returns the profile number of the youngest profile.

        self.max_profile_size = max(profile_size[1:])

        ##  This is the column of the history file corresponding to the star age
        ##History_star_data = profile[0][:,history_labels.index('star_age')].tolist()

        ##History_star_data = np.loadtxt(history_path,skiprows=6, usecols=(history_labels.index('star_age'),)).tolist()
        # print type(History_star_data)
        ##print History_star_data.tolist()
        ##History_star_data = np.loadtxt(history_path,skiprows=6, usecols=(1,))

        ##History_star_data.where(array==item)

        ##Clean the cache
        linecache.clearcache()

        # print 'Downloaded '+ str(self.num_profiles)+ ' profiles + 1 hystory file'
        print 'Original history file has ' + str(
            self.history_size) + ' lines (excluding the header)'
        print 'deleted ' + str(
            N_deleted_lines) + ' lines from the original history file'

        ##        print "HERE"
        ##        print "now "+str(findAnomly(self.profile[0][:,self.history_labels.index("model_number")]))
        ##        print type(self.profile)

        ##print history_labels.index('star_age')
        ##print type(History_star_data)
        ##print History_star_data

        print self.profile_age_in_History[4]
        print self.profile_age_index_in_History[4]
        ##print type(profileAge[3])

        self.star_information = ""
        for i in range(len(self.history_GlobalInfo_names)):
            self.star_information = self.star_information + str(
                self.history_GlobalInfo_names[i]) + ":  " + str(
                    self.profile_GlobalInfo_values[0][i]) + "\n"

        print self.profile[0][0:len(self.profile[0]) - 2,
                              self.history_labels.index("model_number")]
コード例 #60
0
    def parse_variable(self):

        lineNumber = 0
        start_flag = True
        end_flag   = False

        with open(self.var_path, 'r') as f:
            lines = f.readlines()

            for line in lines:
                # print(line)
                if line.startswith('#') and len(line)>1 and start_flag:
                    # print('start line:', lineNumber, line)
                    lineNumber += 1
                    start_flag = False
                    end_flag   = True
                    self.sign_numb_list.append(lineNumber)
                    continue

                elif line.startswith('#') and end_flag:
                    lineNumber += 1
                    if len(line) == 2 or len(line) == 1:
                        start_flag = True
                        end_flag   = False
                        self.sign_numb_list.append(lineNumber)
                        # print('end line:', lineNumber, line)
                    else:
                        raise Exception('Please make sure the component name, DLC and variables are defined in #...#!\n'
                                        'Error occurs at line %s' % lineNumber)
                else:
                    lineNumber += 1
                # print(start_flag, end_flag)
            print('line number:', self.sign_numb_list)

        if len(self.sign_numb_list)%2 == 0:

            for j in range(len(self.sign_numb_list)):
                # clear the linecache cache before re-reading the file
                linecache.clearcache()
                if j%2 == 0:
                    start = self.sign_numb_list[j]
                    end   = self.sign_numb_list[j+1]

                    file_name = linecache.getline(self.var_path, start).strip()[1:]
                    comp_name = file_name.split(',')[0]
                    dlc_list  = [lc.strip() for lc in linecache.getline(self.var_path, start+1).strip().split(',')]
                    # print(dlc_list)

                    self.component_list.append(comp_name)
                    self.component_dlcs[comp_name] = dlc_list
                    self.dlc_list += dlc_list
                    self.component_type[comp_name] = [_.strip() for _ in file_name.split(',')[1:]]

                    # hub, gearbox and so on
                    if 'section' not in comp_name.lower():
                        # read variable
                        var_lines = linecache.getlines(self.var_path)[start+1:end-1]

                        vars_list   = []
                        unit_list   = []
                        var_no_unit = []
                        # tower_list  = []
                        # tower_sec   = []

                        for var in var_lines:
                            if "!" not in var:
                                temp = var.strip().split(',')
                                if len(temp)>=2:
                                    vars_list.append(temp[0].strip())
                                    unit_list.append(temp[1].strip())
                                    # if 'tower' in temp[0].lower:
                                    #     tower_list.append(temp[0])
                                    #     tower_sec.append(temp[2])

                                else:
                                    var_no_unit.append(var.strip())
                                    print(var.strip())

                        vars_list.insert(0, 'Time from start of output')
                        unit_list.insert(0, 's')

                        self.component_vars[comp_name] = vars_list
                        self.component_unit[comp_name] = unit_list
                        # self.component_secs[comp_name] = tower_sec

                        if var_no_unit:
                            self.comp_varwounit[comp_name] = var_no_unit
                            raise Exception('Please define unit for the variables:\n%s'
                                            % (','.join([var for k, v in self.comp_varwounit.items() for var in v])))

                    # blade and tower
                    else:
                        # from third line to the last three line
                        var_lines  = linecache.getlines(self.var_path)[start+1:end-2]
                        vars_list = []
                        unit_list = []

                        # the last two line
                        sec_line  = linecache.getlines(self.var_path)[end-2:end-1][0]
                        self.section_comp.append(comp_name)

                        # tower-offshore and onshore
                        if 'tower' in comp_name.lower():
                            # section or mbr
                            sec_mbr  = sec_line.strip().split(',')[0].strip()
                            # tower type: bottom to top or top to bottom, t2b/b2t
                            tr_type  = sec_line.strip().split(',')[1].strip()
                            # section list or mbr list in %25 file
                            sec_list = sec_line.strip().split(',')[2:]
                            if sec_list[0] != 'a':
                                sec_list = [sec.strip() for sec in sec_list]
                            # print(sec_list)
                            self.component_secs[comp_name] = [sec_mbr, tr_type, sec_list]

                        # blade
                        else:
                            sec_list = sec_line.strip().split(',')[1:]

                            # for blade, all or [1,2,...]
                            if sec_list[0] != 'a':
                                self.component_secs[comp_name] = [sec.strip() for sec in sec_list]
                            else:
                                self.component_secs[comp_name] = sec_list

                        var_no_unit = []

                        for var in var_lines:
                            if "!" not in var:
                                temp = var.strip().split(',')
                                if len(temp)>=2:
                                    vars_list.append(temp[0].strip())
                                    unit_list.append(temp[1].strip())
                                    # print(temp[0].strip(), temp[1].strip())
                                else:
                                    var_no_unit.append(var.strip())

                        vars_list.insert(0, 'Time from start of output')
                        unit_list.insert(0, 's')

                        self.component_vars[comp_name] = vars_list
                        self.component_unit[comp_name] = unit_list

                        if var_no_unit:
                            self.comp_varwounit[comp_name] = var_no_unit
                            raise Exception('Please define unit for the variables:\n%s'
                                            %(','.join([var for k,v in self.comp_varwounit.items() for var in v])))

        else:
            raise Exception('Please make sure every variable block is enclosed '
                            'between # ... # marker lines')

        self.dlc_list = list(set(self.dlc_list))
        self.dlc_list.sort()
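
Note: parse_variable expects blocks delimited by '#' lines: a header line
('#<component>, <type>, ...'), a DLC line, 'name, unit' variable lines, a
trailing section line for blade/tower components, and a closing line holding
only '#'. A plausible input layout inferred from the parser (component names,
DLCs and units are made up, not from the project):

#Hub, type1
dlc1.2, dlc6.1
Mx, kNm
My, kNm
#
#TowerSection, type1
dlc1.2
Mxy, kNm
mbr, b2t, a
#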