def count(path):
    """Walk path and aggregate per-file counts, skipping ignored folders."""

    class _Handler(WalkHandler):

        def handle_file(self, file_path, context):
            count_file(file_path, context.data)

        def handle_dir_pre(self, dir_path, context):
            context.short_circuit = os.path.basename(dir_path) in config.IGNORED_FOLDERS

        def check_short_circuit(self, _, context):
            short_circuit = context.short_circuit
            context.short_circuit = False
            return short_circuit

    walk_context = DynamicObject()
    walk_context.data = DynamicObject()
    walk_context.short_circuit = False
    all_data = DynamicObject()
    update_data(all_data, config.LEX_TOTAL, 0)
    update_data(walk_context.data, config.TAG_ALL, all_data)
    walk(path, _Handler(), context=walk_context)
    logger.debug(dump_data(walk_context.data))
    return walk_context.data
def test_walking_dict_nested_path():
    d = {"a": {"b": 1, "c": 2}}
    mock = Mock()
    walk(d, mock)
    assert mock.call_count == 2
    mock.assert_has_calls([call("a.b", 1), call("a.c", 2)])
def perform_walk():
    for path in get_path_map():
        try:
            with oscar.context(path) as context:
                walk.walk(context, path)
        except IOError:
            oscar.log.error("IOError (share deleted, perhaps)")
def test_walk_dict_keys():
    d = {"b": 1, "c": 2}
    mock = Mock()
    walk(d, mock)
    assert mock.call_count == 2
    mock.assert_has_calls([call("b", 1), call("c", 2)])
def test_context():

    class _Handler(WalkHandler):

        def handle_dir_pre(self, path, context):
            context.count = context.count + 1 if context.count else 1
            print('%d: %s' % (context.count, path))

    walk('..', _Handler(), DynamicObject())
def last_timestep_ondisk(self):
    if self.log.is_parallel:
        proc_dir = os.path.join(self.path, "processor0")
        if not os.path.exists(proc_dir):
            return 0
        r, ds, _ = next(walk(proc_dir))
        for rem in ["constant", "TDAC"]:
            if rem in ds:
                ds.remove(rem)
        ds = [float(d) for d in ds]
        return max(ds) if ds else 0
    else:
        ts = []
        r, ds, _ = next(walk(self.path))
        for t in ds:
            try:
                ts.append(float(t))
            except ValueError:
                # skip directories whose names are not time values
                pass
        return max(ts) if ts else 0.0
def test_array_keys():
    d = {"arr": [1, 2, 3]}
    mock = Mock()
    walk(d, mock)
    assert mock.call_count == 3
    mock.assert_has_calls(
        [call("arr.[0]", 1), call("arr.[1]", 2), call("arr.[2]", 3)])
def masswalk(L, p):
    ncount = 0
    perc = []
    while len(perc) == 0:
        ncount = ncount + 1
        if ncount > 1000:
            print("Couldn't make percolation cluster...")
            mass = 0
            break
        z = rand(L, L) < p  # occupy sites with probability p
        lw, num = measurements.label(z)
        perc_x = intersect1d(lw[0, :], lw[-1, :])
        perc = perc_x[where(perc_x > 0)]
    if len(perc) > 0:
        labelList = arange(num + 1)
        area = measurements.sum(z, lw, index=labelList)
        areaImg = area[lw]
        maxArea = area.max()
        zz = (lw == perc[0])
        l, r = walk(zz)
        zzz = l * r  # points where both left and right walks are non-zero
        zadd = zz + zzz
        mass = count_nonzero(zzz)
    return mass
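# Hypothetical usage sketch for masswalk above: average the singly-connected
# mass over a few samples near the square-lattice site-percolation threshold.
# The sample count and lattice size are illustrative, not from the original source.
samples = [masswalk(100, 0.59275) for _ in range(20)]
print("mean singly-connected mass:", sum(samples) / len(samples))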
def _Populate_t(self, gender, path, key, callBack):
    log.LogInfo('ResData: populating from ', path)
    if gender not in self.genderData:
        self.CreateGenderData(gender)
    genderData = self.genderData[gender]
    entries = genderData.GetEntries(key)
    pathsToEntries = genderData.GetPathsToEntries(key)
    path = path.lower()

    def processAsync(root, respath, dirs, files):
        self._AddEntry(gender, entries, pathsToEntries, root, respath, dirs, files)

    try:
        i = 0
        for root, dirs, files in walk.walk(path):
            if dirs or files:
                pdCf.BeFrameNice(300)
                respath = root.split(path)[1]
                t = uthread.new(processAsync, root, respath, dirs, files)
                i += 1
                self._tasklets[t] = True
    finally:
        log.LogInfo('ResData:', i, ' entries populated from ', path)
        if callBack:
            uthread.new(callBack)
def CollectCharacterSelectionPaths():
    respaths = []
    for root, dirs, files in walk.walk('res:/graphics/character/dnafiles/characterselect/'):
        for f in files:
            respaths.append(root + '/' + f)
    return respaths
def main():
    data_file = ROOT.TFile.Open("hist/data15_13TeV/hadd.root")
    ttbar_file = ROOT.TFile.Open("hist/mc15_13TeV.410007.ttbar_allhad/hadd.root")
    if not os.path.isdir("hist/qcd"):
        os.makedirs("hist/qcd")
    outfile = ROOT.TFile.Open("hist/qcd/qcd.root", "recreate")
    for fullpath, dirnames, objnames, _ in walk.walk(data_file):
        if "2tag" not in fullpath:
            continue
        filepath, dirpath = fullpath.split(":/")
        rootdir = outfile.mkdir(dirpath)
        for name in objnames:
            histpath = os.path.join(dirpath, name)
            data_hist = data_file.Get(histpath)
            ttbar_hist = ttbar_file.Get(histpath)
            qcd_hist = copy.copy(data_hist)
            ROOT.SetOwnership(qcd_hist, False)
            if ttbar_hist:
                qcd_hist.Add(ttbar_hist, -1.0)
            rootdir.cd()
            qcd_hist.Write()
def PopulateShaderLibraryFromFiles():
    global _isShaderLibraryPopulated
    global _isShaderLibraryInitialized
    if _isShaderLibraryInitialized:
        return
    _isShaderLibraryInitialized = True

    def _AddToShaderLibrary(filepath):
        highLevelShader = blue.resMan.LoadObject(filepath)
        if highLevelShader is not None:
            try:
                shaderManager.shaderLibrary.append(highLevelShader)
            except Exception:
                logger.exception('Exception loading High Level Shader: %s', filepath)
                sys.exc_clear()
        else:
            logger.error('Unable to find shader library object: %s', filepath)

    filesToLoad = set()
    for path, dirs, files in walk.walk('res:/Graphics/Shaders/ShaderDescriptions'):
        for f in files:
            filename, extension = os.path.splitext(f)
            if extension in ('.red', '.black'):
                filepath = path + '/' + filename + '.red'
                filesToLoad.add(filepath)
    uthread2.map(_AddToShaderLibrary, filesToLoad)
    _isShaderLibraryPopulated = True
def _Populate_t(self, gender, path, key, callBack):
    log.LogInfo('ResData: populating from ', path)
    if gender not in self.genderData:
        self.CreateGenderData(gender)
    genderData = self.genderData[gender]
    entries = genderData.GetEntries(key)
    pathsToEntries = genderData.GetPathsToEntries(key)
    path = path.lower()

    def processAsync(root, respath, dirs, files):
        self._AddEntry(gender, entries, pathsToEntries, root, respath, dirs, files)

    try:
        i = 0
        for root, dirs, files in walk.walk(path):
            if dirs or files:
                pdCf.BeFrameNice(300)
                respath = root.split(path)[1]
                t = uthread.new(processAsync, root, respath, dirs, files)
                i += 1
                self._tasklets[t] = True
    finally:
        log.LogNotice('ResData:', i, ' entries populated from ', path)
        if callBack:
            uthread.new(callBack)
def __init__(self, name=None, commit=None, *args, **kwargs):
    """
    Construct a blueprint.  Extra arguments are used to create a `dict`,
    which is then sent through the `blueprint`(5) algorithm to be ingested
    into this `Blueprint` object with the proper types.  (The structure
    makes heavy use of `defaultdict` and `set`.)
    """
    self.name = name
    self._commit = commit

    def file(pathname, f):
        self.add_file(pathname, **f)

    def package(manager, package, version):
        self.add_package(manager, package, version)

    def service(manager, service):
        self.add_service(manager, service)

    def service_file(manager, service, pathname):
        self.add_service_file(manager, service, pathname)

    def service_package(manager, service, package_manager, package):
        self.add_service_package(manager, service, package_manager, package)

    def service_source(manager, service, dirname):
        self.add_service_source(manager, service, dirname)

    def source(dirname, filename, gen_content, url):
        if url is not None:
            self.add_source(dirname, url)
        elif gen_content is not None:
            self.add_source(dirname, filename)

    walk.walk(dict(*args, **kwargs),
              file=file,
              package=package,
              service=service,
              service_file=service_file,
              service_package=service_package,
              service_source=service_source,
              source=source)
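# A minimal sketch of the callback-dispatch idea used by walk.walk above: each
# recognized key in the dict is routed to its named callback. This illustrates
# the pattern only; it is not the actual blueprint(5) schema or walk API.
def walk_callbacks(data, **callbacks):
    """Invoke the callback registered for each top-level key, if any."""
    for key, value in data.items():
        handler = callbacks.get(key)
        if handler is not None:
            handler(value)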
def build_comments(url):
    if url[-1] != '/':
        url += '/'
    page = requests.get(url)
    content_type = page.headers['Content-Type']
    if 'application/rss' in content_type:
        from rss import rss
        return rss(page)
    return walk(page)
def gather_files_to_prefetch(folder, file_set):
    """
    Gathers files to prefetch from the given folder.
    Files that do not exist locally are added to the file_set.
    """
    for path, dirs, files in walk.walk(folder):
        for f in files:
            filename = path + '/' + f
            if not blue.paths.FileExistsLocally(filename):
                file_set.add(filename)
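# Hypothetical usage sketch for gather_files_to_prefetch above: collect files
# missing locally from several resource folders into one set before
# prefetching (the folder names are illustrative only).
missing_files = set()
for folder in ('res:/Graphics/Shaders', 'res:/UI/Textures'):
    gather_files_to_prefetch(folder, missing_files)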
def find_logs(self, log_format):
    """Return a list of (ctime, filename) tuples for matching log files."""
    r, d, files = next(walk(self.path))
    # TODO use regex to find logs
    files = [os.path.join(r, f) for f in files if log_format in f]
    ctimes = [os.path.getctime(f) for f in files]
    return list(zip(ctimes, files))
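# The TODO in find_logs suggests regex matching; a sketch of that variant,
# assuming log_format would become a pattern such as r"\.log$" (hypothetical).
import re

def filter_logs(filenames, pattern):
    """Keep only filenames matching the given regular expression."""
    rx = re.compile(pattern)
    return [f for f in filenames if rx.search(f)]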
def visit_assoc(self, query):
    if len(query) > 2:
        self.visit(query[2])
    assoc = query[1]
    self.parent_key = assoc
    # TODO: investigate if eager computation here is necessary
    self.parent_iter = iter(list(leaf_it(self.root)))
    self.iter = walk(self.root)
    self._visit_driver_assoc(self.iter, assoc, self.driver_assoc)
    self.iter = iter([self.root])
def test():
    from walk import walk
    from numpy.random import multivariate_normal, seed
    from numpy import vstack, ones, eye
    seed(2)  # Remove uncertainty on tests

    # Set a number of good and bad chains
    Ngood, Nbad = 25, 2

    # Make chains mean-reverting chains with widely separated values for
    # bad and good; put bad chains first.
    chains = walk(1000, mu=[1] * Nbad + [5] * Ngood, sigma=0.45, alpha=0.1)

    # Check IQR and Grubbs
    assert (identify_outliers('IQR', chains, None) == range(Nbad)).all()
    assert (identify_outliers('Grubbs', chains, None) == range(Nbad)).all()

    # Put points for 'bad' chains at [-1,...,-1] and 'good' chains at [1,...,1]
    x = vstack((multivariate_normal(-ones(4), .1 * eye(4), size=Nbad),
                multivariate_normal(ones(4), .1 * eye(4), size=Ngood)))
    assert identify_outliers('Mahal', chains, x)[0] in range(Nbad)

    # Put points for _all_ chains at [1,...,1] and check that mahal returns []
    xsame = multivariate_normal(ones(4), .2 * eye(4), size=Ngood + Nbad)
    assert identify_outliers('Mahal', chains, xsame) == []

    # Check again with large variance
    x = vstack((multivariate_normal(-3 * ones(4), eye(4), size=Nbad),
                multivariate_normal(ones(4), 10 * eye(4), size=Ngood)))
    assert identify_outliers('Mahal', chains, x) == []

    # =====================================================================
    # Test replacement

    # Construct a state object
    from numpy.linalg import norm
    from state import MCMCDraw
    Ngen, Npop = chains.shape
    Npop, Nvar = x.shape
    state = MCMCDraw(Ngen=Ngen, Nthin=Ngen, Nupdate=0,
                     Nvar=Nvar, Npop=Npop, Ncr=0, thin_rate=0)
    # Fill it with chains
    for i in range(Ngen):
        state._generation(new_draws=Npop, x=x, logp=chains[i], accept=Npop)

    # Make a copy of the current state so we can check it was updated
    nx, nlogp = x + 0, chains[-1] + 0

    # Remove outliers
    remove_outliers(state, nx, nlogp, test='IQR', portion=0.5)

    # Check that the outliers were removed
    outliers = state.outliers()
    assert outliers.shape[0] == Nbad
    for i in range(Nbad):
        assert nlogp[outliers[i, 1]] == chains[-1][outliers[i, 2]]
        assert norm(nx[outliers[i, 1], :] - x[outliers[i, 2], :]) == 0
def load_configmap(pathstr):
    path = Path(pathstr)
    map_contents = {}

    def configmap_callback(path):
        # Canonicalize the path
        if path.is_symlink():
            try:
                path = path.resolve(strict=True)
            except OSError:
                # Silently skip unresolvable symlinks
                return
        # Skip directories
        if path.is_dir():
            return
        with path.open() as f:
            map_contents[path.name] = f.read()

    walk(path, callback=configmap_callback)
    return map_contents
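# Usage sketch for load_configmap above, assuming a Kubernetes-style configmap
# directory with one file per key (the path is illustrative only).
contents = load_configmap("/etc/config")
for key, value in contents.items():
    print(key, len(value))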
def get_inputdata(token_id, filename, number_of_walk=5, max_block=10):
    unk = max(token_id.values()) + 1
    with open(filename, "rb") as f:
        funcname, g1, g2, dic = pickle.load(f)

    def token_seqs(graph):
        # Walk the graph and tokenize each instruction sequence.
        result = []
        for ins_seq in walk(dic, graph, num=number_of_walk, maxlen=max_block):
            seq = []
            for instruction in ins_seq:
                operator, operand1, operand2, instr = get_opts(instruction)
                for part in (operator, operand1, operand2):
                    if part is not None:
                        seq.append(part)
            result.append(seq_to_token(token_id, seq, unk))
        return result

    inputseq_1 = token_seqs(g1)
    inputseq_2 = token_seqs(g2)
    return inputseq_1, inputseq_2, funcname
def gather_files_conditionally_to_prefetch(folder, condition, file_set, dependency_map):
    """
    Gathers files to prefetch from the given folder.
    Files that do not exist locally are added to the file_set.
    """
    for path, dirs, files in walk.walk(folder):
        for f in files:
            if not condition(f):
                continue
            filename = path + '/' + f
            add_file_if_needs_download(file_set, filename)
            # queue any dependencies into the same prefetch set
            for each in dependency_map.get(filename, []):
                add_file_if_needs_download(file_set, each)
def gather_files_conditionally_to_prefetch(folder, condition, file_set, dependency_map):
    for path, dirs, files in walk.walk(folder):
        for f in files:
            blue.pyos.BeNice()
            basename, extension = os.path.splitext(f)
            if extension == '.red':
                f = basename + '.black'
            if not condition(f):
                continue
            filename = path + '/' + f
            add_file_if_needs_download(file_set, filename)
            # queue any dependencies into the same prefetch set
            for each in dependency_map.get(filename, []):
                blue.pyos.BeNice()
                add_file_if_needs_download(file_set, each)
def code(wagofile, **kwargs):
    '''
    >>> tests = yaml.load(open('test/code.yaml').read())
    >>> for t in tests:
    ...     actual = code(t['wagofile'])
    ...     # print(actual)
    ...     # print(t['expected'])
    ...     assert actual == t['expected']
    '''
    com = Community()
    filepath = wagopath2filepath(wagofile)
    baby = yaml.load(open(filepath).read())
    walk(baby, com.bring_up)
    writer = CodeWriter()
    walk(baby, writer.write)
    if 'verbose' in kwargs and kwargs['verbose']:
        print(yaml.dump(baby))
        print(writer.result)
    if 'output_file' in kwargs and kwargs['output_file']:
        with open(kwargs['output_file'], 'w') as fp:
            print(writer.result, file=fp)
        print('WRITE: %s' % kwargs['output_file'])
    return baby
def PopulateShaderLibrary():
    for path, dirs, files in walk.walk('res:/Graphics/Shaders/ShaderDescriptions'):
        for f in files:
            filepath = path + '/' + f
            if filepath.endswith(('.red', '.black')):
                highLevelShader = Load(filepath)
                if highLevelShader is not None:
                    try:
                        shaderManager.shaderLibrary.append(highLevelShader)
                    except blue.error:
                        log.general.Log('Exception loading High Level Shader: %s' % filepath, log.LGERR)
                        log.LogException()
                        sys.exc_clear()
                else:
                    log.general.Log('Unable to find shader library object: %s' % filepath, log.LGERR)
def P_sc(produce_data=False):
    pc = 0.59275
    nsamp = 100
    p_vals = np.linspace(pc, 0.7, 10)
    pmpc = abs(p_vals - pc)
    L_vals = [2**k for k in range(6, 10)]
    if produce_data:
        P_vals = np.zeros((len(p_vals), len(L_vals)))
        for i, p in tqdm(enumerate(p_vals)):
            for j, L in enumerate(L_vals):
                M_sc = 0
                for k in range(nsamp):
                    perc = generate_percolating_cluster(L, p)
                    left, right = walk(perc)
                    bonds = np.array(left * right, dtype=bool)
                    M_sc += np.sum(bonds)
                P_vals[i, j] = M_sc / nsamp / L**2
        np.save("data/singly_connected/P_SC.npy", P_vals)
    else:
        P_vals = np.load("data/singly_connected/P_SC.npy")
    for i, L in enumerate(L_vals):
        plt.plot(pmpc, P_vals[:, i], label=f"L={L}")
    plt.xlabel(r"$p-p_c$")
    plt.ylabel(r"$P_{SC}$")
    plt.legend()
    plt.show()
    print(np.log10(P_vals[:, -1]))
    print(pmpc)
    print(np.log10(pmpc))
    a, b = np.polyfit(np.log10(pmpc[1:]), np.log10(P_vals[1:, -1]), deg=1)
    print(f"Exponent x={a}")
    plt.plot(np.log10(pmpc), np.log10(P_vals[:, -1]), 'ro', label='data')
    plt.plot(np.log10(pmpc), np.log10(10**b * pmpc**a), 'b--',
             label=r'fit of $P_{SC} \propto |p-p_c|^{%.4f}$' % a)
    plt.xlabel(r"$\log_{10}(p-p_c)$")
    plt.ylabel(r"$\log_{10}(P_{SC})$")
    plt.legend()
    plt.show()
def Preload_t(self, rootFolder, extensions=None, yamlFilter=None):
    yamlFiles = []
    # str.endswith needs a tuple, not a list
    extensions = tuple(extensions or ['.yaml'])
    for root, dirs, files in walk.walk(rootFolder):
        for fileName in files:
            fileNameLower = fileName.lower()
            if fileNameLower.endswith(extensions):
                path = '{0}/{1}'.format(root.lower(), fileNameLower)
                if path not in self.cache:
                    yamlFiles.append(path)
        BeFrameNice()
    for path in yamlFiles:
        t = uthread.new(self._ReadAndAddToCache, path, yamlFilter)
        self._tasklets[t] = True
    if rootFolder:
        log.LogInfo('YamlPreloader:', len(yamlFiles), 'yaml files preloaded from', rootFolder)
def listdir(dir_path, filters=(fs_filters.no_hidden, fs_filters.no_system),
            full_path=False, recursed=False, followlinks=True, base=None):
    with_base = os.path.join(base or "", dir_path)
    prefix = len(dir_path)
    if dir_path[-1] != "/":
        prefix += 1
    if recursed:
        r = []
        for top, dirs, nondirs in walk(dir_path, use_nlink=2 if followlinks else 1,
                                       filters=filters, base=base):
            r.extend([(top[prefix:], nd) for nd in nondirs])
    else:
        def check(name):
            # TODO consider (base, dir_path, name, ...)
            return fs_filters.check_filters(
                dir_path, "", name,
                os.lstat(os.path.join(base or "", dir_path, name)), filters)

        r = [("", name) for name in os.listdir(with_base) if check(name)]
    if full_path:
        return [os.path.join(dir_path, rel, name) for rel, name in r]
    return [os.path.join(rel, name) for rel, name in r]
def SetupNebulas(self):
    res = walk.walk(NEBULA_RES_PATH)
    for dirpath, dirnames, filenames in res:
        for filename in filenames:
            if not filename.lower().endswith(('_blur.dds', '_refl.dds')):
                if '.dds' in filename.lower():
                    resPath = os.path.join(dirpath, filename).replace('\\', '/')
                    self.nebulaPaths.append(str(resPath.lower()))
    scene = sm.GetService('sceneManager').GetActiveScene()
    for i, resource in enumerate(scene.backgroundEffect.resources):
        if resource.name == 'NebulaMap':
            self.currentNebulaPath = resource.resourcePath.replace('\\', '/')
            self.sceneResourceIndex = i
            break
    if self.currentNebulaPath is not None:
        self.currentNebulaIndex = self.nebulaPaths.index(self.currentNebulaPath)
def M_SC_one_sample(p, L):
    binary_matrix = np.random.uniform(size=(L, L)) < p
    label_matrix, num_clusters = measurements.label(binary_matrix)
    intersect_labels = np.intersect1d(label_matrix[0, :], label_matrix[-1, :])
    percolating_cluster = intersect_labels.max()
    if percolating_cluster == 0:
        label_matrix = label_matrix.T
        intersect_labels = np.intersect1d(label_matrix[0, :], label_matrix[-1, :])
        percolating_cluster = intersect_labels.max()
        if percolating_cluster == 0:
            return 0
    percolating_matrix = label_matrix == percolating_cluster
    left, right = walk(percolating_matrix)
    return np.count_nonzero(np.logical_and(left, right))
def CreateEntries(path):
    """
    This method walks the file and folder structure in path and generates
    entries of valid modifiers that can be used in the PaperDoll system.
    """
    entries = {}
    for root, dirs, files in walk.walk(path):
        if len(root) > len(path):
            key = str(root[len(path) + 1:]).lower()
            key = key.replace('\\', '/')
            layer = key.split('/')[0]
            for every in iter(files):
                every = str(every)
                if MODIFIERNAMEFILE in every:
                    f = blue.ResFile()
                    f.Open(root + '/' + every)
                    data = f.read()
                    f.Close()
                    data = data.lower()
                    modifierNames = data.split('\r\n')
                    for modifierName in iter(modifierNames):
                        entryKey = key + '/' + modifierName
                        entries[entryKey] = [modifierName]
            if '/' not in key:
                continue
            if layer not in DOLL_PARTS + DOLL_EXTRA_PARTS + HEAD_CATEGORIES + BODY_CATEGORIES + HAIR_CATEGORIES + ACCESSORIES_CATEGORIES:
                continue
            entries[key] = []
            for every in iter(files):
                if every not in ('_', MODIFIERNAMEFILE):
                    try:
                        entries[key].append(str(root + '/' + every))
                    except Exception:
                        entries[key].append(str(root.replace('resTest', 'res') + '/' + every))
            if not entries[key]:
                del entries[key]
    return entries
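# Hypothetical usage sketch for CreateEntries above: build the modifier
# entries for a paperdoll resource tree and pick out one layer (the path and
# layer name are illustrative only).
entries = CreateEntries('res:/Graphics/Character/Female')
hair_entries = {k: v for k, v in entries.items() if k.startswith('hair/')}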
def plot_geometry(perc):
    left, right = walk(perc)
    singly_connected = left * right
    backbone = left + right
    plt.figure()
    plt.imshow(perc)
    plt.figure()
    plt.imshow(singly_connected)
    plt.figure()
    plt.imshow(backbone)
    plt.figure()
    plt.imshow(right)
    plt.figure()
    plt.imshow(left)
    plt.show()
def CreateEntries(path):
    entries = {}
    for root, dirs, files in walk.walk(path):
        if len(root) > len(path):
            key = str(root[len(path) + 1:]).lower()
            key = key.replace('\\', '/')
            layer = key.split('/')[0]
            for every in iter(files):
                every = str(every)
                if MODIFIERNAMEFILE in every:
                    f = blue.ResFile()
                    f.Open(root + '/' + every)
                    data = f.read()
                    f.Close()
                    data = data.lower()
                    modifierNames = data.split('\r\n')
                    for modifierName in iter(modifierNames):
                        entryKey = key + '/' + modifierName
                        entries[entryKey] = [modifierName]
            if '/' not in key:
                continue
            if layer not in DOLL_PARTS + DOLL_EXTRA_PARTS + HEAD_CATEGORIES + BODY_CATEGORIES + HAIR_CATEGORIES + ACCESSORIES_CATEGORIES:
                continue
            entries[key] = []
            for every in iter(files):
                if every not in ('_', MODIFIERNAMEFILE):
                    try:
                        entries[key].append(str(root + '/' + every))
                    except Exception:
                        entries[key].append(str(root.replace('resTest', 'res') + '/' + every))
            if not entries[key]:
                del entries[key]
    return entries
def estimate_Dsc(produce_data=False):
    pc = 0.59275
    nsamp = 100
    if produce_data:
        L_vals = np.array([2**k for k in range(4, 11)])
        M_vals = np.zeros(len(L_vals))
        for i, L in tqdm(enumerate(L_vals)):
            for n in tqdm(range(nsamp)):
                perc = generate_percolating_cluster(L, pc)
                left, right = walk(perc)
                bonds = np.array(left * right, dtype=bool)
                M_vals[i] += np.sum(bonds)
        M_vals /= nsamp
        np.save("data/singly_connected/M_SC.npy", np.vstack((L_vals, M_vals)))
    else:
        data = np.load("data/singly_connected/M_SC.npy")
        L_vals = data[0]
        M_vals = data[1]
    a, b = np.polyfit(np.log10(L_vals), np.log10(M_vals), deg=1)
    print(f"D_SC = {a}")
    plt.loglog(L_vals, M_vals, 'ro', label='data')
    plt.loglog(L_vals, 10**b * L_vals**a, 'b--',
               label=r'fit of $M_{SC} \propto L^{%.4f}$' % a)
    plt.xlabel(r"$L$")
    plt.ylabel(r"$M_{SC}$")
    plt.legend()
    plt.show()
def test_apply_typemismatch():
    with pytest.raises(AssertionError):
        walk(1, lambda p, c: c)

    class MyList(list):
        pass

    try:
        walk(MyList(), lambda p, c: c)
    except AssertionError:
        raise AssertionError("Should work for list ancestors")

    class MyDict(dict):
        pass

    try:
        walk(MyDict(), lambda p, c: c)
    except AssertionError:
        raise AssertionError("Should work for dict ancestors")
def walk(self, **kwargs):
    import walk
    walk.walk(self, **kwargs)
if blue.win32.IsTransgaming():
    blue.SetCrashKeyValues(u'OS', u'Mac')
else:
    import ctypes
    try:
        wine = ctypes.windll.ntdll.wine_get_version
        blue.SetCrashKeyValues(u'OS', u'Linux')
    except AttributeError:
        blue.SetCrashKeyValues(u'OS', u'Win')
    except RuntimeError:
        pass
if not blue.pyos.packaged:
    import walk
    for root, subdirs, files in walk.walk('resBin:/python'):
        for f in files:
            if f.endswith('.zip'):
                sys.path.append(blue.paths.ResolvePath('/'.join([root, f])))
logdestination = prefs.ini.GetValue('networkLogging', '')
if logdestination:
    networklogport = prefs.ini.GetValue('networkLoggingPort', 12201)
    networklogThreshold = prefs.ini.GetValue('networkLoggingThreshold', 1)
    blue.EnableNetworkLogging(logdestination, networklogport, boot.role, networklogThreshold)
fileLoggingDirectory = prefs.ini.GetValue('fileLogDirectory', '')
if fileLoggingDirectory:
    if not hasattr(blue, 'EnableFileLogging'):
        print 'File Logging configured but not supported'
    else:
        fileLoggingDirectory = os.path.normpath(fileLoggingDirectory)
def testRmDir(self):
    walk.walk(self.base_dir)
    delete.delete(self.base_dir, "総務省")
def testWalk(self):
    walk.walk(self.base_dir)
    shutil.rmtree("%s/総務省" % self.base_dir)
    walk.walk(self.base_dir)
def gather_files_to_prefetch(folder, file_set):
    for path, dirs, files in walk.walk(folder):
        for f in files:
            filename = path + '/' + f
            if not blue.paths.FileExistsLocally(filename):
                file_set.add(filename)
def main():
    qcd = ROOT.TFile.Open("hist/qcd/qcd.root")
    for fullpath, dirnames, objnames, _ in walk.walk(qcd):
        filepath, dirpath = fullpath.split(":/")
        if "2tag" not in fullpath:
            continue
        if dirpath not in ["sb_2tag77", "cr_2tag77", "sr_2tag77"]:
            continue
        for name in objnames:
            histpath = os.path.join(dirpath, name)
            qcd_nomin = qcd.Get(histpath)
            qcd_tight = qcd.Get(histpath.replace("_2tag77", "_2tag77_4tag97"))
            qcd_loose = qcd.Get(histpath.replace("_2tag77", "_2tag77_N4tag97"))
            if isinstance(qcd_nomin, ROOT.TH2):
                continue
            qcd_nomin.Scale(mu_qcd_4b_nomin)
            qcd_tight.Scale(mu_qcd_4b_tight)
            qcd_loose.Scale(mu_qcd_4b_loose)
            if "m_JJ" in histpath:
                bins = qcd_nomin.GetNbinsX() + 1
                error = ROOT.Double(0)
                print
                print " nomin: %9.3f (%.3f)" % (qcd_nomin.IntegralAndError(0, bins, error), error)
                print " tight: %9.3f (%.3f)" % (qcd_tight.IntegralAndError(0, bins, error), error)
                print " loose: %9.3f (%.3f)" % (qcd_loose.IntegralAndError(0, bins, error), error)
                print
            for hist in [qcd_nomin, qcd_tight, qcd_loose]:
                hist.Rebin(rebin(name))
                helpers.show_overflow(hist)
                hist.SetMinimum(0.0)
                hist.SetMaximum(ymax(histpath))
                hist.GetXaxis().SetTitle(xtitle(name))
                hist.GetYaxis().SetTitle("Events")
                hist.SetLineWidth(3)
                hist.SetLabelSize(0.05, "xyz")
                hist.SetTitleSize(0.05, "xyz")
                hist.GetXaxis().SetNdivisions(505)
            for bin in xrange(0, qcd_nomin.GetNbinsX() + 1):
                qcd_nomin.SetBinError(bin, 0.00001)
            qcd_nomin.SetLineColor(ROOT.kBlack)
            qcd_tight.SetLineColor(ROOT.kBlue)
            qcd_loose.SetLineColor(ROOT.kRed)
            canvas = ROOT.TCanvas(name, name, 800, 800)
            canvas.Draw()
            for hist in [qcd_nomin, qcd_tight, qcd_loose]:
                hist.Draw("histesame" if hist != qcd_nomin else "histsame")
            if True:
                ratio = helpers.ratio(name=canvas.GetName() + "_ratio",
                                      numers=[qcd_tight, qcd_loose],
                                      denom=qcd_nomin,
                                      min=0.1,
                                      max=1.9,
                                      ytitle="varied / nom.")
                share = helpers.same_xaxis(name=canvas.GetName() + "_share",
                                           top_canvas=canvas,
                                           bottom_canvas=ratio)
                canvas.SetName(canvas.GetName() + "_noratio")
                share.SetName(share.GetName().replace("_share", ""))
                canvas = share
            ks = qcd_tight.KolmogorovTest(qcd_loose)
            xatlas, yatlas = 0.19, 0.96
            xleg, yleg = 0.68, 0.84
            atlas = ROOT.TLatex(xatlas, yatlas, "ATLAS Internal")
            kolmo = ROOT.TLatex(xatlas + 0.4, yatlas, "KS(tight, loose): %.3f" % ks)
            legend_nomin = ROOT.TLatex(xleg, yleg, "nominal")
            legend_tight = ROOT.TLatex(xleg, yleg - 0.05, "tight 2-tag")
            legend_loose = ROOT.TLatex(xleg, yleg - 0.10, "loose 2-tag")
            legend_nomin.SetTextColor(ROOT.kBlack)
            legend_tight.SetTextColor(ROOT.kBlue)
            legend_loose.SetTextColor(ROOT.kRed)
            for logo in [atlas, kolmo, legend_nomin, legend_tight, legend_loose]:
                logo.SetTextSize(0.035 if logo in [atlas, kolmo] else 0.04)
                logo.SetTextFont(42)
                logo.SetNDC()
                logo.Draw()
            outdir = os.path.join("plot", dirpath)
            if not os.path.isdir(outdir):
                os.makedirs(outdir)
            canvas.SaveAs(os.path.join(outdir, canvas.GetName() + ".pdf"))
            canvas.Close()
def testSearch(self):
    walk.walk(self.base_dir)
    update.update(self.base_dir, concurrency=2)
    print search.search(self.base_dir, "総務省/subdir", "xls")
from walk import walk, pprint
from operator import itemgetter
from itertools import groupby

nested_ls = [1, [2, [3, [[[4, 5], 6]]]], 7]
pprint(nested_ls)
d = dict.fromkeys(range(6), 0)
levels = (lvl for obj, lvl in walk(nested_ls))
for lvl, grp in groupby(walk(nested_ls), key=itemgetter(1)):
    print lvl, list(grp)
def walk(self, **kwargs):
    walk.walk(self, **kwargs)
def find_cases(self):
    # rescan only if not scanned before or the root folder has changed
    root_mdate = False
    while self.running:
        # TODO store modification dates and traverse only
        # on updated folders
        top = self.path
        c = Case(top)
        subfold = top.split("/")[-1]
        if c.is_valid:
            exists = any(c.path == existing.path for existing in self.cases[subfold])
            if not exists:
                self.cases[subfold].append(c)
        if root_mdate and os.path.getmtime(top) <= root_mdate:
            # pause before rescanning, bailing out early if stopped
            for i in range(10):
                if not self.running:
                    return
                time.sleep(10)
            continue
        root_mdate = os.path.getmtime(top)
        ignore = ["boundaryData", "uniform", "processor", "constant", "TDAC",
                  "lagrangian", "postProcessing", "dynamicCode", "system"]
        for r, dirs, _ in walk(self.path):
            # prune ignored and unchanged directories from the traversal
            for d in deepcopy(dirs):
                if any(d.startswith(i) for i in ignore):
                    dirs.remove(d)
                    continue
                full_path = os.path.join(r, d)
                last_mdate = self.mdates.get(full_path)
                if last_mdate and os.path.getmtime(full_path) <= last_mdate:
                    dirs.remove(d)
            for d in dirs:
                try:
                    c = Case(os.path.join(r, d))
                    subfold = r.split("/")[-1]
                    if c.is_valid:
                        exists = any(c.path == existing.path for existing in self.cases[subfold])
                        if not exists:
                            full_path = os.path.join(r, d)
                            self.mdates[full_path] = os.path.getmtime(full_path)
                            self.cases[subfold].append(c)
                except Exception as e:
                    print("inner", e, r, d)
        for i in range(10):
            if not self.running:
                return
            time.sleep(1)
# Loop header and initialization reconstructed from context (cf. masswalk
# above): the original fragment begins mid-loop, retrying until a spanning
# cluster is found.
perc = []
ncount = 0
while len(perc) == 0:
    ncount = ncount + 1
    if ncount > 100:
        print "Couldn't make percolation cluster..."
        break
    z = rand(lx, ly) < p
    lw, num = measurements.label(z)
    perc_x = intersect1d(lw[0, :], lw[-1, :])
    perc = perc_x[where(perc_x > 0)]
    print ncount

if len(perc) > 0:
    zz = (lw == perc[0])
    # zz now contains the spanning cluster
    figure()
    # Display spanning cluster
    imshow(zz, interpolation='nearest', origin='upper', cmap=colormap)
    savefig("results/1m/exwalk-" + idString + ".pdf", dpi=300)
    # show()
    # Run walk on this cluster
    l, r = walk(zz)
    figure()
    imshow(l, interpolation='nearest', origin='upper', cmap=colormap)
    savefig("results/1m/leftwalk-" + idString + ".pdf", dpi=300)
    figure()
    imshow(r, interpolation='nearest', origin='upper', cmap=colormap)
    savefig("results/1m/rightwalk-" + idString + ".pdf", dpi=300)
    zzz = (l * r > 0)  # Find points where both l and r are non-zero
    figure()
    imshow(zzz * 2 + zz, interpolation='nearest', origin='upper', cmap=colormap)
    savefig("results/1m/bothwalk-" + idString + ".pdf", dpi=300)
    show()