def _messages_at_path(self, path):
    messages_path = os.path.join(path, "Messages")
    messages = []
    # logging.debug("looking for messages in %s", messages_path)
    if os.path.exists(messages_path):
        if os.path.isdir(messages_path):
            for dirent in os.scandir(messages_path):
                if dirent.is_file():
                    (name, ext) = os.path.splitext(dirent.name)
                    if ext == ".emlx":
                        (msgid, partial) = os.path.splitext(name)
                        msg = AMMessageRef(self, msgid, partial=(len(partial) != 0))
                        messages.append(msg)
                        # logging.debug("FOUND MESSAGE: %s", msg)
        else:
            logging.debug("%s: not a directory; not considering for messages", messages_path)
    # Scan for tries and get their messages
    for dirent in os.scandir(path):
        if len(dirent.name) == 1 and dirent.name[0] in "0123456789":
            # logging.debug("inspecting trie %s", dirent.name)
            trie_branch = os.path.join(path, dirent.name)
            messages.extend(self._messages_at_path(trie_branch))
    # logging.debug("found %d messages at %s", len(messages), messages_path)
    return messages
def move_defective_images():
    try:
        os.makedirs('ptrials/trial_1506650000/color/')
    except OSError:
        pass
    cnt = 0  # count of corrupted images (the original never initialized this)
    for trial in os.scandir('ptrials'):
        # compare with != rather than substring `in`, so only the destination
        # trial itself is skipped
        if trial.is_dir() and trial.name.startswith('trial') and trial.name != 'trial_1506650000':
            # print(trial.name)
            for img in os.scandir(trial.path + '/label'):
                try:
                    if img.name.endswith('.png'):
                        label = cv2.imread(img.path)
                        if label is None:
                            color = img.path.replace('label', 'color')
                            if os.path.exists(color):
                                path = 'ptrials/trial_1506650000/color/' + trial.name + img.name
                                shutil.copyfile(color, path)
                                print('moving image to', path)
                                cnt += 1
                            try:
                                os.remove(img.path)
                                os.remove(color)
                            except OSError:
                                pass
                except cv2.error:
                    pass
    print('Total number of corrupted images', cnt)
def reload_device_types(path):
    global ignore_list, device_types
    device_types = {}
    errors = []
    if not os.path.isdir(path):
        return errors
    if path not in sys.path:
        sys.path.append(path)
    for subdir in os.scandir(path):
        if subdir.is_dir() and subdir.name not in ignore_list:
            # first, remove pycache folders
            for entry in os.scandir(os.path.join(path, subdir.name)):
                if entry.is_dir() and entry.name == '__pycache__':
                    shutil.rmtree(os.path.join(path, subdir.name, entry.name))
            # then, load modules
            for sources in os.scandir(os.path.join(path, subdir.name)):
                if sources.is_file() and sources.name not in ignore_list and sources.name.endswith('.py'):
                    try:
                        modpath = os.path.join(path, subdir.name, sources.name)
                        modname = 'devices.' + subdir.name + '.' + sources.name[:-3]  # remove ".py" from name
                        spec = importlib.util.spec_from_file_location(modname, modpath)
                        module = importlib.util.module_from_spec(spec)
                        spec.loader.exec_module(module)
                        sys.modules[modname] = module
                        for name, cls in inspect.getmembers(module, inspect.isclass):
                            if Device in inspect.getmro(cls) and not inspect.isabstract(cls):
                                device_types[name] = cls
                    except ImportError as e:
                        logging.getLogger("system").error(
                            "%s when importing device file %s: %s"
                            % (e.__class__.__name__, modpath, str(e)))
                    except Exception as e:
                        errors.append(
                            "%s when importing device file %s: %s"
                            % (e.__class__.__name__, modpath, str(e)))
                        post_mortem()
    return errors
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory')
    args = parser.parse_args(argv)
    tokenize = Tokenizer(
        functools.partial(strip_chr, '.'),
        functools.partial(strip_chr, ','),
        functools.partial(strip_chr, ':'),
        functools.partial(strip_chr, ';'),
        BracketReplace(),
        clean_word,
    )
    words = set()
    for entry in os.scandir(args.directory):
        if entry.is_file():
            with open(entry.path, 'rb') as fp:
                txt = fp.read().decode()
            words.update(tokenize(txt))
    words = sorted(words, key=lambda word: len(word), reverse=True)
    records = []
    for entry in os.scandir(args.directory):
        if entry.is_file():
            with open(entry.path, 'rb') as fp:
                buf = fp.read().decode()
            records.append([buf.count(word) for word in words])
    for rec in records:
        print(rec)
def count_kept_reads(keep_dir, out_dir):
    """Count the kept reads"""
    jobfile = tmp.NamedTemporaryFile(delete=False, mode='wt')
    base_mode_dir = os.path.join(out_dir, 'mode')
    for index_dir in os.scandir(keep_dir):
        index_name = os.path.basename(index_dir)
        mode_dir = os.path.join(base_mode_dir, index_name)
        if not os.path.isdir(mode_dir):
            os.makedirs(mode_dir)
        for kept in os.scandir(index_dir):
            out_file = os.path.join(mode_dir, os.path.basename(kept))
            if not os.path.isfile(out_file):
                jobfile.write("grep -ce '^>' {} > {}\n".format(kept.path, out_file))
    jobfile.close()
    run_job_file(jobfile=jobfile.name, msg='Counting taken seqs', num_concurrent=16)
    return base_mode_dir
def reread(self):
    sys_module_path = conf.get('modules_path')
    user_module_path = os.path.join(Dirs.app_data_dir(), 'modules')
    os.makedirs(sys_module_path, exist_ok=True)
    os.makedirs(user_module_path, exist_ok=True)
    self._modules = []
    try:
        for entry in os.scandir(sys_module_path):
            if entry.name == 'base':
                continue
            if not entry.is_dir():
                continue
            self._read_module(entry.path, True)
        for entry in os.scandir(user_module_path):
            if entry.name == 'base':
                continue
            if not entry.is_dir():
                continue
            self._read_module(entry.path, False)
        self._read = True
    except Exception as e:
        log.exception(e)
def keys(self):
    files = list(os.scandir(self.location))
    while files:
        cur = files.pop(0)
        if cur.is_file():
            yield ''.join(cur.path.split('/')[-2:])
        else:
            files.extend(list(os.scandir(cur.path)))
def __iter__(self):
    files = list(os.scandir(self.dir_loc))
    while files:
        cur = files.pop(0)
        if cur.is_file():
            yield "".join(cur.path.split("/")[-2:])
        else:
            files.extend(list(os.scandir(cur.path)))
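# The two iterators above flatten each entry into "<parentdir><filename>" by
# splitting the path on '/', which assumes POSIX separators. A minimal,
# portable sketch of the same idea using os.path (a standalone helper for
# illustration, not part of either class):
import os

def iter_keys(root):
    stack = [root]
    while stack:
        current = stack.pop()
        for entry in os.scandir(current):
            if entry.is_file():
                # join the immediate parent directory name with the file name
                parent = os.path.basename(os.path.dirname(entry.path))
                yield parent + entry.name
            else:
                stack.append(entry.path)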
def clear_non_color_label_folders():
    for trial in os.scandir('ptrials'):
        if trial.is_dir() and trial.name.startswith('trial'):
            for folder in os.scandir(trial.path):
                if folder.is_dir() and not ('color' in folder.name or 'label' in folder.name):
                    try:
                        print('removing', folder.path)
                        shutil.rmtree(folder.path)
                    except OSError:
                        pass
def test_mkdir_does_make_dirs():
    with tempfile.TemporaryDirectory() as test_dir:
        end_dir = os.path.join(test_dir, 'dir_in_the_middle', 'leaf_dir')
        mkdir(end_dir)
        middle_dirs = list(os.scandir(test_dir))
        assertDirIsUniqueAndNamed(middle_dirs, 'dir_in_the_middle')
        leaf_dirs = list(os.scandir(middle_dirs[0].path))
        assertDirIsUniqueAndNamed(leaf_dirs, 'leaf_dir')
def make_triplets():
    register_path = '../data/facerecog/ProcessedNamedFaces/'
    anchors = {}
    positives = {}
    negatives = {}
    for entry in os.scandir(register_path):
        if entry.is_dir() and entry.name != 'General':
            name = entry.name
            addedAnchor = False
            addedPos = False
            for img in os.scandir(entry.path):
                if not img.name.startswith('.'):
                    if not addedAnchor:
                        anchors[name] = img.path
                        addedAnchor = True
                    elif not addedPos:
                        positives[name] = img.path
                        addedPos = True
    # Pair each identity with the previous identity's anchor as its negative,
    # wrapping the first identity around to the last one.
    prev = None
    First = None
    for k, v in anchors.items():
        if prev is None:
            First = k
        else:
            negatives[k] = anchors[prev]
        prev = k
    negatives[First] = anchors[k]
    X_anchor = []
    X_positive = []
    X_negative = []
    y = []
    # Create the input vectors
    for name, image in anchors.items():
        anchor = cv2.imread(image, 1)
        positive = cv2.imread(positives[name], 1)
        negative = cv2.imread(negatives[name], 1)
        X_anchor.append(anchor)
        X_positive.append(positive)
        X_negative.append(negative)
    X_anchor = np.asarray(X_anchor)
    X_positive = np.asarray(X_positive)
    X_negative = np.asarray(X_negative)
    y = np.ones((11, 3, 128))
    print(y[:, 0].shape)
    print(X_anchor.shape)
    return X_anchor, X_positive, X_negative, y
def main(args):
    files_in_first_dir = sorted(os.scandir(args.input_comparison_dirs[0]), key=lambda t: t.name)
    files_in_second_dir = sorted(os.scandir(args.input_comparison_dirs[1]), key=lambda t: t.name)
    sift = cv2.xfeatures2d.SURF_create()  # note: despite the variable name, this creates a SURF detector
    bf_matcher = cv2.BFMatcher()
    for file_index in range(len(files_in_first_dir)):
        img1 = cv2.imread(files_in_first_dir[file_index].path)
        img2 = cv2.imread(files_in_second_dir[file_index].path)
        do_feature_matching(img1, img2, sift, bf_matcher)
def getInstalledLanguagePacks():
    translation_folder = settings().getBaseFolder("translations")
    if not os.path.exists(translation_folder):
        return jsonify(language_packs=dict(_core=[]))

    core_packs = []
    plugin_packs = defaultdict(lambda: dict(identifier=None, display=None, languages=[]))
    for entry in scandir(translation_folder):
        if not entry.is_dir():
            continue

        def load_meta(path, locale):
            meta = dict()
            meta_path = os.path.join(path, "meta.yaml")
            if os.path.isfile(meta_path):
                import yaml
                try:
                    with open(meta_path) as f:
                        meta = yaml.safe_load(f)
                except:
                    pass
                else:
                    import datetime
                    if "last_update" in meta and isinstance(meta["last_update"], datetime.datetime):
                        meta["last_update"] = (meta["last_update"] - datetime.datetime(1970, 1, 1)).total_seconds()
            l = Locale.parse(locale)
            meta["locale"] = locale
            meta["locale_display"] = l.display_name
            meta["locale_english"] = l.english_name
            return meta

        if entry.name == "_plugins":
            for plugin_entry in scandir(entry.path):
                if not plugin_entry.is_dir():
                    continue
                if plugin_entry.name not in plugin_manager().plugins:
                    continue
                plugin_info = plugin_manager().plugins[plugin_entry.name]
                plugin_packs[plugin_entry.name]["identifier"] = plugin_entry.name
                plugin_packs[plugin_entry.name]["display"] = plugin_info.name
                for language_entry in scandir(plugin_entry.path):
                    plugin_packs[plugin_entry.name]["languages"].append(
                        load_meta(language_entry.path, language_entry.name))
        else:
            core_packs.append(load_meta(entry.path, entry.name))

    result = dict(_core=dict(identifier="_core", display="Core", languages=core_packs))
    result.update(plugin_packs)
    return jsonify(language_packs=result)
def dir_scan(filepath, suffix='.xml'):
    """Recursively scan dir with os.scandir() (new to Python 3.5)"""
    # scan the given root (the original scanned '.' and ignored filepath)
    for author in os.scandir(filepath):
        if author.is_dir():
            opensource = os.path.join(author.path, 'opensource')
            if os.path.isdir(opensource):
                for text_file in os.scandir(opensource):
                    if text_file.is_file() and not text_file.name.startswith('.') and text_file.name.endswith(suffix):
                        yield text_file.path
def list_translations(dirname):
    if not os.path.isdir(dirname):
        return []
    result = []
    for entry in scandir(dirname):
        locale_dir = os.path.join(entry.path, 'LC_MESSAGES')
        if not os.path.isdir(locale_dir):
            continue
        # a filter() object is always truthy in Python 3; use any() to
        # actually test whether a .mo file exists
        if any(x.name.endswith('.mo') for x in scandir(locale_dir)):
            result.append(Locale.parse(entry.name))
    return result
def get_all_blob_digests(self) -> Iterable[Digest]:
    blobs_path = os.path.join(self.get_path(), 'blobs')
    if os.path.isdir(blobs_path):
        for algorithm_path_entry in os.scandir(blobs_path):
            if algorithm_path_entry.is_dir():
                for prefix_path_entry in os.scandir(algorithm_path_entry.path):
                    if prefix_path_entry.is_dir():
                        for blob_path_entry in os.scandir(prefix_path_entry.path):
                            if blob_path_entry.is_dir():
                                yield Digest(algorithm_path_entry.name, blob_path_entry.name)
    else:
        # in a generator, a returned value is discarded; a bare return
        # simply ends the iteration (the original returned [])
        return
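# For orientation, the triple-nested scan above walks a content-addressable
# layout along these lines (hypothetical example, not taken from the source):
#   blobs/
#       sha256/                 <- algorithm directory
#           ab/                 <- two-character prefix shard
#               abcdef0123.../  -> Digest('sha256', 'abcdef0123...')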
async def resolve(self, path, movie):
    import_dirs = []
    with os.scandir(os.path.dirname(path)) as it:
        for entry in it:
            if entry.is_dir() and entry.name.lower() in ['subs', 'subtitles']:
                import_dirs.append(entry.path)
    for importpath in import_dirs:
        with os.scandir(importpath) as it:
            for entry in it:
                if entry.is_file():
                    await self.import_sub(entry.path, movie)
    return movie
def upload_files(pool, bucketname, tiles, max_zoom, raven_client,
                 bucket_prefix='tiles/'):  # pragma: no cover
    result = {
        'tile_changed': 0,
        'tile_deleted': 0,
        'tile_new': 0,
        'tile_unchanged': 0,
    }

    zoom_levels = frozenset([str(i) for i in range(max_zoom + 1)])
    tiny_levels = frozenset([str(i) for i in range(max_zoom - 2)])

    paths = []
    for entry in scandir(tiles):
        if not entry.is_dir() or entry.name not in zoom_levels:
            continue
        if entry.name in tiny_levels:
            # Process upper zoom levels in one go, as these contain
            # very few files. This avoids the overhead of repeated
            # Amazon S3 list calls and job scheduling.
            paths.append(entry.path)
        else:
            for subentry in scandir(entry.path):
                if subentry.is_dir():
                    paths.append(subentry.path)

    jobs = []
    for folder in paths:
        jobs.append(pool.apply_async(
            upload_folder, (bucketname, bucket_prefix, tiles, folder)))

    for job in jobs:
        try:
            folder_result = job.get()
            for key, value in folder_result.items():
                result[key] += value
        except Exception:
            raven_client.captureException()

    # Update status file
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucketname, validate=False)
    key = boto.s3.key.Key(bucket)
    key.key = bucket_prefix + 'data.json'
    key.set_contents_from_string(
        dumps({'updated': util.utcnow().isoformat()}),
        headers=JSON_HEADERS,
        reduced_redundancy=True)

    return result
def upload_files(pool, bucketname, tiles, max_zoom, raven_client,
                 bucket_prefix='tiles/'):  # pragma: no cover
    result = {
        'tile_changed': 0,
        'tile_deleted': 0,
        'tile_new': 0,
        'tile_unchanged': 0,
    }

    zoom_levels = frozenset([str(i) for i in range(max_zoom + 1)])
    tiny_levels = frozenset([str(i) for i in range(min(10, max_zoom))])

    paths = []
    for entry in scandir(tiles):
        if not entry.is_dir() or entry.name not in zoom_levels:
            continue
        if entry.name in tiny_levels:
            # Process upper zoom levels in one go, as these contain
            # very few files. This avoids the overhead of repeated
            # Amazon S3 list calls and job scheduling.
            paths.append(entry.path)
        else:
            for subentry in scandir(entry.path):
                if subentry.is_dir():
                    paths.append(subentry.path)

    jobs = []
    for folder in paths:
        jobs.append(pool.apply_async(
            upload_folder, (bucketname, bucket_prefix, tiles, folder)))

    for job in jobs:
        try:
            folder_result = job.get()
            for key, value in folder_result.items():
                result[key] += value
        except Exception:
            raven_client.captureException()

    # Update status file
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucketname)
    obj = bucket.Object(bucket_prefix + 'data.json')
    obj.put(
        Body=dumps({'updated': util.utcnow().isoformat()}),
        CacheControl='max-age=3600, public',
        ContentType='application/json',
    )

    return result
def copy_files(distpath, distlib, a2lib, distui):
    print('distpath: %s' % distpath)
    app_path = join(distpath, 'a2app')
    if not os.path.isdir(app_path):
        raise FileNotFoundError(
            'App Path was not found!\n %s\n'
            'Package already handled?' % app_path)
    os.rename(app_path, distui)
    print('distui: %s' % distui)

    print('copying root files ...')
    for item in os.scandir(A2PATH):
        if item.is_file() and item.name in ROOT_FILES:
            shutil.copy2(item.path, distpath)

    print('copying lib files ...')
    os.mkdir(distlib)
    for item in os.scandir(a2lib):
        if item.name.startswith('_ '):
            continue
        base, ext = os.path.splitext(item.name)
        if base in LIB_EXCLUDES:
            continue
        if item.name == 'a2ui release.ahk':
            shutil.copy2(item.path, join(distlib, 'a2ui.ahk'))
            continue
        if item.is_file():
            if ext == '.ahk':
                shutil.copy2(item.path, distlib)
        else:
            shutil.copytree(item.path, join(distlib, item.name), ignore=_ignore_items)

    print('copying ui files ...')
    for folder in UI_FOLDERS:
        shutil.copytree(join(A2UIPATH, folder), join(distui, folder), ignore=_ignore_items)
        shutil.rmtree(join(distui, 'PySide', folder), ignore_errors=True)

    for item in os.scandir(distui):
        if item.name.startswith('libopenblas.') and item.name.endswith('.dll'):
            print('removing libopenblas ...')
            os.remove(item.path)
        if item.is_dir() and item.name in ['lib2to3', 'Include', 'numpy']:
            print(f'removing {item.name} ...')
            shutil.rmtree(item.path, ignore_errors=True)
def collect(self):
    """ Collect operations """
    self.last_episode = 0  # FIXME: global state is bad
    self.last_special = {}
    for source in self.sources:
        for f in sorted(os.scandir(source), key=natural_name_sort_key):
            if f.is_dir():
                for ff in sorted(os.scandir(f.path), key=natural_name_sort_key):
                    yield (ff.path, *self.process_file(ff.name, subdir=f.name))
            else:
                yield (f.path, *self.process_file(f.name))
def create_ptrials():
    create_folder('ptrials')
    trials = os.scandir('trials')
    for trial in trials:
        if trial.is_dir():
            create_folder('p' + trial.path)
            for folder in os.scandir(trial.path):
                if folder.is_dir():
                    create_folder('p' + folder.path)
                    cnt = 0
                    # copy every 7th image into the parallel 'p' tree
                    for img in os.scandir(folder.path):
                        cnt += 1
                        if cnt % 7 == 0:
                            shutil.copyfile(img.path, 'p' + img.path)
def load_all(self):
    # Search path
    for path in sys.path:
        if not os.path.exists(path):
            continue
        for dir in scandir(path):
            if dir.name == "translations":
                for file in scandir(dir.path):
                    # YAML-load all messages files; safe_load avoids the
                    # unsafe loader that plain yaml.load() implied
                    if file.name == "msgs.yml":
                        with open(file.path) as f:
                            data = yaml.safe_load(f)
                        self.def_msgs = merge_dicts(self.def_msgs, data)
                    elif file.name == "msgs.{}.yml".format(self.lang):
                        with open(file.path) as f:
                            data = yaml.safe_load(f)
                        self.msgs = merge_dicts(self.msgs, data)
def get_all_repositories(self) -> Iterable[Repository]:
    repositories_path = os.path.join(self.get_path(), 'repositories')
    if not os.path.isdir(repositories_path):
        return
    # Scan through all of the directories in the "repositories" root. The tricky bit
    # is that repositories can either be one or two levels deep (e.g. "foo/bar" or just "bar")
    for entry in os.scandir(repositories_path):
        # if the directory is a repository dir, return it
        if Registry._is_repository_dir(entry.path):
            yield Repository(self, entry.name)
        # otherwise, it's just a namespace, so continue scanning one level below
        else:
            for entry2 in os.scandir(entry.path):
                if Registry._is_repository_dir(entry2.path):
                    yield Repository(self, entry.name + '/' + entry2.name)
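# The scan above covers registry layouts along these lines (hypothetical
# example, not taken from the source):
#   repositories/
#       redis/           -> Repository "redis" (one level deep)
#       library/
#           ubuntu/      -> Repository "library/ubuntu" (two levels deep)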
def symmetrize_color_label():
    cnt = 0
    for trial in os.scandir('ptrials'):
        if trial.is_dir() and trial.name.startswith('trial'):
            for color in os.scandir(trial.path + '/color'):
                if color.name.endswith('.png'):
                    label = color.path.replace('color', 'label')
                    if not os.path.exists(label):
                        cnt += 1
                        try:
                            os.remove(color.path)
                        except OSError:
                            pass
    print('total colors without labels', cnt)
def test_mkdir_does_make_dirs(tmpdir):
    def assertDirIsUniqueAndNamed(dirs, name):
        assert len(dirs) == 1
        assert dirs[0].is_dir()
        assert dirs[0].name == name

    end_dir = os.path.join(tmpdir, 'dir_in_the_middle', 'leaf_dir')
    utils.mkdir(end_dir)
    middle_dirs = list(os.scandir(tmpdir))
    assertDirIsUniqueAndNamed(middle_dirs, 'dir_in_the_middle')
    leaf_dirs = list(os.scandir(middle_dirs[0].path))
    assertDirIsUniqueAndNamed(leaf_dirs, 'leaf_dir')
def _scan_disk(self, on_disk, path):
    for entry in scandir(path):
        if not entry.name.startswith(".") and entry.is_dir():
            self._scan_disk(on_disk, entry.path)
        elif entry.is_file():
            on_disk[entry.path] = entry.stat().st_mtime
    return on_disk
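# For comparison, the same {path: mtime} map can be built with os.walk instead
# of manual recursion. A minimal standalone sketch (`scan_disk` is a
# hypothetical helper name, not the method above); it mirrors the original's
# behavior of pruning hidden directories while still recording hidden files:
import os

def scan_disk(path):
    on_disk = {}
    for dirpath, dirnames, filenames in os.walk(path):
        # prune hidden directories in place so os.walk does not descend into them
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for name in filenames:
            full = os.path.join(dirpath, name)
            on_disk[full] = os.stat(full).st_mtime
    return on_disk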
def process_dir(self, path, only_explicit=False):
    for entry in os.scandir(path):
        fc = self.config.get_info(entry.path)
        if fc.action == FileAction.RECURSE:
            self.apply_recurse(entry, fc)
            fc.processed = True
        elif entry.is_dir():
            # Always recurse into directories, so that items inside that are
            # explicitly marked in the config can still be processed
            self.process_dir(entry.path, only_explicit=True)
        if only_explicit and not fc.from_config:
            # While recursing inside non-recurse directories, ignore any items
            # that are not explicitly defined in the config
            continue
        if fc.action == FileAction.SKIP:
            fc.processed = True
        if fc.action == FileAction.LINK:
            self.apply_link(entry, fc)
            fc.processed = True
        if not fc.processed:
            err(f'No behaviour has been defined for the action for {entry.path}')
def sort_all_customer_files():
    ''' sorts all customer files in customer_data directory '''
    for entry in scandir(DATA_PATH + '/customer_data'):
        c = int(entry.name[2:9])
        sort_customer_file(c)
def customer_avg_by_year():
    '''
    returns a dictionary of the average rating from each year for each customer
    {customer_id: {<year>: avg_that_year}}
    '''
    release_years = load_pickle('amry')  # all movie release years
    avgs = {}
    for entry in scandir(DATA_PATH + '/customer_data'):
        print(entry.name)
        c = int(entry.name[2:9])
        with open(entry.path, 'r') as cf:
            ratings = {}
            for l in cf.readlines():
                movie_id, rating, _ = l.split(',')
                rating = int(rating)
                year = release_years[int(movie_id)]
                if year in ratings:
                    ratings[year].append(rating)
                else:
                    ratings[year] = [rating]
            for year in ratings:
                ratings[year] = sum(ratings[year]) / len(ratings[year])
            avgs[c] = ratings
    return avgs
def get_training_data(root):
    points = {'P': 1, 'R': 2, 'B': 3, 'N': 3, 'Q': 4, 'K': 9}

    # Determine directories for all game datafiles
    data = pd.DataFrame()
    files = []
    for loc in scandir(root):
        temp = listdir(loc.path)
        for file in temp:
            files.append(loc.path + '\\' + file)

    # Determine total number of games played with a winner
    tot_wins = 0
    for file in files:
        temp = pd.read_csv(file)
        if temp.loc[0]['Winner'] != 'Draw':
            tot_wins += 1

    games_used = len(files)
    count = 1
    for file in files:
        game = pd.read_csv(file)
        temp = pd.DataFrame()
        # Record each dataset twice (once as a win and once as a loss,
        # but oriented to the opposite player)
        print('Processing file ' + str(count) + ' of ' + str(len(files)))
        rec_as_winner = True
        for i in range(0, 2):
            # Aggregate training data to identify wins (both black and white wins)
            for index, row in game.iterrows():
                cover = row['Ending Coverage']
                board = row['Ending Board']
                winner = row['Winner']
                new_row = {}
                # if winner == 'Draw':
                #     break
                rec_index = 0
                if rec_as_winner:
                    new_row['win'] = 1
                    if winner == 'White':
                        # board scans use j: the original reused i here,
                        # clobbering the outer pass counter
                        j = 0
                        while j < 128:
                            if cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j += 2
                            rec_index += 1
                    elif winner == 'Black' or winner == 'Draw':
                        j = 126
                        while j > -1:
                            if cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j -= 2
                            rec_index += 1
                    else:
                        print('ERROR: Invalid game outcome')
                else:
                    new_row['win'] = 0
                    if winner == 'Black':
                        j = 0
                        while j < 128:
                            if cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j += 2
                            rec_index += 1
                    elif winner == 'White' or winner == 'Draw':
                        j = 126
                        while j > -1:
                            if cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j -= 2
                            rec_index += 1
                    else:
                        print('ERROR: Invalid game outcome')
                # Undo any cases where a draw is recorded as a winner (code uses
                # the win/loss logic to record draws twice for both board
                # orientations, but they should all be losses)
                if winner == 'Draw':
                    new_row['win'] = 0
                data = data.append(new_row, ignore_index=True)
            rec_as_winner = not rec_as_winner
            # if winner == 'Draw':
            #     games_used -= 1
            #     break
        count += 1

    print('Datapoints: ' + str(len(data)))
    data.to_csv('raw_data/training_data.csv')
    print('Games used: ' + str(games_used))
def get_directory_map(current_path):
    directories = [f.name for f in os.scandir(current_path) if f.is_dir()]
    print(len(directories))
    spoken_forms = create_spoken_forms(directories)
    return dict(zip(spoken_forms, directories))
import os
import sys

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np

TARGET_DIR = sys.argv[1]
try:
    os.mkdir(TARGET_DIR)
except FileExistsError:
    print('Directory', TARGET_DIR, 'exists.')
    ans = input('Do you want to overwrite?[Y/n] ')
    if not ans.startswith('y') and not ans.startswith('Y'):
        quit()
print('Directory created')

with os.scandir(sys.argv[2]) as it:
    for entry in it:
        if entry.is_file():
            print(entry.name)
            name = entry.name.split('.')
            benchmark = name[0] + '.' + name[1]
            fp = open(entry)
            line = fp.readline()
            x_axis = []
            direction_mpki_axis = []
            target_mpki_axis = []
            while line:
                if line.startswith("Total Instructions"):
                    total_ins = int(line.split()[2])
                    print('Total instructions: ', total_ins)
                if line.startswith("BTB"):
def generate_download_info():
    boards = {}
    errors = []

    new_tag = os.environ["RELEASE_TAG"]

    changes = {"new_release": new_tag, "new_boards": [], "new_languages": []}

    user = print_active_user()

    sha, this_version = get_version_info()

    git_info, current_info = get_current_info()

    languages = get_languages()

    support_matrix = shared_bindings_matrix.support_matrix_by_board(
        use_branded_name=False)

    new_stable = "-" not in new_tag

    previous_releases = set()
    previous_languages = set()

    # Delete the release we are replacing
    for board in current_info:
        info = current_info[board]
        for version in list(info["versions"]):
            previous_releases.add(version["version"])
            previous_languages.update(version["languages"])
            if version["stable"] == new_stable or (
                    new_stable and version["version"].startswith(this_version)):
                info["versions"].remove(version)

    board_mapping = get_board_mapping()

    for port in SUPPORTED_PORTS:
        board_path = os.path.join("../ports", port, "boards")
        for board_path in os.scandir(board_path):
            if board_path.is_dir():
                board_files = os.listdir(board_path.path)
                board_id = board_path.name
                board_info = board_mapping[board_id]

                for alias in [board_id] + board_info["aliases"]:
                    alias_info = board_mapping[alias]
                    if alias not in current_info:
                        changes["new_boards"].append(alias)
                        current_info[alias] = {"downloads": 0, "versions": []}

                    new_version = {
                        "stable": new_stable,
                        "version": new_tag,
                        "modules": support_matrix[alias],
                        "languages": languages,
                        "extensions": board_info["extensions"],
                    }
                    current_info[alias]["downloads"] = alias_info["download_count"]
                    current_info[alias]["versions"].append(new_version)

    changes["new_languages"] = set(languages) - previous_languages

    if changes["new_release"] and user:
        create_pr(changes, current_info, git_info, user)
    else:
        print("No new release to update")
        if "DEBUG" in os.environ:
            print(create_json(current_info).decode("utf8"))
#!/usr/bin/env python
from distutils.core import setup
import distutils.dist
from distutils.command.install_data import install_data
import os

packages = ['triagesched']
with os.scandir('triagesched') as rit:
    for entry in rit:
        if entry.name[0] not in ('.', '_') and entry.is_dir() \
                and os.path.isfile(f'{entry.path}/__init__.py'):
            packages.append(f'{entry.path.replace("/", ".")}')


class InstallData(install_data):
    def run(self):
        install_data.run(self)
        for pkgfile in self.outfiles:
            with open(pkgfile, 'r') as tmpfile:
                filedata = tmpfile.read()
            filedata = filedata.replace('localhost:5000', 'triage.gtmanfred.com')
            with open(pkgfile, 'w') as tmpfile:
                print(filedata, file=tmpfile)


class TriageDist(distutils.dist.Distribution):
    def __init__(self, attrs=None):
from datetime import datetime, timedelta
import os
import email.utils
import sys

from debmutate.changelog import ChangelogEditor
from debmutate.debhelper import MaintscriptEditor
from lintian_brush.debhelper import drop_obsolete_maintscript_entries
from lintian_brush.fixer import report_result, upgrade_release, warn

# If there is no information from the upgrade release, default to 5 years.
DEFAULT_AGE_THRESHOLD_DAYS = 5 * 365

maintscripts = []
for entry in os.scandir('debian'):
    if not (entry.name == "maintscript" or entry.name.endswith(".maintscript")):
        continue
    maintscripts.append(entry.name)

# Determine the date for which versions created then should still be supported.
# This is a little bit tricky since versions uploaded at a particular date
# may not have made it into the release then.
from distro_info import DebianDistroInfo  # noqa: E402

try:
    [release] = [
        r for r in DebianDistroInfo().get_all('object')
        if r.codename.lower() == upgrade_release()
    ]
except ValueError:
# axs[1].set_ylim(0, 200)
axs[0].set_title('Rate coding synapse with different time windows w',
                 fontsize='large', fontweight='bold')
axs[1].set_title('200 Leaky integrate-and-fire neurons (iaf_psc_alpha)',
                 fontsize='large', fontweight='bold')
plt.xlabel('Time [s]', fontsize='large', fontweight='bold')
axs[0].set_ylabel('Spike Rate', fontsize='large', fontweight='bold')
axs[1].set_ylabel('Neuron Index', fontsize='large', fontweight='bold')
axs[0].grid()
axs[1].grid()

# ------------- Plot Setup -------------

latest_event_file = max(
    [f for f in os.scandir("../../../Logs") if "event" in f.name],
    key=lambda x: x.stat().st_mtime).name
latest_value_file = max(
    [f for f in os.scandir("../../../Logs") if "value" in f.name],
    key=lambda x: x.stat().st_mtime).name

# shutil.copy("../../../Logs/" + latest_event_file, 'event.csv')
# shutil.copy("../../../Logs/" + latest_value_file, 'value.csv')

valueContent = []
eventContent = []
nestValueContent = []
nestEventContent = []

with open('value.csv') as f:
#!/usr/bin/python3
from os import scandir
from os.path import dirname, join, isfile

from yaml import load, SafeLoader

root = dirname(dirname(__file__))
yaml_content = load(open(join(root, 'build-pacman-repo.yaml')), Loader=SafeLoader)
expected_members = {member['directory'] for member in yaml_content['members']}

container = join(root, 'members')
actual_members = {
    entry.name
    for entry in scandir(container)
    if isfile(join(entry, 'PKGBUILD'))
}

assert actual_members == expected_members, (actual_members, expected_members)
import os, sys

for i in os.scandir('.'):
    if i.path[-2:] != 'in':
        continue
    print(i.path, file=sys.stderr)
    sys.stdin = open(i.path)
    N, L, Q = map(int, input().split(' '))
    T = input().split(' ')
    T = list(set(int(t, 2) for t in T if t != ''))
    T.sort()
    assert N == len(T)
    N = len(T)
    # print('>', N, ',', L, ',', Q, file=sys.stderr)
    # print('>', T, len(T), file=sys.stderr)
    # if N != len(T) or not all(T) or ' ' in T or '' in T or len(set(T)) != len(T):
    #     print('error on ', i.path, file=sys.stderr)
    #     continue
    with open('out/' + i.path, 'w') as f:
        print(N, L, Q, file=f)
        print(' '.join(str(t) for t in T), file=f)
import os

from sklearn.model_selection import KFold
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# img = cv2.imread('../data/LPD_competition/train/0/0.jpg')
# cv2.imshow('img', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

labels = os.listdir('../data/LPD_competition/train')
print(labels)

for dir in os.scandir('../data/LPD_competition/train'):
    print(dir)
    for file in os.scandir(dir):
        print(file)
    break

# Found 39000 images belonging to 1000 classes.
train_generator = ImageDataGenerator(rescale=1. / 255, validation_split=0.2).flow_from_directory(
    '../data/LPD_competition/train',
    target_size=(128, 128),
    # color_mode='grayscale',
    subset='training')

# Found 9000 images belonging to 1000 classes.
val_generator = ImageDataGenerator(rescale=1. / 255, validation_split=0.2).flow_from_directory(
def test_stat_uses_lstat_cache(self):
    with os.scandir(TEST_FULL_PATH1) as dir:
        entry = next(dir)
        stat_res = entry.stat(follow_symlinks=False)
        os.unlink(self.abc_path)
        self.assertEqual(stat_res, entry.stat(follow_symlinks=True))
    global img
    if event.key == 'q':
        print(object_list)
        write_xml(image_folder, img, object_list, tl_list, br_list, savedir)
        tl_list = []
        br_list = []
        object_list = []
        img = None


def toggle_selector(event):
    toggle_selector.RS.set_active(True)


if __name__ == '__main__':
    for n, image_file in enumerate(os.scandir(image_folder)):
        img = image_file
        fig, ax = plt.subplots(1, figsize=(10.5, 8))
        mngr = plt.get_current_fig_manager()
        mngr.window.setGeometry(250, 40, 800, 600)
        image = cv2.imread(image_file.path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        ax.imshow(image)

        toggle_selector.RS = RectangleSelector(
            ax, line_select_callback,
            drawtype='box', useblit=True,
            button=[1], minspanx=5,
""" Code to automatically run all notebooks as a test. Adapted from the same code for the Microsoft DoWhy library. """ import os import subprocess import tempfile import nbformat import pytest NOTEBOOKS_PATH = "docs/source/notebooks/" notebooks_list = [f.name for f in os.scandir(NOTEBOOKS_PATH) if f.name.endswith(".ipynb")] # notebooks that should not be run advanced_notebooks = [ "DiCE_with_advanced_options.ipynb", # requires tensorflow 1.x "DiCE_getting_started_feasible.ipynb", # needs changes after latest refactor "Benchmarking_different_CF_explanation_methods.ipynb" ] # Adding the dice root folder to the python path so that jupyter notebooks if 'PYTHONPATH' not in os.environ: os.environ['PYTHONPATH'] = os.getcwd() elif os.getcwd() not in os.environ['PYTHONPATH'].split(os.pathsep): os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + os.pathsep + os.getcwd() def _check_notebook_cell_outputs(filepath): """Convert notebook via nbconvert, collect output and assert if any output cells are not empty. :param filepath: file path for the notebook
def test_stat_error_msg_bytes(self):
    with os.scandir(os.fsencode(TEST_FULL_PATH1)) as dir:
        entry = next(dir)
        os.unlink(self.abc_path)
        with self.assertRaisesRegex(FileNotFoundError,
                                    r"\[Errno 2\] [^:]+: b'" + self.abc_path + "'"):
            entry.stat()
# import Bloch
import matplotlib.pyplot as plt
import Bloch
import angleCalc as ang
import os, sys
import numpy as np
import math

path = "C:\\Users\\quantum\\Desktop\\QST experiments\\August\\collimatorExperiment"
# path = '/Users/charlie.goode/qst/August/lzr'
os.chdir(path)
files = os.scandir(path)

avgStokes = [0, 0, 0]
stdStokes = [0, 0, 0]
listOfStokes = []
size = 0
avgLength = 0
thetasMeasured = []
phisMeasured = []
lengthsMeasured = []

for oneFile in files:
    if oneFile.is_dir():
        size += 1
        os.chdir(oneFile.name)
        with open("result.txt", "r") as myData:
            lines = [line.rstrip('\n') for line in myData]
            lines = lines[2:]
            # Parse measured
def test_scandir_entry_inode(self):
    sr = os.stat(self.abc_path)
    with os.scandir(TEST_FULL_PATH1) as dir:
        entry = next(dir)
        self.assertEqual('.abc', entry.name)
        self.assertEqual(sr.st_ino, entry.inode())
def main():
    try:
        folder_path = sys.argv[1]
    except IndexError:  # a missing argv entry raises IndexError, not ValueError
        print(__doc__)
        exit()

    # I experienced some memory errors occasionally
    # this can help with manually trying again without
    # repeating the same file.
    # seen = set([
    #     '0.txt', '1.txt', '2.txt', '3.txt', '4.txt',
    #     '5.txt', '6.txt', '7.txt', '8.txt', '9.txt',
    #     '10.txt', '11.txt', '12.txt', '13.txt', '14.txt',
    #     '15.txt', '16.txt', '17.txt', '18.txt', '19.txt',
    # ])
    seen = set()

    total_files = [
        x for x in list(os.scandir(folder_path))
        if x.is_file() and x.path.endswith('.txt') and x.name not in seen
    ]
    total_files_count = len(total_files)
    print(f'Found {total_files_count} text files: {[x.name for x in total_files]}')

    print('Loading spacy...')
    # We only need the NER tagger, with everything enabled it is much slower
    nlp = spacy.load(Config.SPACY_MODEL, disable=['parser', 'tagger'])

    entity_counts = Counter()
    if not os.path.isfile(Config.ENTITY_COUNTS_PICKLE):
        with open(Config.ENTITY_COUNTS_PICKLE, 'wb') as f:
            pickle.dump(entity_counts, f)

    for i, raw_file in enumerate(total_files):
        print(f'Counting entities in file {raw_file.name} ({i + 1} / {total_files_count})')

        # Because this process takes a long time (about 1.5 hours on
        # a 6 core 12 thread 16GB machine) we save the progress per file,
        # so if something goes wrong, we have not lost all progress.
        with open(Config.ENTITY_COUNTS_PICKLE, 'rb') as o:
            entity_counts = pickle.load(o)

        # We need to process the files using the pipe() to batch chunks,
        # otherwise it gets loaded into memory and with an average size
        # of 100mb combined with the already high memory usage of spacy
        # this is way too much.
        with open(raw_file.path, 'r', encoding='utf8') as f:
            for j, doc in enumerate(nlp.pipe(f, n_process=cpu_count() - 1)):
                for entity in doc.ents:
                    entity_counts[entity.text.lower()] += 1
                if j % 500 == 0:
                    print(f'Processing line {j}', end='\r')

        print(f'\rFound {len(entity_counts.keys())} so far')
        with open(Config.ENTITY_COUNTS_PICKLE, 'wb') as o:
            pickle.dump(entity_counts, o)

        # This again just helps with the occasional crashes
        seen.add(raw_file.name)
        print(f'Seen files: {seen}')

    print(f'Counted all entities in corpus, totalling {len(entity_counts.keys())}')
def test_scandir_default_arg(self):
    with os.scandir() as dir:
        self.assertEqual('./', next(dir).path[:2])
appdata = os.environ['LOCALAPPDATA']
if os.path.isdir(f'{appdata}\\Roblox'):
    print('Roblox directory in AppData installed: continuing.')
else:
    tkinter.messagebox.showerror(
        "Roblox directory in AppData directory does not seem to exist. Program halted."
    )
    exit()

if os.path.isdir(f'{appdata}\\Roblox\\Versions'):
    print('Roblox versions directory seem to exist: checking if they actually have crap in them.')
    rblxversions = [
        f.path for f in os.scandir(f'{appdata}\\Roblox\\Versions') if f.is_dir()
    ]
    if not rblxversions:
        tkinter.messagebox.showerror(
            "There seem to be no currently installed Roblox versions, "
            "therefore making it very unlikely any Roblox versions are currently installed. "
            "Program halted.")
        exit()
else:
    tkinter.messagebox.showerror(
        "Roblox versions directory doesn't seem to exist, "
        "therefore making it very unlikely any Roblox versions are currently installed. "
        "Program halted.")
    exit()  # without this, the loop below would hit a NameError on rblxversions

for x in rblxversions:
def test_scandir_empty(self):
    with os.scandir(TEST_FULL_PATH1) as dir:
        self.assertEqual(0, len([entry for entry in dir]))
def get_file_map(current_path):
    files = [f.name for f in os.scandir(current_path) if f.is_file()]
    spoken_forms = create_spoken_forms(files)
    return dict(zip(spoken_forms, files))
def test_path_respecialization(self):
    # regression test for https://github.com/graalvm/graalpython/issues/124
    from pathlib import PurePath
    p = PurePath(".")
    for path in [p, "."]:
        os.scandir(path)
def main():
    all_files = os.scandir("/etc/ssl/certs")
    for f in all_files:
        load_certificate(f)
<html>
<head>
</head>
<body>
    <h1 style="background:red;color:yellow;">Hello World</h1>
</body>
</html>
""", subtype='html')

# Provision to add file as email content
msg.add_alternative(files, subtype='html')

# Module to read following extension types and add as an attachment to email
filesList = ('.jpg', '.html', '.csv', '.py', '.php', '.png', '.css')
my_files = [f.name for f in os.scandir() if f.name.endswith(filesList)]
for i in my_files:
    with open(i, 'rb') as f:
        file_data = f.read()
        file_name = f.name
    msg.add_attachment(file_data, maintype='application',
                       subtype='octet-stream', filename=file_name)

# Module to send email
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
    smtp.login(me, 'password')
    smtp.send_message(msg)
def save_training_data(root):
    # cnx, cursor = establish_db_connection()
    # cursor.execute("USE chai")
    db = boto3.resource('dynamodb', region_name='us-east-1')
    table = db.Table('chai-training')

    # Setup MySQL insertion command
    query = 'INSERT INTO training_data (game_id, win, '
    for i in range(0, 64):
        query += 'pieces' + str(i) + ', '
    for i in range(0, 63):
        query += 'cover' + str(i) + ', '
    query += 'cover63) VALUES ('

    points = {'P': 1, 'R': 2, 'B': 3, 'N': 3, 'Q': 4, 'K': 9}

    # Determine directories for all game datafiles
    files = []
    for loc in scandir(root):
        temp = listdir(loc.path)
        for file in temp:
            files.append(loc.path + '\\' + file)

    count = 1
    for file in files:
        game = pd.read_csv(file)
        # Record each dataset twice (once as a win and once as a loss,
        # but oriented to the opposite player)
        print('Processing file ' + str(count) + ' of ' + str(len(files)))
        rec_as_winner = True
        # Aggregate training data to identify wins (both black and white wins)
        for index, row in game.iterrows():
            for i in range(0, 2):
                cover = row['Ending Coverage']
                board = row['Ending Board']
                winner = row['Winner']
                new_row = {}
                if winner == 'Draw':
                    break
                rec_index = 0
                if rec_as_winner:
                    new_row['win'] = 1
                    if winner == 'White':
                        j = 0
                        while j < 128:
                            if cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j += 2
                            rec_index += 1
                    elif winner == 'Black' or winner == 'Draw':
                        j = 126
                        while j > -1:
                            if cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j -= 2
                            rec_index += 1
                    else:
                        print('ERROR: Invalid game outcome')
                else:
                    new_row['win'] = 0
                    if winner == 'Black':
                        j = 0
                        while j < 128:
                            if cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j += 2
                            rec_index += 1
                    elif winner == 'White' or winner == 'Draw':
                        j = 126
                        while j > -1:
                            if cover[j] == 'B':
                                new_row['cover' + str(rec_index)] = int(cover[j+1])
                            elif cover[j] == 'W':
                                new_row['cover' + str(rec_index)] = int(cover[j+1]) * -1
                            else:
                                new_row['cover' + str(rec_index)] = 0
                            if board[j] == 'B':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]])
                            elif board[j] == 'W':
                                new_row['pieces' + str(rec_index)] = int(points[board[j+1]]) * -1
                            else:
                                new_row['pieces' + str(rec_index)] = 0
                            j -= 2
                            rec_index += 1
                    else:
                        print('ERROR: Invalid game outcome')
                # Undo any cases where a draw is recorded as a winner (code uses
                # the win/loss logic to record draws twice for both board
                # orientations, but they should all be losses)
                if winner == 'Draw':
                    new_row['win'] = 0

                # Setup query
                temp_query = query
                temp_query += str(count) + ', ' + str(new_row['win']) + ', '
                for j in range(0, 64):
                    temp_query += str(new_row['pieces' + str(j)]) + ', '
                for j in range(0, 63):
                    temp_query += str(new_row['cover' + str(j)]) + ','
                temp_query += str(new_row['cover63']) + ');'

                # Commit data to database
                # cursor.execute(temp_query)
                temp = {
                    'TrainingID': int(count * 100000 + index * 10 + i),
                    'GameID': int(count),
                    'Win': new_row['win']
                }
                for j in range(0, 64):
                    temp['pieces' + str(j)] = new_row['pieces' + str(j)]
                for j in range(0, 64):
                    temp['cover' + str(j)] = new_row['cover' + str(j)]
                table.put_item(Item=temp)

                rec_as_winner = not rec_as_winner
            if winner == 'Draw':
                # games_used -= 1
                break
        # cnx.commit()
        count += 1
def rehashx(self, parameter_s=''):
    """Update the alias table with all executable files in $PATH.

    rehashx explicitly checks that every entry in $PATH is a file
    with execute access (os.X_OK).

    Under Windows, it checks executability as a match against a
    '|'-separated string of extensions, stored in the IPython config
    variable win_exec_ext. This defaults to 'exe|com|bat'.

    This function also resets the root module cache of module completer,
    used on slow filesystems.
    """
    from IPython.core.alias import InvalidAliasError

    # for the benefit of module completer in ipy_completers.py
    del self.shell.db['rootmodules_cache']

    path = [
        os.path.abspath(os.path.expanduser(p))
        for p in os.environ.get('PATH', '').split(os.pathsep)
    ]

    syscmdlist = []
    savedir = os.getcwd()

    # Now walk the paths looking for executables to alias.
    try:
        # write the whole loop for posix/Windows so we don't have an if in
        # the innermost part
        if self.is_posix:
            for pdir in path:
                try:
                    os.chdir(pdir)
                except OSError:
                    continue

                # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
                dirlist = os.scandir(path=pdir)
                for ff in dirlist:
                    if self.isexec(ff):
                        fname = ff.name
                        try:
                            # Removes dots from the name since ipython
                            # will assume names with dots to be python.
                            if not self.shell.alias_manager.is_alias(fname):
                                self.shell.alias_manager.define_alias(
                                    fname.replace('.', ''), fname)
                        except InvalidAliasError:
                            pass
                        else:
                            syscmdlist.append(fname)
        else:
            no_alias = Alias.blacklist
            for pdir in path:
                try:
                    os.chdir(pdir)
                except OSError:
                    continue

                # for python 3.6+ rewrite to: with os.scandir(pdir) as dirlist:
                dirlist = os.scandir(pdir)
                for ff in dirlist:
                    fname = ff.name
                    base, ext = os.path.splitext(fname)
                    if self.isexec(ff) and base.lower() not in no_alias:
                        if ext.lower() == '.exe':
                            fname = base
                        try:
                            # Removes dots from the name since ipython
                            # will assume names with dots to be python.
                            self.shell.alias_manager.define_alias(
                                base.lower().replace('.', ''), fname)
                        except InvalidAliasError:
                            pass
                        syscmdlist.append(fname)
        self.shell.db['syscmdlist'] = syscmdlist
    finally:
        os.chdir(savedir)
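# The in-line comments in rehashx() above point at the Python 3.6+
# context-manager form of os.scandir, which closes the underlying directory
# handle as soon as the block exits. A minimal standalone sketch of that
# pattern; os.access is used here as an illustrative stand-in for the
# method's isexec() helper, so this is not the original implementation:
import os

def list_executables(pdir):
    names = []
    with os.scandir(pdir) as dirlist:  # handle closed when the block exits
        for ff in dirlist:
            if ff.is_file() and os.access(ff.path, os.X_OK):
                names.append(ff.name)
    return names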
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info(
                "Calculating metrics based on an existing predictions file: {}"
                .format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
from function import show_slice, show_activations, plot_roc
import pickle
import numpy as np
import os

files = list(os.scandir('./run_opt_cv_opts'))

i = 0  # index of the subject/slice to display (was defined after first use)
if files[0].is_file():  # the original had a bare `files[0].is_file():`, a syntax error
    f = open(files[i], "rb")
    opts = pickle.load(f)  # was `pickle,load(f)`
    f.close()

if opts['T']['ytest'][0][i] == 0:
    subject_type = 'patient'
else:
    subject_type = 'control'

plt1 = show_slice(opts['T']['xtest'][i, :, :, 0], subject_type, opts['jj'])
plt1.savefig('./images/opt_slice.png')

os.mkdir('./images/opt_cv_activations')

f = open('./opt_cv_results/opt_cv_A.pkl', "rb")
A = pickle.load(f)
f.close()

fig1, fig2, fig3 = show_activations(A)  # was `show_activation`, which is not the imported name
fig1.savefig('./images/opt_cv_activations/conv_1_activations.png')
fig2.savefig('./images/opt_cv_activations/conv_2_activations.png')
fig3.savefig('./images/opt_cv_activations/conv_3_activations.png')

f = open('./opt_cv_results/opt_cv_C.pkl', "rb")
C = pickle.load(f)
f.close()

plt3 = plot_roc(C)
import os

if __name__ == '__main__':
    # list_of_dir_entries = os.listdir(r"D:\CoursePython\python-3.10.2-docs-html")
    list_of_dir_entries = os.scandir(r"D:\CoursePython\python-3.10.2-docs-html")
    entry: os.DirEntry
    for entry in list_of_dir_entries:
        stat = entry.stat()
        kind = 'DIR' if entry.is_dir() else 'FILE'
        print(
            f"{entry.name:<30s} {kind:<4s} "
            f"{str(stat.st_size) if entry.is_file() else '':>8s}"
        )
def test_scandir_symlink_to_dir_file_not_found(self):
    with os.scandir(TEST_FULL_PATH1) as dir:
        entry = next(dir)
        os.rmdir(TEST_FULL_PATH2)
        self.assertFalse(entry.is_dir(follow_symlinks=True))