def loadTrainData():
    paths = util.getFiles()
    paths_noface = util.getFiles("/home/xws/Downloads/300w_cropped/noface")
    paths = paths + paths_noface
    datas = util.loadIBUG(paths, Config.DATA_SIZE)
    return datas
def __init__(self, batchSize, is_train=True, is_random=True):
    self.batchSize = batchSize
    paths = util.getFiles()
    paths_noface = util.getFiles("/home/xws/Downloads/300w_cropped/noface")
    paths = paths + paths_noface
    self.datas = util.loadIBUG(paths)
    print("datas.length = ", len(self.datas))
def readFileNames(self, basePath):
    bgrPath = basePath + '/rgb_noseg/'
    bgrSuf = '.png'
    dPath = basePath + '/depth_noseg/'
    dSuf = '.png'
    infoPath = basePath + '/poses/'
    infoSuf = '.txt'
    self.bgrFiles = util.getFiles(bgrPath, bgrSuf)
    self.depthFiles = util.getFiles(dPath, dSuf)
    self.infoFiles = util.getFiles(infoPath, infoSuf, True)
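# None of these snippets show util.getFiles itself. Below is a minimal sketch
# of a directory scanner consistent with the calls above: an optional suffix
# filter and an optional sort flag. The signature and defaults are assumptions
# inferred from usage, not the project's real code (the zero-argument calls in
# the snippets above imply a default directory in the real helper).
import os

def getFiles(dirPath, suffix=None, sort=False):
    """Return paths of files under dirPath, optionally filtered by suffix."""
    files = [os.path.join(dirPath, name)
             for name in os.listdir(dirPath)
             if suffix is None or name.endswith(suffix)]
    if sort:
        files.sort()
    return files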
def commits(request, repo_name, sha='master'):
    """Display the commit history for the given sha/branch."""
    repo = util.getRepo(repo_name)
    # Was the supplied sha really a ref?
    sha_ref = 'refs/heads/' + sha
    if sha_ref in repo.refs:
        sha = repo.refs[sha_ref]
    try:
        commit = repo.commit(sha)
    except Exception:
        raise Http404
    refs = util.getBranches(repo)
    tree = repo.tree(commit.tree)
    files = util.getFiles(tree, repo)
    hist = repo.revision_history(commit.id)
    return render_to_response(
        "commits.html",
        dict(name=repo_name, files=files, refs=refs, commits=hist))
def main():
    parser = argparse.ArgumentParser(description="Filter for customer reports")
    parser.add_argument("-id", help="domain ID you would like to filter reports on")
    args = parser.parse_args()
    domainID = args.id
    if domainID is None:
        domainID = 0
    # Read in all the required files from the directory specified.
    metaFile, depositFiles, reportFiles = util.getFiles(directory)
    # Convert the files to DataFrame objects.
    metaD = pd.concat(metaFile, axis=0, ignore_index=True)
    customerD = pd.concat(depositFiles, axis=0, ignore_index=True)
    customerR = pd.concat(reportFiles, axis=0, ignore_index=True)
    # Clean and transform the data.
    df = util.cleanDf(metaD, customerD, customerR)
    # Apply any filter provided to the dataset.
    df1 = util.reportFilter(df, int(domainID))
    # Generate and save the reports.
    util.getReports(df1, domainID)
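# In this pipeline, util.getFiles(directory) returns three lists of DataFrames,
# one per report category, which are then concatenated with pd.concat. A hedged
# sketch, assuming CSV inputs grouped by filename prefix -- both the grouping
# rule and the file format here are hypothetical:
import os
import pandas as pd

def getFiles(directory):
    metaFile, depositFiles, reportFiles = [], [], []
    for name in sorted(os.listdir(directory)):
        if not name.endswith(".csv"):
            continue
        df = pd.read_csv(os.path.join(directory, name))
        if name.startswith("meta"):
            metaFile.append(df)
        elif name.startswith("deposit"):
            depositFiles.append(df)
        else:
            reportFiles.append(df)
    return metaFile, depositFiles, reportFiles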
def tree(request, repo_name, sha='master'):
    repo = util.getRepo(repo_name)
    # Was the supplied sha really a ref?
    sha_ref = 'refs/heads/' + sha
    if sha_ref in repo.refs:
        sha = repo.refs[sha_ref]
    try:
        commit = repo.commit(sha)
    except Exception:
        raise Http404
    # Get the branches.
    refs = util.getBranches(repo)
    tree = repo.tree(commit.tree)
    files = util.getFiles(tree, repo)
    parents = map(repo.get_object, commit.parents)
    return render_to_response(
        "tree.html",
        dict(name=repo_name, commit=commit, tree=tree, files=files,
             branches=refs, parents=parents))
def setInputFileList(self, inputFilePath):
    """Collect the paths of the files to be merged."""
    if inputFilePath is None:
        raise ValueError("the given path does not exist")
    fileList = []
    if not isinstance(inputFilePath, list):
        inputFilePath = [inputFilePath]
    for file in inputFilePath:
        if os.path.isdir(file):
            fileList.extend(getFiles(file))
        else:
            fileList.append(file)
    fileList = self.sortValue(fileList)
    return fileList
def __init__(self, folder, labelfile, suffix='.jpg',
             loader=folder.default_loader, transform=None):
    self.root = folder
    self.labelfile = labelfile
    self.suffix = suffix
    self.loader = loader
    self.transform = transform
    # Get the files and their corresponding labels.
    files, labels = getFiles(folder, suffix, labelfile)
    # Encode the string labels as integers with sklearn's LabelEncoder.
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    self.classes = le.classes_
    # In Python 3, zip returns an iterator, so materialize it into a list.
    self.samples = list(zip(files, labels))
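# Only __init__ is shown above; a dataset built this way normally also
# implements the standard __getitem__/__len__ contract so it can be fed to a
# DataLoader. A sketch assuming the usual torchvision conventions (these
# methods are not shown in the original class):
def __getitem__(self, index):
    path, label = self.samples[index]
    image = self.loader(path)  # e.g. folder.default_loader returns a PIL image
    if self.transform is not None:
        image = self.transform(image)
    return image, label

def __len__(self):
    return len(self.samples)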
def filetree(request, repo_name, sha, path=''):
    try:
        repo = Repo(os.path.join(settings.REPOS_DIR, repo_name))
    except NotGitRepository:
        raise Http404
    # This may be needed, depending on how we do branches:
    # branch = getBranch(request)
    # Check to see if a specific commit has been passed;
    # if not, then assume head.
    sha_ref = 'refs/heads/' + sha
    if sha_ref in repo.refs:
        sha = repo.refs[sha_ref]
    commit = repo.commit(sha)
    tree = repo.tree(commit.tree)
    # Look up the path; it may be a tree or a blob.
    obj = tree.lookup_path(repo.get_object, path)[1]
    try:
        # Assume it's a tree.
        new_tree = repo.tree(obj)
    except NotTreeError:
        # Well, maybe it was a file (blob).
        return file_(request, repo_name, repo, sha=obj, path=path)
    files = util.getFiles(new_tree, repo)
    return render_to_response(
        "filetree.html",
        dict(name=repo_name, files=files, branch='master', path=path))
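# The git views above all rely on util.getFiles(tree, repo) to turn a tree
# object into template-friendly entries. A minimal sketch with dulwich; the
# returned fields are assumptions, not the project's actual helper:
import stat

def getFiles(tree, repo):
    """Flatten a dulwich Tree into a list of simple dicts for templates."""
    files = []
    for entry in tree.items():  # TreeEntry(path, mode, sha)
        files.append({
            "name": entry.path,
            "sha": entry.sha,
            "is_dir": stat.S_ISDIR(entry.mode),
        })
    return files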
def show(model, data, path):
    imgTensor = data[0]
    X = imgTensor.view(1, 3, Config.IMAGE_SIZE, Config.IMAGE_SIZE)
    testout = model(X)
    points = util.tensorToPoint(testout.cpu().detach())
    for p in points:
        plt.plot(p[0] * Config.IMAGE_SIZE, p[1] * Config.IMAGE_SIZE, "r+")
    img = util.tensorToImage(imgTensor)
    plt.imshow(img)
    plt.savefig(path[0].replace("300w_cropped/01_Indoor", "test"))
    # plt.show()
    plt.cla()

# Load all image paths, run the model on every image, and save the results.
paths = util.getFiles()
model = Config.model()  # instantiate the model
model.eval()  # switch the model to evaluation mode
if os.path.exists(Config.MODEL_SAVE_PATH):
    print("loading ...")
    state = torch.load(Config.MODEL_SAVE_PATH)
    model.load_state_dict(state["net"])
    start_epoch = state["epoch"]
    print("loading over")
for path in paths:
    print("path = ", path[0].replace("300w_cropped/01_Indoor", "test"))
    data = util.loadOneIBUG(path)
    if data[0].size()[0] != 3:
args = parser.parse_args()
np.random.seed(42)

if args.indir:
    assert os.path.exists(args.indir)
if args.outdir:
    assert os.path.exists(args.outdir)
    outdir = args.outdir
else:
    if args.indir:
        outdir = args.indir
    else:
        outdir = os.path.dirname(args.file)

assert os.path.exists(args.aru_model)
assert (args.scale > 0) and (args.scale <= 1.0)

if args.file:
    files = [args.file]
else:
    files, _ = getFiles(args.indir, args.suffix, args.labels)
assert len(files) > 0

infer = Inference_pb(args.aru_model, files, args.scale, to_line=args.to_line,
                     outdir=outdir, out_suffix=args.out_suffix)
infer.applyARUNet()
def loadTestData():
    paths = util.getFiles("/home/xws/Downloads/300w_cropped/02_Outdoor")
    datas = util.loadIBUG(paths, Config.BATCH_SIZE)
    return datas
from collections import defaultdict

from util import isBinaryString, getFiles

__all__ = [
    "ALIASES",
    "ICONS",
    "SUBCLASSES",
    "match",
    "match_globs",
    "match_magic",
    "match_inode",
    "match_basic",
    "MAGIC_MAXLEN",
]

# Alias table parsed from the mime/aliases data files.
ALIASES = defaultdict(lambda: None)
for path in getFiles("mime/aliases"):
    with open(path, "r") as file:
        for line in file.read().splitlines():
            mime, alias = line.split(" ")
            ALIASES[mime] = alias
ALIASES = dict(ALIASES)

# Icon table parsed from the mime/generic-icons data files.
ICONS = defaultdict(lambda: None)
for path in getFiles("mime/generic-icons"):
    with open(path, "r") as file:
        for line in file.read().splitlines():
            mime, icon = line.split(":")
            ICONS[mime] = icon
ICONS = dict(ICONS)

SUBCLASSES = defaultdict(list)
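# Example lookups against the tables built above, assuming the data files use
# the shared-mime-info layout ("<alias> <canonical>" lines in mime/aliases and
# "<mime>:<icon>" lines in mime/generic-icons); dict.get supplies the None
# fallback once the tables are frozen to plain dicts:
canonical = ALIASES.get("application/x-pdf")  # -> "application/pdf", if present
icon = ICONS.get("text/x-python")             # generic icon name, or None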
        newFile = open(self.outputFilePath, "w", encoding="utf-8")
        newFile.write("".join(newData))
        newFile.close()

def mkfile(filePath, midffix=""):
    """Build a new file path by inserting extra characters between the file
    name and its extension, e.g. mkfile("a/b.txt", "_new") -> "a/b_new.txt".

    @param filePath  original file path
    @param midffix   characters inserted after the file name
    @return          the new file path
    """
    fileDirName = os.path.dirname(filePath)
    fileBaseName = os.path.basename(filePath)
    filePreffix = fileBaseName.split(".")[0]
    fileSuffix = fileBaseName.split(".")[-1]
    outputFileName = os.path.join(fileDirName, filePreffix + midffix + "." + fileSuffix)
    return outputFileName

if __name__ == '__main__':
    fileDir = r"C:\Study\github\Lookoops\interview\src"
    fileList = getFiles(fileDir, None)
    for filePath in fileList:
        re = Exg(filePath)
        re.addRegx([",", "?", ":", ";", '(', ')', "###", '”', '。'],
                   [", ", "? ", ": ", "; ", '(', ')', "##", '"', '.'])
        re.write(filePath)