def do_read(self, args):
    """Read objects into memory.

    Syntax: read <path> [as <name>]

    Looks up <path> via self.rdir_helper; if nothing is found, the last
    path component is retried first as a glob pattern, then as a regular
    expression.  Results are handed to self.save_obj(), keyed by <name>
    when one was given.
    """
    if not args:
        print('Nothing to read!')
        return
    tokens = shlex.split(args)
    path = tokens[0]
    # Initialising upfront fixes a latent NameError: the old code left
    # `newobj` unbound when the token after the path was not 'as'.
    newobj = None
    if 'as' in tokens:  # destination variable may be specified
        if tokens[1] == 'as':
            try:
                newobj = tokens[2]
            except IndexError:
                newobj = None  # 'as' given without a destination name
        else:
            # Stray token before 'as': warn and fall back to a plain read
            # (was a py2 print statement + assert-based control flow).
            print('Unknown command token: {}'.format(tokens[1]))
            print('Will do regular read')
    # find and read objects
    objs = self.rdir_helper.read(path, metainfo=True)
    if not objs:
        # nothing found; treat the last path component as a glob pattern
        pattern = path.rsplit('/', 1)
        if len(pattern) > 1:
            # NOTE(review): this leaves `path` as a one-element list, as in
            # the original — presumably rdir_helper.read() accepts that.
            path, pattern = pattern[:-1], pattern[-1]
        else:
            path, pattern = None, pattern[0]
        from fnmatch import fnmatchcase
        match = lambda name: fnmatchcase(name, pattern)
        notdir = lambda key: not is_dir(key) and match(key.GetName())
        objs = self.rdir_helper.read(path, robj_p=notdir, metainfo=True)
        if not objs:
            # still nothing; retry the pattern as a regular expression
            import re
            match = re.compile(pattern).match
            notdir = lambda key: not is_dir(key) and match(key.GetName())
            objs = self.rdir_helper.read(path, robj_p=notdir, metainfo=True)
    # save read objects
    if newobj:
        if len(objs) > 1:
            objs = {newobj: objs}
        else:
            objs = {newobj: objs[0]}  # only one element
    else:
        objs = [(obj.GetName(), obj) for obj in objs]
    self.save_obj(objs)
def do_read(self, args):
    """Read objects into memory.

    Usage: read <path> [as <name>]

    Tries a direct read of <path>; when that yields nothing, the last
    path component is retried as a glob pattern and then as a regex.
    Results are stored through self.save_obj(), under <name> when one
    was supplied.
    """
    if not args:
        print('Nothing to read!')
        return
    tokens = shlex.split(args)
    path, dest = tokens[0], None
    if 'as' in tokens:  # was a destination variable specified?
        if tokens[1] != 'as':
            # Stray token before 'as': warn and fall back to a plain read.
            # (Fixes a NameError: the old assert-based flow left the
            # destination variable unbound here, and used py2 prints.)
            print('Unknown command token: {}'.format(tokens[1]))
            print('Will do regular read')
        elif len(tokens) > 2:
            dest = tokens[2]
    # find and read objects
    objs = self.rdir_helper.read(path, metainfo=True)
    if not objs:
        # nothing found: retry the last path component as a glob, then a regex
        parts = path.rsplit('/', 1)
        if len(parts) > 1:
            # NOTE(review): `path` becomes a one-element list, matching the
            # original behaviour — presumably rdir_helper.read() accepts it.
            path, pattern = parts[:-1], parts[-1]
        else:
            path, pattern = None, parts[0]
        from fnmatch import fnmatchcase
        notdir = lambda key: (not is_dir(key)
                              and fnmatchcase(key.GetName(), pattern))
        objs = self.rdir_helper.read(path, robj_p=notdir, metainfo=True)
        if not objs:
            import re
            rx_match = re.compile(pattern).match
            notdir = lambda key: not is_dir(key) and rx_match(key.GetName())
            objs = self.rdir_helper.read(path, robj_p=notdir, metainfo=True)
    # save what was read
    if dest:
        objs = {dest: objs if len(objs) > 1 else objs[0]}
    else:
        objs = [(obj.GetName(), obj) for obj in objs]
    self.save_obj(objs)
def add_folders():
    """Register a shared folder (and its retention period) in the folder DB.

    Validates that the submitted path is a directory; on failure flashes a
    message and returns to the user index, otherwise records the folder and
    redirects to the admin panel.
    """
    folder_path = request.form['shared_folder_path']
    if is_dir(folder_path):
        add_to_folder_db(folder_path, request.form['time_period'])
        return redirect(url_for('admin.admin_panel'))
    flash(f"{folder_path} not a directory")
    return redirect(url_for('user.index'))
def get_output_file(self):
    """Open the SPDX output file for writing.

    If self.path_or_file is a directory, the ``.spdx`` file is created
    inside it; otherwise it is created next to the given file.  Sets
    self.full_file_path and self.output_file (binary read/write handle).
    """
    # The two original branches differed only in the directory used —
    # compute it once and open the file in one place.
    if utils.is_dir(self.path_or_file):
        out_dir = self.path_or_file
    else:
        out_dir = os.path.dirname(os.path.abspath(self.path_or_file))
    self.full_file_path = os.path.join(out_dir, self.output_file_name + ".spdx")
    # NOTE(review): the handle is left open; the caller is responsible
    # for closing it.
    self.output_file = open(self.full_file_path, "wb+")
def build_tree(self):
    """Construct the directory tree from the indented input lines.

    Each line's indentation depth determines its parent: a change in
    indent level walks the hierarchy up or down via _find_new_parent,
    and utils.is_dir decides whether the line is a directory node
    (gets a children list) or a regular file (children is None).
    """
    parent = self.virtual_root
    last_indent = -1
    for raw_line in self.input:
        indent = utils.get_indent_count(raw_line, self.indent_size)
        # locate this line's parent based on how far the indent moved
        parent = self._find_new_parent(parent, indent - last_indent)
        directory = utils.is_dir(raw_line)
        if directory:
            name = utils.get_dirname(raw_line)
        else:
            name = utils.get_filename(raw_line)
        node = {
            'parent': parent,
            'children': [] if directory else None,
            'data': {
                'filename': name,
                'basedir': os.path.join(parent['data']['basedir'], name),
            },
        }
        parent['children'].append(node)
        last_indent = indent
def __init__(self, path):
    """Open (or create) a database directory at *path*.

    The database is a directory whose name ends in '.db/'; collections
    are cached lazily in self._cache.
    """
    # the database is going to be a directory with files inside
    if not path.endswith('.db/'):
        path = path + '.db/'
    # save the base path
    self._base_path = path
    # Create the directory chain one level at a time by building an
    # incremental prefix — avoids the original os.chdir() dance, which
    # mutated the process-wide cwd, and works for absolute paths too.
    # (micropython has no os.makedirs, so levels are made individually.)
    if not utils.is_dir(path):
        partial = '/' if path.startswith('/') else ''
        for d in path.split('/'):
            if not d:
                continue
            partial += d + '/'
            try:
                os.mkdir(partial)
            except OSError:
                # level already exists (or cannot be created; re-checked below)
                pass
    if not utils.is_dir(path):
        # TODO uncomment the exception, only valid in micropython
        # raise Exception('JSONDB cannot create the structure of directories')
        pass
    # to work with a collection first we will have to cache it
    self._cache = {}
def __init__(self):
    """Set up the captcha-recognition graph: sample one captcha to size the
    placeholders, then build the CNN."""
    self.captcha = Captcha()
    # one sample captcha fixes the label length (MAX_CAPTCHA)
    # NOTE(review): `image` is assigned but never used here — confirm intent
    self.text, image = self.captcha.get_captcha_text_and_image()
    self.IMAGE_HEIGHT = 60
    self.IMAGE_WIDTH = 160
    self.MAX_CAPTCHA = len(self.text)
    self.CHAR_SET_LEN = self.captcha.get_len_char_set()
    # flattened image input, one row per sample
    self.X = tf.placeholder(tf.float32, [None, self.IMAGE_HEIGHT * self.IMAGE_WIDTH])
    # labels: MAX_CAPTCHA characters, CHAR_SET_LEN classes each (one-hot per char)
    self.Y = tf.placeholder(tf.float32, [None, self.MAX_CAPTCHA * self.CHAR_SET_LEN])
    # dropout keep probability fed at train/eval time
    self.keep_prob = tf.placeholder(tf.float32)
    # NOTE(review): is_dir() here presumably ensures the ckpt directory
    # exists and returns its path — confirm against its definition
    self.model_path = is_dir(get_dir() + "ckpt" + os.sep)
    self.output = self.creat_captcha_cnn()
def _line_is_valid(self, this_line, prev_line, this_indent, prev_indent): ''' Once the first indent size is determined, each subsequent indent must be: 1) less than N by a multiple of N, e.g. 8 -> 4 or 8 -> 0 2) 0, or 3) preceded by a directory and greater than N by exactly N. ''' difference = this_indent - prev_indent return (difference == 0) or\ (difference == self.indent_size and utils.is_dir(prev_line)) or\ (difference < 0 and utils.is_multiple_of_indent(this_indent, self.indent_size))
def latest_idx(fdir, fname, fext="pth", pattern=None): """ :param fdir: 모델이 저장된 폴더 :param fname: iteration 번호를 제외한 model 이름 :param fext: 확장자 :param pattern: model 이름 패턴. 기본적으로는 ".*{}-(\d+).{}$".format(join(fdir, fname, fext)) 형태 :return: 해당 파일이 있으면 iteration 인덱스를 줌, 없으면 None """ # 경로 없으면 None if not exist(fdir): return None # 폴더인지 여부 if not is_dir(fdir): raise ValueError("pc6.util_pt.saver.latest_idx: path ({}) not dir".format(fdir)) # 인덱스 찾아서 리턴 files, idxs = ls_model(fdir, fname, fext, pattern=pattern) if len(idxs) == 0: return None else: return idxs[-1]
def create_spdx_document(self):
    """ Write identifier scan results as SPDX Tag/value or RDF. """
    logging.basicConfig(level=logging.INFO)
    logging.info("Creating spdx document")
    # opens self.output_file for writing
    self.get_output_file()
    self.spdx_document = Document(
        version=Version(2, 1),
        data_license=License.from_identifier(
            self.code_extra_params["lic_identifier"]
        ),
    )
    self.set_creation_info()
    # NOTE(review): input_path is computed but never used below — dead?
    if isdir(self.path_or_file):
        input_path = self.path_or_file
    else:
        input_path = dirname(self.path_or_file)
    package = self.spdx_document.package = Package(
        download_location=NoAssert(), version=self.get_package_version()
    )
    self.set_package_info(package)
    # NOTE(review): these two flags are never updated or read afterwards
    all_files_have_no_license = True
    all_files_have_no_copyright = True
    file_license_list = []
    file_license_ids = []
    if utils.is_dir(self.path_or_file):
        # one File entry per scan result, unless the file should be skipped
        for idx, file_data in enumerate(self.id_scan_results):
            # NOTE(review): handle is opened but never used or closed — leak
            file_data_instance = open(file_data["FileName"], "r")
            if not utils.should_skip_file(
                file_data["FileName"], self.output_file_name
            ):
                # path made relative to the scanned directory
                name = file_data["FileName"].replace(self.path_or_file, ".")
                file_entry = File(
                    name=name,
                    chk_sum=Algorithm(
                        "SHA1", utils.get_file_hash(file_data["FileName"]) or ""
                    ),
                )
                spdx_license = None
                if self.doc_type == utils.TAG_VALUE:
                    spdx_license = License.from_identifier(file_data["SPDXID"])
                else:
                    # RDF output: wrap the license as an extracted LicenseRef
                    licenseref_id = "SPDXID-Doc-Generator-" + file_data["SPDXID"]
                    file_license_ids.append(licenseref_id)
                    # NOTE(review): the id was just appended, so this
                    # membership test is always true
                    if licenseref_id in file_license_ids:
                        spdx_license = ExtractedLicense(licenseref_id)
                    spdx_license.name = NoAssert()
                    comment = "N/A"
                    spdx_license.comment = comment
                    text = NoAssert()
                    # NOTE(review): NoAssert() is presumably truthy, so this
                    # fallback likely never fires — confirm
                    if not text:
                        text = comment
                    spdx_license.text = text
                    self.spdx_document.add_extr_lic(spdx_license)
                    package.add_lics_from_file(spdx_license)
                file_entry.add_lics(spdx_license)
                file_license_list.append(spdx_license)
                file_entry.conc_lics = NoAssert()
                file_entry.copyright = SPDXNone()
                file_entry.spdx_id = self.code_extra_params["file_ref"].format(
                    idx + 1
                )
                package.add_file(file_entry)
    # tag/value output: attach each distinct file license to the package
    if self.doc_type == utils.TAG_VALUE:
        for spdx_license in list(set(file_license_list)):
            package.add_lics_from_file(spdx_license)
    if len(package.files) == 0:
        # emit a placeholder in the output format's comment syntax
        if self.doc_type == utils.TAG_VALUE:
            self.output_file.write(
                "# No results for package '{}'.\n".format(package.name)
            )
        else:
            self.output_file.write(
                "<!-- No results for package '{}'. -->\n".format(package.name)
            )
    # import the writer matching the requested document type
    if self.doc_type == utils.TAG_VALUE:
        from spdx.writers.tagvalue import write_document  # NOQA
    else:
        from spdx.writers.rdf import write_document  # NOQA
    if package.files:
        spdx_output = io.StringIO()
        if self.doc_type == utils.TAG_VALUE:
            write_document(self.spdx_document, spdx_output, validate=False)
            logging.info("SPDX Tag-Value Document created successfully.")
        else:
            # spdx_output = io.BytesIO()
            write_document(self.spdx_document, spdx_output, validate=False)
            logging.info("SPDX RDF Document created successfully.")
        result = spdx_output.getvalue()
        # NOTE(review): output_file is opened "wb+" in get_output_file, so
        # the RDF (str) branch would fail on write — confirm on py3
        if self.doc_type == utils.TAG_VALUE:
            result = result.encode("utf-8")
        self.output_file.write(result)