def add_path(self, *args):
    '''
    Add path(s) to %PATH% system variable.

    Uses the os module so the change is visible in the current session;
    the permanent registry key change requires a reboot (or re-login)
    to propagate to other processes.
    '''
    val = self.keys.environment
    for path in args:
        # Lists are flattened by recursing one entry at a time.
        if isinstance(path, list):
            for i in path:
                System.add_path(i)
        if isinstance(path, str):
            newEnvPath = ''
            # Only add paths that actually exist on disk.
            if os.path.exists(path):
                with reg.ConnectRegistry(None, LOCAL_MACHINE) as x:
                    with reg.OpenKey(x, val, 0, ALL_ACCESS) as pathKey:
                        Env = reg.QueryValueEx(pathKey, "path")[0]
                        # Skip entries already present (either case form).
                        if path not in Env and path.upper() not in Env:
                            # Collapse accidental empty entries (';;').
                            newEnvPath = re.sub(';;', ';', Env + ';' + path)
                        if newEnvPath:
                            # Update the current process environment too.
                            os.environ['path'] = newEnvPath
                            reg.SetValueEx(pathKey, 'path', 0,
                                           reg.REG_EXPAND_SZ, newEnvPath)
                            # Re-read to verify the write took effect.
                            Env = reg.QueryValueEx(pathKey, "path")[0]
                            # FIX: replaced `if path in Env: pass else:
                            # warn(...)` with a direct negative check.
                            if path not in Env:
                                warn('Failed to add %s to %%PATH%%' % path)
def export_notecard(request, pk):
    """Render and process the export form for a notecard file.

    GET: show an empty form plus the default export path.
    POST: validate the form, write the export via create_file(), and
    redirect back to the notecard list; on failure show a message.
    """
    # Default path derived from the notefile's directory and its name.
    notefile_Name = Notefile.objects.filter(author=request.user).get(pk=pk)
    path = getPath(request, notefile_Name.directory) + notefile_Name.name + "/"
    if request.method == 'POST':
        form = ImportForm(request.POST)
        if form.is_valid():
            # Get full path.
            cd = form.cleaned_data
            path = cd.get('path')
            path = path.upper()
            try:
                create_file(pk, path)
                return redirect('notecard_list', pk=pk)
            # FIX: narrowed the original bare `except:` so that
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            except Exception:
                messages.info(request, 'The path you have entered is not valid.')
    else:
        form = ImportForm()
    return render(request, 'srs/export_notecard.html', {
        'form': form,
        'pk': pk,
        'path': path
    })
def add_keyword(self, path, line, unique=True):
    """Adds a keyword to section."""
    parts = path.upper().split('/', 1)
    name = parts[0]
    matches = [sec for sec in self.subsections if sec.name == name]
    if not matches:
        # Create the missing subsection on the fly.
        created = InputSection(name=name)
        self.subsections.append(created)
        matches = [created]
    elif len(matches) != 1:
        raise Exception('Multiple %s sections found ' % name)
    target = matches[0]
    key = line.split()[0].upper()
    if len(parts) > 1:
        # Descend into the nested section with the path remainder.
        target.add_keyword(parts[1], line, unique)
    elif key == '_SECTION_PARAMETERS_':
        if target.params is not None:
            msg = 'Section parameter of section %s already set' % name
            raise Exception(msg)
        target.params = line.split(' ', 1)[1].strip()
    else:
        existing = [kw.split()[0].upper() for kw in target.keywords]
        if unique and key in existing:
            msg = 'Keyword %s already present in section %s'
            raise Exception(msg % (key, name))
        target.keywords.append(line)
def test_reconstruct_name(self) -> None:
    """test FileCache.reconstruct_name"""
    samples = ["test", "test/42/9", "äöü|<", "\n\n\n"]
    with FileCache(Directory("tests/embedding", compiler), "tmp") as cache:
        for sample in samples:
            cached_path = cache.path(sample)
            # reconstruct_name must be case-insensitive w.r.t. the path.
            for variant in (cached_path.lower(), cached_path.upper()):
                self.assertEqual(cache.reconstruct_name(variant), sample)
def test_init_descr_help(self):
    # Plugin with static descr/help strings should expose them verbatim.
    plug = plugin.FFPlugin('mod3_action_descr_help', 'test')
    self.assertIsInstance(plug, plugin.FFPlugin)
    self.assertEqual(plug.name, 'mod3_action_descr_help')
    self.assertEqual(plug.type, 'test')
    self.assertIsInstance(plug.action, types.FunctionType)
    sample_path = 'asd'
    self.assertEqual(
        plug.action('mod3_action_descr_help', '', sample_path),
        sample_path.upper(),
    )
    self.assertEqual(plug.descr, 'short descr')
    self.assertEqual(plug.help, 'some help')
    self.assertIsNone(plug.argument)
def get_subsection(self, path):
    """Finds a subsection"""
    pieces = path.upper().split('/', 1)
    found = [sec for sec in self.subsections if sec.name == pieces[0]]
    if len(found) > 1:
        raise Exception('Multiple %s sections found ' % pieces[0])
    if not found:
        return None
    if len(pieces) > 1:
        # Recurse with the remainder of the slash-separated path.
        return found[0].get_subsection(pieces[1])
    return found[0]
def get_subsection(self, path):
    '''Finds a subsection'''
    # FIX: removed the non-idiomatic C-style parentheses around `if`,
    # `raise` and `return` — behavior is unchanged.
    parts = path.upper().split('/', 1)
    candidates = [s for s in self.subsections if s.name == parts[0]]
    if len(candidates) > 1:
        raise Exception('Multiple %s sections found ' % parts[0])
    if len(candidates) == 0:
        return None
    if len(parts) == 1:
        return candidates[0]
    # Recurse with the remainder of the slash-separated path.
    return candidates[0].get_subsection(parts[1])
def get_subsection(self, path):
    """Finds a subsection"""
    pieces = path.upper().split('/', 1)
    name = pieces[0]
    found = [sec for sec in self.subsections if sec.name == name]
    if len(found) > 1:
        raise Exception('Multiple %s sections found ' % name)
    if not found:
        # Unlike the read-only variant, this one creates missing sections.
        fresh = InputSection(name=name)
        self.subsections.append(fresh)
        found = [fresh]
    if len(pieces) > 1:
        return found[0].get_subsection(pieces[1])
    return found[0]
def _override_value(page, key):
    # NOTE: `path`, `copy` and `in_default_lang` are free variables from
    # the enclosing scope, not parameters of this helper.
    metadata = copy(page.metadata)
    # A directory-specific setting (PAGE_<PATH>_<KEY>) overrides the save
    # path for this directory; if present, rely on it entirely.
    override_key = f'PAGE_{path.upper()}_{key.upper()}'
    if override_key in page.settings:
        metadata['slug'] = page.slug
        infix = path.upper() + '_'
    else:
        # Otherwise fold the directory path into the slug ourselves.
        metadata['slug'] = os.path.join(path, page.slug)
        # Account for non-default language variants,
        # e.g. PAGE_SAVE_AS vs PAGE_LANG_SAVE_AS.
        infix = '' if in_default_lang(page) else 'LANG_'
    return page.settings['PAGE_' + infix + key.upper()].format(**metadata)
def _run_map(self):
    # Prompt for the executable path once and cache it in the config.
    if "executable_path" not in self._config:
        path: str = tkinter.filedialog.askopenfilename(
            initialdir=self._blood_path,
            title="Path to Blood Executable",
            filetypes=(("Executable Files", "*.EXE"), ),
        )
        if not path:
            # User cancelled the dialog; leave config untouched.
            return
        # NOTE(review): the dialog filters on *.EXE, but the check below
        # enforces a ".MAP" suffix and the value is stored under
        # "executable_path" — looks inconsistent; confirm intent.
        if not path.upper().endswith(".MAP"):
            path += ".MAP"
        self._config["executable_path"] = path
    # Remainder of the map-running logic is not implemented yet.
    raise NotImplementedError()
def __init__(self, imagedir=CHECKENV, mapdir=CHECKENV, fontdir=CHECKENV,
             zipFile=None):
    """Resolve resource directories and optionally index a zip archive.

    Each of imagedir/mapdir/fontdir defaults to the CHECKENV sentinel,
    in which case the path is read from the PGAME_<KIND>S environment
    variable; otherwise the explicit argument is used.
    """
    passed = locals()
    for path in ("image", "map", "font"):
        if passed[path + "dir"] == CHECKENV:
            # e.g. PGAME_IMAGES, PGAME_MAPS, PGAME_FONTS
            checkPath = "PGAME_" + path.upper() + "S"
            setattr(self, "%sPath" % path, os.environ[checkPath])
        else:
            setattr(self, "%sPath" % path, passed[path + "dir"])
    if zipFile:
        try:
            # FIX: ZipFile has no "rb" mode (valid: 'r','w','x','a');
            # the original call always raised and the archive was never
            # opened. 'r' is the correct read mode.
            Resources.zipFile = zipfile.ZipFile(zipFile, "r")
        except RuntimeError:
            return
        # Index archive members by filename for fast lookup.
        for item in self.zipFile.infolist():
            self.zipDir[item.filename] = item
def process_file(self, path):
    # Resize a single image file in place, keeping a .BAK backup copy.
    # Skips backup files themselves and non-image files.
    if path.upper().endswith('.BAK'):
        return
    try:
        im = Image.open(path)
    except Exception as err:
        print 'Exception when loading file ' + path +':'
        print '"' + str(err) + '"\n'
        return  # the file is most likely not an image
    print 'File %s loaded\n' % path
    # Unless the resize_smaller flag is set, images already smaller than
    # the target dimension (or a percentage scale >= 100) are left alone.
    if ((self.dimension == 'w' and im.size[0] <= self.size) \
        or (self.dimension == 'h' and im.size[1] <= self.size) \
        or (self.dimension == 'p' and self.size >= 100)) \
        and self.resize_smaller == False:
        print '\tFile is too small, skipping\n'
        return
    # Replace any stale backup with a fresh copy of the original.
    bak_path = path + '.BAK'
    if os.path.exists(bak_path):
        os.remove(bak_path)
    shutil.copyfile(path, bak_path)
    print '\tBackup created\n'
    print '\tOriginal size = ' + str(im.size) + ' px\n'
    # 'r' selects ratio-based resizing; anything else uses the base mode.
    if self.dimension != 'r':
        im = self.resize_image_base(im)
    else:
        im = self.resize_image_with_ratio(im)
    im.save(path)
    print '\tFile updated\n'
def parsePath(self, path):
    '''Resolve a raw path into an absolute path; None when unresolvable.'''
    path = path.upper()
    fmt = types.getFileNameFormat(path)
    # Already absolute (or relative to current/parent DF): use as-is.
    if fmt in (types.FILE_FORMAT_PATH_ABSOLUTE,
               types.FILE_FORMAT_DF_CURRENT,
               types.FILE_FORMAT_DF_PARENT):
        return path
    # ADF references are rooted at "/".
    if fmt in (types.FILE_FORMAT_ADF_ID, types.FILE_FORMAT_ADF_NAME):
        return types.addToPath("/", path)
    if path == '/':
        return "/"
    # Bare name/id: resolve the first component, then append the rest.
    if fmt in (types.FILE_FORMAT_NAME, types.FILE_FORMAT_ID):
        pieces = path.split("/")
        head, tail = pieces[0], "/".join(pieces[1:])
        fid = self.getPathFromRawPath(head)
        if not fid:
            return None
        return types.addToPath(fid, tail)
    if fmt == types.FILE_FORMAT_UNKNOWN:
        return None
    raise Exception("Format:%d not expected" % fmt)
def parsePath(self, path):
    """Resolve a raw path string to an absolute path (None if not resolvable)."""
    upper = path.upper()
    kind = types.getFileNameFormat(upper)
    direct = [
        types.FILE_FORMAT_PATH_ABSOLUTE,
        types.FILE_FORMAT_DF_CURRENT,
        types.FILE_FORMAT_DF_PARENT,
    ]
    if kind in direct:
        # Already absolute — nothing to resolve.
        result = upper
    elif kind in [types.FILE_FORMAT_ADF_ID, types.FILE_FORMAT_ADF_NAME]:
        result = types.addToPath("/", upper)
    elif upper == '/':
        result = "/"
    elif kind in [types.FILE_FORMAT_NAME, types.FILE_FORMAT_ID]:
        # Resolve the leading component, then re-attach the remainder.
        segments = upper.split("/")
        anchor = self.getPathFromRawPath(segments[0])
        if not anchor:
            return None
        result = types.addToPath(anchor, "/".join(segments[1:]))
    elif kind == types.FILE_FORMAT_UNKNOWN:
        return None
    else:
        raise Exception("Format:%d not expected" % kind)
    return result
#define sk_%_dup(st) SKM_sk_dup(%, st) #define sk_%_pop_free(st, free_func) SKM_sk_pop_free(%, (st), (free_func)) #define sk_%_shift(st) SKM_sk_shift(%, (st)) #define sk_%_pop(st) SKM_sk_pop(%, (st)) #define sk_%_sort(st) SKM_sk_sort(%, (st)) #define sk_%_is_sorted(st) SKM_sk_is_sorted(%, (st)) ''' if len(sys.argv) < 2: sys.exit("Usage: %s source.c [source.c ...]" % sys.argv[0]) splitter = re.compile("[() \t]+").split token = None for line in fileinput.input(): if token is None: path = fileinput.filename().split(os.path.sep) path = os.path.join(path[-2], path[-1]) if len(path) > 1 else path[-1] token = "".join(c if c.isalnum() else "_" for c in path.upper()) sys.stdout.write(header.replace("%", token)) if "DECLARE_STACK_OF" in line: words = splitter(line) if len(words) > 1 and words[0] == "DECLARE_STACK_OF": sys.stdout.write(template.replace("%", words[1])) if token is not None: sys.stdout.write(footer.replace("%", token))
def create_data_df(self, root_dir):
    """Walk root_dir for .xml files, tokenize every sentence, and build a
    DataFrame of (sentence_id, token_id, char span, split) rows plus the
    id2word index and the training vocabulary.

    Returns: (result_df, id2word, vocab).
    Raises: Exception if no Train/Test folder appears in a file's path.
    """
    data = {
        'sentence_id': [],
        'token_id': [],
        'char_start_id' : [],
        'char_end_id': [],
        'split': [],
    }
    # Index 0 is a None placeholder so real token ids start at 1.
    id2word = [None]

    def add_row(sentence_id, token_id, char_start_id, char_end_id, split):
        # Append one token occurrence; end offset is made inclusive (-1).
        data['sentence_id'].append(sentence_id)
        data['token_id'].append(token_id)
        data['char_start_id'].append(int(char_start_id))
        data['char_end_id'].append(int(char_end_id)-1)
        data['split'].append(split)

    i = 0
    for filename in glob.iglob(root_dir + "**/**", recursive=True):
        if '.xml' not in filename:
            continue
        # The split (TRAIN/TEST) is inferred from the directory names.
        filename_split = [path.upper() for path in filename.split('/')]
        split = None
        for path in filename_split:
            if path == 'TRAIN' or path == 'TEST':
                split = path
                break
        if split is None:
            raise Exception('Data folder is not structured in Train and Test folders')
        tree = ET.parse(filename)
        root = tree.getroot()
        for sentence in root:
            sentence_id = sentence.attrib['id']
            sentence_text = sentence.attrib['text']
            tokens = DataLoader.create_tokens_offs(sentence_text)
            for t in tokens:
                # Lower-cased tokens; index into id2word is the token id.
                # NOTE(review): list membership + .index() are O(n) each —
                # a word->id dict would make this linear overall.
                if t[0].lower() not in id2word:
                    id2word.append(t[0].lower())
                char_start_id = t[1][0]
                char_end_id = t[1][1]
                token_id = id2word.index(t[0].lower())
                add_row(sentence_id, token_id, char_start_id, char_end_id, split)
            # NOTE(review): counter placement reconstructed from collapsed
            # source; assumed per-sentence to match the progress message.
            i += 1
            if i % 1000 == 0:
                print(f'Finished with {i} sentences')
    print("Ready to extract data")
    df = pd.DataFrame.from_dict(data)
    print("Doing split")
    test_df = df[df['split']=="TEST"]
    train_and_val_df = df[df['split'] == 'TRAIN']
    sent_unique = train_and_val_df['sentence_id'].unique()
    # The lines below make sure that one sentence is not split between
    # the val and train dataframes: 20% of unique TRAIN sentences are
    # relabeled as VAL wholesale.
    distribution_count = round(len(sent_unique)*0.2)
    val_sent_ids = sent_unique[:distribution_count]
    pd.options.mode.chained_assignment = None
    train_and_val_df.loc[train_and_val_df['sentence_id'].isin(val_sent_ids), 'split'] = 'VAL'
    result_df = pd.concat([test_df, train_and_val_df])
    print("Split data in train, test, val is done.")
    # Vocabulary is restricted to tokens that occur in the TRAIN split.
    ids_train = list(result_df.loc[result_df['split'] == 'TRAIN',
                                   'token_id'].unique())  # list of all ids
    vocab = [None] + [id2word[i] for i in ids_train]
    return result_df, id2word, vocab
def PathToIncludeGuard(path):
    """Turn a file path into a C/C++ include-guard macro name.

    Slashes and dots become underscores; the result is upper-cased and
    gets a trailing underscore, e.g. 'foo/bar.h' -> 'FOO_BAR_H_'.
    """
    guard = path.upper()
    for separator in ('/', '.'):
        guard = guard.replace(separator, '_')
    return guard + '_'
def test_init_descr_help_callable(self):
    # Plugin whose descr/help are produced by callables.
    plug = plugin.FFPlugin('mod4_action_descr_help_callable', 'test')
    self.assertIsInstance(plug, plugin.FFPlugin)
    self.assertEqual(plug.name, 'mod4_action_descr_help_callable')
    self.assertEqual(plug.type, 'test')
    self.assertIsInstance(plug.action, types.FunctionType)
    sample_path = 'asd'
    expected = ('mod4_action_descr_help_callable'.upper() + '!!'
                + sample_path.upper())
    self.assertEqual(
        plug.action('mod4_action_descr_help_callable', '', sample_path),
        expected,
    )
    self.assertEqual(plug.descr, 'short descr for mod4_action_descr_help_callable')
    self.assertEqual(plug.help, 'some help for mod4_action_descr_help_callable')
    self.assertIsNone(plug.argument)
def test_run(self):
    # run() should combine the plugin name, its argument and the path.
    sample_path = 'asd'
    argument = 'ARG'
    plug = plugin.FFPlugin('mod4_action_descr_help_callable', 'test',
                           argument=argument)
    self.assertIsInstance(plug, plugin.FFPlugin)
    expected = ('mod4_action_descr_help_callable'.upper() + '!'
                + argument.lower() + '!' + sample_path.upper())
    self.assertEqual(plug.run(sample_path), expected)