def compile_tool():
    global tool
    path('tools/' + tool).remove()
    proc = Process(target=monitor)
    proc.start()
    os.system('bjam gcc debug -a %s.o' % tool)
    proc.terminate()
def test_bes(filename, equation_count, term_size = 2):
    txtfile = filename + '.txt'
    besfile = filename + '.bes'
    answerfile = 'temp.answer'
    p = make_bes(equation_count, term_size)
    path(txtfile).write_text('%s' % p)
    os.system('txt2bes %s %s' % (txtfile, besfile))
def add_tool_dir(toolname):
    if mcrl2_tool_options.tooldir != '':
        guess = path(mcrl2_tool_options.tooldir) / toolname / toolname
        if os.path.isfile(guess):
            return guess
        return path(mcrl2_tool_options.tooldir) / 'bin' / toolname
    return toolname
def remove_temporary_files():
    for filename in mcrl2_tool_options.files_to_be_removed.keys():
        if path(filename).exists():
            if mcrl2_tool_options.verbose:
                print 'removing', filename
            path(filename).remove()
    mcrl2_tool_options.files_to_be_removed.clear()
def compile_tools():
    toolsdir = path('tools')
    if not toolsdir.exists():
        toolsdir.mkdir()
    global tool
    for file in path('../include/mcrl2/pbes/tools').files('*.h'):
        tool = file.namebase
        compile_tool()
def compile_tools():
    toolsdir = path("tools")
    if not toolsdir.exists():
        toolsdir.mkdir()
    global tool
    for file in path("../source").files("*.cpp"):
        tool = file.namebase
        compile_tool()
def mem_usage(process_name):
    global tool
    stdout, stderr, returncode = run(["top", "-n1", "-b"])
    lines = stdout.split("\n")
    for line in lines:
        if re.search(process_name, line) != None and len(line) >= 20:
            print line
            path("tools/" + tool).write_text(line + "\n", append=True)
def mem_usage(process_name):
    global tool
    stdout, stderr, returncode = run(['top', '-n1', '-b'])
    lines = stdout.split('\n')
    for line in lines:
        if re.search(process_name, line) != None and len(line) >= 20:
            print line
            path('tools/' + tool).write_text(line + '\n', append = True)
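# compile_tool() above starts a Process(target=monitor), but monitor() itself is
# not part of these snippets. A minimal sketch, assuming it merely polls
# mem_usage() for the compiler process at a fixed interval until the parent
# terminates it (the process name and the 1-second interval are placeholders):
import time

def monitor():
    while True:
        mem_usage('cc1plus')  # hypothetical name of the process to watch
        time.sleep(1)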
def run_examples(exampledir):
    for dir in path(exampledir).dirs():
        os.chdir(dir)
        if path('myrun.py').exists():
            os.system('echo ---------------------------------------------------------------------------')
            os.system('echo --- {} ---'.format(dir))
            os.system('echo ---------------------------------------------------------------------------')
            os.system('python myrun.py')
        os.chdir(cwd)
def save_counter_example(self):
    txtfile = '%s_counter_example_%d.txt' % (self.name, self.counter)
    pbesfile = '%s_counter_example_%d.pbes' % (self.name, self.counter)
    path(txtfile).write_text(str(self.pbes))
    run_txt2pbes(txtfile, pbesfile)
    text = run_pbespp(pbesfile)
    path(txtfile).write_text(text)
    print '--- %s counter example ---' % self.name
    print text
    self.counter = self.counter + 1
def make_return_continuous_path(x0,xf,y0,yf,tf,rx,ry,N):
    x0,xf = xf,x0
    y0,yf = yf,y0
    def path(x0,xf,rx):
        f = np.poly1d(poly3(x0,xf,0,tf,rx,rx))
        return f(np.linspace(0,tf,N))
    path_x = path(x0,xf,rx)
    path_y = path(y0,yf,ry)
    p = np.vstack((path_x,path_y))
    return p
def test_pbes_solvers(p, filename):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    pbesfile = filename + '.pbes'
    run_txt2pbes(txtfile, pbesfile)
    answer1 = run_pbes2bool(pbesfile)
    answer2 = run_pbespgsolve(pbesfile)
    print filename, answer1, answer2
    if answer1 == None or answer2 == None:
        return True
    return answer1 == answer2
def test_pbesstategraph(p, filename):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    testfile = '../random/tests/pbesstategraph.yml'
    inputfiles = [txtfile]
    reporterrors = True
    settings = dict()
    settings['verbose'] = False
    settings['toolpath'] = '../../tools/bin'
    result = run_replay(testfile, inputfiles, reporterrors, settings)
    print filename, result
    return result
def make_return_continuous_path(x0, xf, y0, yf, tf, rx, ry, N):
    x0, xf = xf, x0
    y0, yf = yf, y0
    def path(x0, xf, rx):
        f = np.poly1d(poly3(x0, xf, 0, tf, rx, rx))
        return f(np.linspace(0, tf, N))
    path_x = path(x0, xf, rx)
    path_y = path(y0, yf, ry)
    p = np.vstack((path_x, path_y))
    return p
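# poly3() is called above but not defined in these snippets. A minimal sketch,
# assuming the signature poly3(x0, xf, t0, tf, v0, vf) and that it returns the
# coefficients (highest power first, suitable for np.poly1d) of the cubic that
# matches the endpoint positions and slopes:
import numpy as np

def poly3(x0, xf, t0, tf, v0, vf):
    # Solve for [a3, a2, a1, a0] with f(t0) = x0, f(tf) = xf,
    # f'(t0) = v0 and f'(tf) = vf.
    A = np.array([[t0**3,   t0**2, t0,  1.0],
                  [tf**3,   tf**2, tf,  1.0],
                  [3*t0**2, 2*t0,  1.0, 0.0],
                  [3*tf**2, 2*tf,  1.0, 0.0]])
    b = np.array([x0, xf, v0, vf], dtype=float)
    return np.linalg.solve(A, b)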
def test_pbesabsinthe(p, filename, strategy):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    pbesfile1 = filename + 'a.pbes'
    pbesfile2 = filename + 'b.pbes'
    run_txt2pbes(txtfile, pbesfile1)
    run_pbesabsinthe(pbesfile1, pbesfile2, strategy)
    answer1 = run_pbes2bool(pbesfile1)
    answer2 = run_pbes2bool(pbesfile2)
    print filename, answer1, answer2
    if answer1 == None or answer2 == None:
        return True
    return answer1 == answer2
def test_alphabet(p, filename):
    txtfile = filename + ".mcrl2"
    path(txtfile).write_text("%s" % p)
    lpsfile1 = filename + "a.lps"
    lpsfile2 = filename + "b.lps"
    ltsfile1 = filename + "a.lts"
    ltsfile2 = filename + "b.lts"
    run_mcrl22lps(txtfile, lpsfile1)
    run_mcrl22lps(txtfile, lpsfile2, "--no-alpha")
    run_lps2lts(lpsfile1, ltsfile1)
    run_lps2lts(lpsfile2, ltsfile2)
    answer_text = run_ltscompare(ltsfile1, ltsfile2, "-ebisim")
    answer = answer_text.find("LTSs are strongly bisimilar") != -1
    return answer
def test_pbesinst(p, filename):
    txtfile = filename + ".txt"
    path(txtfile).write_text("%s" % p)
    pbesfile = filename + ".pbes"
    besfile = filename + ".bes"
    run_txt2pbes(txtfile, pbesfile)
    if run_pbesinst(pbesfile, besfile, strategy="lazy"):
        answer1 = run_pbes2bool(pbesfile)
        answer2 = run_bessolve(besfile)
        print filename, answer1, answer2
        if answer1 == None or answer2 == None:
            return True
        return answer1 == answer2
    return True
def test_pbesinst_finite(p, filename):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    pbesfile1 = filename + '.pbes'
    pbesfile2 = filename + 'a.pbes'
    run_txt2pbes(txtfile, pbesfile1)
    if run_pbesinst(pbesfile1, pbesfile2, strategy = 'finite', selection = '*(*:Bool)'):
        answer1 = run_pbes2bool(pbesfile1)
        answer2 = run_pbes2bool(pbesfile2)
        print filename, answer1, answer2
        if answer1 == None or answer2 == None:
            return True
        return answer1 == answer2
    return True
def test_pbesrewr1(p, filename, pbes_rewriter):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    pbesfile1 = filename + '1.pbes'
    pbesfile2 = filename + '2.pbes'
    run_txt2pbes(txtfile, pbesfile1)
    run_pbesrewr(pbesfile1, pbesfile2, pbes_rewriter)
    run_txt2pbes(txtfile, pbesfile1)
    answer1 = run_pbes2bool(pbesfile1)
    answer2 = run_pbes2bool(pbesfile2)
    print filename, pbes_rewriter, answer1, answer2
    if answer1 == None or answer2 == None:
        return True
    return answer1 == answer2
def make_report():
    text = ""
    for file in sorted(path("tools").files("*")):
        if file.endswith(".txt"):
            continue
        mem = []
        time = []
        for line in file.lines():
            items = line.split()
            mem.append(items[4])
            time.append(items[10])
        mem = sorted(mem, key=memunit)
        text = text + "%-20s time = %7s memory = %s\n" % (file.namebase, time[-1], mem[-1])
    path("tools/report.txt").write_text(text)
    print text
def test_pbesabstract(p, filename, abstraction_value):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    pbesfile1 = filename + 'a.pbes'
    pbesfile2 = filename + 'b.pbes'
    run_txt2pbes(txtfile, pbesfile1)
    run_pbesabstract(pbesfile1, pbesfile2, abstraction_value)
    answer1 = run_pbes2bool(pbesfile1)
    answer2 = run_pbes2bool(pbesfile2)
    print filename, answer1, answer2, 'abstraction value =', abstraction_value
    if answer1 == None or answer2 == None:
        return True
    if abstraction_value != answer2:
        return answer1 == answer2
    return True
def make_report():
    text = ''
    for file in sorted(path('tools').files('*')):
        if file.endswith('.txt'):
            continue
        mem = []
        time = []
        for line in file.lines():
            items = line.split()
            mem.append(items[4])
            time.append(items[10])
        mem = sorted(mem, key=memunit)
        text = text + '%-15s time = %7s memory = %s\n' % (file.namebase, time[-1], mem[-1])
    path('tools/report.txt').write_text(text)
    print text
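# memunit() is used as the sort key in both make_report() variants but is not
# defined in these snippets. A minimal sketch, assuming it turns top's memory
# column (plain KiB values or values suffixed with m/g) into a comparable
# number of KiB:
def memunit(s):
    factors = {'m': 1024.0, 'g': 1024.0 * 1024.0}
    suffix = s[-1].lower() if s else ''
    if suffix in factors:
        return float(s[:-1]) * factors[suffix]
    return float(s)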
def test_lps2pbes_structured(p, filename):
    mcrl2file = filename + '.mcrl2'
    path(mcrl2file).write_text('%s' % p)
    lpsfile = filename + '.lps'
    ltsfile = filename + '.lts'
    mcffile = 'nodeadlock.mcf'
    pbesfile = filename + '.pbes'
    path(mcffile).write_text('[true*]<true>true')
    add_temporary_files(mcffile)
    run_mcrl22lps(mcrl2file, lpsfile, '--no-alpha')
    text = run_lps2lts(lpsfile, ltsfile, '-D')
    answer1 = text.find('deadlock-detect: deadlock found') != -1
    run_lps2pbes(lpsfile, mcffile, pbesfile, '--structured')
    answer2 = not run_pbes2bool(pbesfile)
    print answer1, answer2
    return answer1 == answer2
def configure(self, noconfigure):
    target = self.dir / "etc" / "grid" / "config.xml"
    if noconfigure:
        log.warn("Target %s already exists, skipping.", target)
        self.configure_ports()
        return  # Early exit!

    if not self.cfg.exists():
        log.info("%s not found. Copying old files", self.cfg)
        from path import path
        old_grid = path(self.args.sym) / "etc" / "grid"
        old_cfg = old_grid / "config.xml"
        if os.path.samefile(old_cfg, target):
            # This likely is caused by the symlink being
            # created early on an initial install.
            pass
        else:
            old_cfg.copy(target)
    else:
        self.cfg.copy(target)

    # TODO: Unneeded if copy old?
    self.run(["config", "set", "omero.web.server_list", self.args.web])

    log.debug('Configuring JVM memory')
    templates = self.dir / "etc" / "grid" / "templates.xml"
    for line in fileinput.input([templates], inplace=True):
        line = line.replace("Xmx512M", self.args.mem)
        line = line.replace("Xmx256M", self.args.mem)
        print line,

    self.configure_ports()
def read_disk(self, pathlist, depth, callback_dir=None):
    """
    to read the DirTree from the disk.
    """
    # creation of the root ElementTree:
    self.et = ET.Element(TAG_DIRTREE)
    #print self.et.tag
    if pathlist:
        self.rootpath = path(pathlist[0])
        level = pathlist[1]
    # name attribute = rootpath
    self.et.set(ATTR_NAME, self.rootpath)
    # time attribute = time of scan
    self.et.set(ATTR_TIME, str(time.time()))
    #e = ET.SubElement(self.et, TAG_DIR)
    #e.set(ATTR_NAME, self.rootpath)
    try:
        self._scan_dir(self.rootpath, level, depth, self.et, callback_dir)
        if self.et.get(ATTR_NAME) == "":
            #print self.tree.getroot()
            #self.et = ET.Element("a")
            pass
    except Exception as e:
        print(" Error : unable to scan the directory %s %s " % (self.rootpath, e))
        raise
def pathdict(self, relpath):
    """
    to create a dictionary which indexes all objects by their paths.
    """
    self.dict = collections.OrderedDict()  # self.dict = {}
    self._pathdict_dir(path(relpath), self.et)
def __init__(self, _pop_count=0, _n=0, _height=0, _width=0):
    self.paths = []
    #self.paths = [path(_n, _height, _width) for _ in range(_pop_count)]
    for i in range(0, _pop_count):
        self.insert(path(_n, _height, _width))
    for i in range(0, len(self.paths)):
        print self.paths[i].cost
def add_path(self, path_list, target):
    try:
        new_path = path(path_list, target)
        self.game_state.paths.append(new_path)
        self.game_state.movable.discard(path_list[0])
        if self.check_path(new_path):
            self.step_path(new_path)
    except:
        pass
def add_path(self, path_list, target):
    try:
        new_path = path(path_list, target)
        self.game_state.paths.append(new_path)
        if self.check_path(new_path):
            #self.logger.debug("Add paths %s", new_path)
            self.step_path(new_path)
    except:
        self.logger.debug("Add paths error: print_exc(): %s", traceback.format_exc())
def __init__(self, rootpath="", isStop=None):
    """
    DirTree constructor.
    """
    self.rootpath = path(rootpath)
    if isStop == None:
        self.isStop = threading.Event()
    else:
        self.isStop = isStop
def test_bessolve(p, filename):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    besfile = filename + '.bes'
    run_txt2bes(txtfile, besfile)
    # bessolve gauss
    answer1 = run_bessolve(besfile, strategy = 'gauss')
    # bessolve spm
    answer2 = run_bessolve(besfile, strategy = 'spm')
    # pbes2bool
    answer3 = run_pbes2bool(besfile)
    print filename, answer1, answer2, answer3
    if answer1 == None or answer2 == None or answer3 == None:
        return True
    return answer1 == answer2 and answer1 == answer3
def make_tool_jamfile(tool):
    print '---', tool
    d = path('d:/mcrl2/tools') / tool
    jamfile = d / 'Jamfile.v2'
    files = d.files('*.cpp')
    sources = [x.basename() for x in files]
    text = JAMFILE
    text = re.sub('<TOOL>', tool, text)
    text = re.sub('<SOURCES>', '\n '.join(sources), text)
    jamfile.write_text(text)
def test_symbolic_exploration(p, filename):
    txtfile = filename + '.txt'
    path(txtfile).write_text('%s' % p)
    pbesfile1 = filename + '.pbes'
    pbesfile2 = filename + '_optimized_clustered.pbes'
    pbesfile3 = filename + '_optimized_not_clustered.pbes'
    pbesfile4 = filename + '_not_optimized_clustered.pbes'
    pbesfile5 = filename + '_not_optimized_not_clustered.pbes'
    run_txt2pbes(txtfile, pbesfile1)
    run_symbolic_exploration(pbesfile1, pbesfile2, '-o1 -c')
    run_symbolic_exploration(pbesfile1, pbesfile3, '-o1')
    run_symbolic_exploration(pbesfile1, pbesfile4, '-o0 -c')
    run_symbolic_exploration(pbesfile1, pbesfile5, '-o0')
    answer1 = run_pbes2bool(pbesfile1)
    answer2 = run_pbes2bool(pbesfile2)
    answer3 = run_pbes2bool(pbesfile3)
    answer4 = run_pbes2bool(pbesfile4)
    answer5 = run_pbes2bool(pbesfile5)
    answers = [answer1, answer2, answer3, answer4, answer5]
    print filename, answer1, answer2, answer3, answer4, answer5
    return not (True in answers and False in answers)
def read_recovery_file(self, filename):
    """
    Creates a directory tree from the top level recovery file,
    reads the directory depth and scans the other recovery files.
    """
    dirlist = []
    try:
        tree = ET.parse(filename)
        root = tree.getroot()
        rootpath = path(root.get(ATTR_NAME))
        depth = int(path(root.get('depth', '2')))
        #print rootpath
        while depth > 1:
            try:
                level = 0
                if len(root) > 0:
                    for child in root.iter('file'):
                        #print child.text
                        f = path(child.text)
                        g = os.path.join(rootpath, f)
                        #recfilelist.append(g)
                        tree1 = ET.parse(g)
                        root1 = tree1.getroot()
                        rootpath1 = path(root1.get(ATTR_NAME))
                        #print rootpath1
                        dirlist.append((rootpath1, level))
                        level += 1
                    depth -= 1
                else:
                    dirlist.append((rootpath, 0))  # catch no folders in root
            except IOError:
                # a recovery file may be missing
                msg = "Missing recovery file %s - defaulting to full scan" % f
                raise IOError(msg)
        return dirlist
    except Exception as e:
        raise (e)
def call(converter_id, filename, extra_args=[]):
    format_params = [filename] + extra_args
    converter = converters.get(converter_id, None)
    if converter:
        command = converter.command
        tmp_dir = tempfile.mkdtemp(prefix="tmp")
        try:
            virtualenv_aware_path = os.path.dirname(sys.executable) + ':' + os.environ.get('PATH')
            response = subprocess.check_output(
                command.format(*format_params),
                stderr=subprocess.STDOUT,
                cwd=tmp_dir,
                env=dict(os.environ,
                         SCRIPTS=path(os.getcwd()) / 'bin',
                         LIB=path(os.getcwd()) / 'lib',
                         TMPDIR=tmp_dir,
                         TEMP=tmp_dir,
                         TMP=tmp_dir,
                         PATH=virtualenv_aware_path),
                shell=True)
            return response
        except subprocess.CalledProcessError as exp:
            cexp = ConversionError()
            cexp.output = exp.output
            message = ('[CONVERSION ERROR]\n'
                       'converter id: %s\n'
                       'output: %s')
            try:
                exp.output.decode('ascii')
            except UnicodeDecodeError:
                conversion_log.warning(message % (converter_id, '[unable to display]\n'))
            else:
                conversion_log.warning(message % (converter_id, exp.output))
            raise cexp
        finally:
            path(tmp_dir).rmtree()
    else:
        raise NotImplementedError
def process_dir(year, author, dir, ext):
    for file in path(dir).walkfiles(ext):
        old_text = file.text()
        assert file.startswith(dir)
        filename = re.sub(r"\\", r"/", file[len(dir) + 1:])
        print filename
        text = old_text
        text = remove_license(text)
        text = replace_include_guards(text, file)
        text = add_license(text, filename, year, author)
        if text != old_text:
            file.write_text(text)
def solve(self, delay: float = None):
    paths = deque([path(self.start)])
    while paths:
        cur = paths.popleft()
        case = cur.move(self)
        if delay is not None:
            print('\n' * 10)
            self.show(cur)
            time.sleep(delay)
        if case == 'solved':
            self.solution = cur
            break
        elif case == 'fork':
            unchecked = [i for i in self.validDirections(cur.end)
                         if cur.end.move(i) != cur.prev]
            paths.extend([cur + i for i in unchecked])
def parse_mcrl2_syntax(filename):
    result = []
    text = path(filename).text()
    # split on the '//---' section markers; the original passed re.S | re.M as
    # the maxsplit argument of str.split, which was almost certainly unintended
    sections = map(string.strip, re.split(r'//---', text))[1:]
    for section in sections:
        paragraphs = re.split('\n\s*\n', section)
        title = paragraphs[0].strip()
        # skip the identifiers and whitespace sections
        if title in ['Identifiers', 'Whitespace']:
            continue
        productions = paragraphs[1:]
        result.append((title, map(parse_production, productions)))
    return result
def resetbyRegexp(expr):
    """
    Re-initialisation of sent files logged in the XML file using a regexp pattern.
    Consult the Python regexp howto for the syntax.
    Examples:
        .*          : all files
        .*\.txt$    : all files with extension ".txt"
        monrep\\.*  : all files containing the phrase monrep
    """
    def _checkresetregexTags(DRef):
        DRef.pathdict(directory)
        NbFile = 0
        NbReinitFile = 0
        pattern = re.compile(expr)
        for myfile in DRef.dict:
            if DRef.dict[myfile].tag == xfl.TAG_FILE:
                NbFile += 1
                res = pattern.match(str(myfile))
                if res != None:
                    NbReinitFile += 1
                    print (" %s" % myfile)
                    for attr in (ATTR_LASTSEND, ATTR_NBSEND):
                        DRef.dict[myfile].set(attr, str(0))
        debugprint(u'Initialisation of %d file(s) on %d.' % (NbReinitFile, NbFile))
        return NbReinitFile

    """
    DRef = xfl.DirTree() #ref directory
    DRef.read_file(XFLFile)
    DRef.pathdict()
    """
    DRef = xfl.DirTree()
    if not path(XFLFile).isfile():
        return
    dirlist = DRef.read_recovery_file(XFLFile)
    dircount = 0
    sessionFile = XFLFile
    sessRoot, ext = os.path.splitext(sessionFile)
    while dircount < len(dirlist):
        directory = dirlist[dircount][0]  # get the directory name for this iteration
        dirlevel = dirlist[dircount][1]   # and the directory level
        XFLFile1 = sessRoot + '_' + str(dircount) + ext  # create the session file to index the iteration files
        filelist = DRef.read_file(XFLFile1)
        NbReinitFile = _checkresetregexTags(DRef)
        if NbReinitFile > 0:
            DRef.write_file(XFLFile)
        dircount += 1
    print (u"Finished scan, %d files re-initialised" % NbReinitFile)
def sanitize(filename):
    """
    Walks the current directory recursively and cleans up the contents of files
    matching the given pattern: whitespace before a newline is removed and tabs
    that follow spaces are replaced.
    """
    for f in path(".").walkfiles(filename):
        source = f.text()
        if re.search("[\t ]+\n", source):
            print "/".join((f.parent, f.name)) + " has whitespace before a newline"
        if re.search("[ ]+\t", source):
            print "/".join((f.parent, f.name)) + " has tabs after spaces"
        sanitized = re.sub("[\t ]+\n", "\n", source)
        sanitized = re.sub("([ ]+)\t", "\\1 ", sanitized)
        f.write_text(sanitized)
def configure(self, _):
    target = self.dir / "etc" / "grid" / "config.xml"
    if target.exists():
        print "Target %s already exists. Skipping..." % target
        return  # Early exit!
    if not self.cfg.exists():
        print "%s not found. Copying old files" % self.cfg
        from path import path
        old_grid = path("OMERO-CURRENT") / "etc" / "grid"
        old_cfg = old_grid / "config.xml"
        old_cfg.copy(target)
    else:
        self.cfg.copy(target)
    _(["config", "set", "omero.web.server_list", WEB])  # TODO: Unneeded if copy old?
    for line in fileinput.input([self.dir / "etc" / "grid" / "templates.xml"], inplace=True):
        print line.replace("Xmx512M", self.mem).replace("Xmx256M", self.mem),
def read_file(self, filename):
    """
    to read the DirTree from an XML file.
    """
    flist = []
    self.tree = ET.parse(filename)
    self.et = self.tree.getroot()
    self.rootpath = self.et.get(ATTR_NAME)
    if "AROWsynchro.xml" in filename:
        dirs = self.et.findall('file')
        for f in path(self.rootpath).files("AROWsynchro_*"):
            found = False
            for dirt in dirs:
                if os.path.split(f)[1] == dirt.text:
                    #f = path(child.tag)
                    #g = os.path.join(self.rootpath, f)
                    found = True
                    flist.append(f)
            if not found:
                os.remove(f)
    return flist
def resetbyDate(ResetDate, XFLFile):
    """
    Initialisation within the XML file by date.
    Use file timestamp within the tree structure to easily identify the time
    of non-transmission.
    """
    def _checkresetdateTags(DRef):
        DRef.pathdict(directory)
        NbFile = 0
        NbReinitFile = 0
        for myfile in DRef.dict:
            if DRef.dict[myfile].tag == xfl.TAG_FILE:
                NbFile += 1
                if float(DRef.dict[myfile].get(ATTR_LASTSEND)) > ResetDate:
                    #if float(DRef.dict[myfile].get(bftp.ATTR_LASTSEND)) > ResetDate:
                    NbReinitFile += 1
                    print (" %s sent earlier than %s \n" % (myfile, ResetDate))
                    for attr in (ATTR_LASTSEND, ATTR_NBSEND):
                        #for attr in (bftp.ATTR_LASTSEND, bftp.ATTR_NBSEND):
                        DRef.dict[myfile].set(attr, str(0))
        debugprint('Initialisation of %d file(s) on %d.' % (NbReinitFile, NbFile))
        return NbReinitFile

    DRef = xfl.DirTree()
    if not path(XFLFile).isfile():
        return
    dirlist = DRef.read_recovery_file(XFLFile)
    dircount = 0
    sessionFile = XFLFile
    sessRoot, ext = os.path.splitext(sessionFile)
    while dircount < len(dirlist):
        directory = dirlist[dircount][0]  # get the directory name for this iteration
        dirlevel = dirlist[dircount][1]   # and the directory level
        XFLFile1 = sessRoot + '_' + str(dircount) + ext  # create the session file to index the iteration files
        filelist = DRef.read_file(XFLFile1)
        NbReinitFile = _checkresetdateTags(DRef)
        if NbReinitFile > 0:
            DRef.write_file(XFLFile1)
        dircount += 1
    print (u"Finished scan, %d files re-initialised" % NbReinitFile)
def configure(self, copyold, prestartfile):
    def samecontents(a, b):
        # os.path.samefile is not available on Windows
        try:
            return os.path.samefile(a, b)
        except AttributeError:
            with open(a) as fa:
                with open(b) as fb:
                    return fa.read() == fb.read()

    target = self.dir / "etc" / "grid" / "config.xml"

    if copyold:
        from path import path
        old_grid = path(self.args.sym) / "etc" / "grid"
        old_cfg = old_grid / "config.xml"
        log.info("Copying old configuration from %s", old_cfg)
        if not old_cfg.exists():
            raise Stop(40, 'config.xml not found')
        if target.exists() and samecontents(old_cfg, target):
            # This likely is caused by the symlink being
            # created early on an initial install.
            pass
        else:
            old_cfg.copy(target)
    else:
        if target.exists():
            log.info('Deleting configuration file %s', target)
            target.remove()

    if prestartfile:
        for f in prestartfile:
            log.info('Loading prestart file %s', f)
            ftype, fpath = fileutils.get_as_local_path(f, 'backup')
            if ftype != 'file':
                raise Stop(50, 'Expected file, found: %s %s' % (ftype, f))
            self.run(['load', fpath])
from django.urls import include, path
from rest_framework import routers
from . import views

#router = routers.DefaultRouter()
#router.register(r'listassets', views.userViewSet.as_view({'get': 'list'}), basename='listassets')

urlpatterns = [
    #path('', include(router.urls)),
    path('user/', views.userViewSet.as_view({'get': 'list', 'post': 'create'})),
    path('user/<int:pk>/', views.userViewSet.as_view({'delete': 'destroy'})),
    path('deposit/', views.depositViewSet.as_view({'get': 'list', 'post': 'create'})),
    path('deposit/<int:pk>/', views.depositViewSet.as_view({'delete': 'destroy'})),
    path('withdraw/', views.withdrawViewSet.as_view({'get': 'list', 'post': 'create'})),
    path('withdraw/<int:pk>/', views.withdrawViewSet.as_view({'delete': 'destroy'})),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
def multiparm_path(node, parm):
    path_var = path.vm_filename_plane(node, parm)
    path(node, path_var)
from django.urls import path
from book.views import index

urlpatterns = [
    path('index/', index),
]
from django.urls import path
import adminapp.views as adminapp

app_name = 'adminapp'

urlpatterns = [
    # url(r'^users/create/$', adminapp.user_create, name='user_create'),
    # url(r'^users/read/$', adminapp.users, name='users'),
    # url(r'^users/update/(?P<pk>\d+)/$', adminapp.user_update, name='user_update'),
    # url(r'^users/delete/(?P<pk>\d+)/$', adminapp.user_delete, name='user_delete'),
    # url(r'^categories/create/$', adminapp.category_create, name='category_create'),
    # url(r'^categories/read/$', adminapp.categories, name='categories'),
    # url(r'^categories/update/(?P<pk>\d+)/$', adminapp.category_update, name='category_update'),
    # url(r'^categories/delete/(?P<pk>\d+)/$', adminapp.category_delete, name='category_delete'),
    path('products/create/', adminapp.ProductCreateView.as_view(), name='product_create'),
    path('products/read/', adminapp.ProductListView.as_view(), name='products'),
    path('products/read/category/<int:category_pk>/', adminapp.ProductListView.as_view(), name='products_by_category'),
    path('products/read/<int:pk>/', adminapp.ProductDetailView.as_view(), name='product_read'),
    path('products/update/<int:pk>/', adminapp.ProductUpdateView.as_view(), name='product_update'),
    path('products/delete/<int:pk>/', adminapp.ProductDeleteView.as_view(), name='product_delete'),
]
def callback_file_print(file, element):
    """
    sample callback function to print file path.
    """
    print(" - " + file)


#--- MAIN ---------------------------------------------------------------------

if __name__ == "__main__":
    if len(sys.argv) < 3:
        print(__doc__)
        print(u"usage: python %s <root path> <xml file> [previous xml file]" % path(sys.argv[0]).name)
        sys.exit(1)
    d = DirTree()
    d.read_disk(sys.argv[1], callback_dir_print, callback_file_print)
    d.write_file(sys.argv[2])
    if len(sys.argv) > 3:
        d2 = DirTree()
        d2.read_file(sys.argv[3])
        same, different, only1, only2 = compare_DT(d, d2)
        print("\nSAME:")
        for f in sorted(same):
            print("  " + f)
        print(u"\nDIFFERENT:")
        for f in sorted(different):
            print("  " + f)
        print(u"\nNEW:")
import os
from path import *

for file in path('../games/fail').walkfiles('*.pdn'):
    print file
    os.system('cat "%s" | ../dparser/pdn' % file)

for file in path('../games/succeed').walkfiles('*.pdn'):
    print file
    os.system('cat "%s" | ../dparser/pdn' % file)
import os
from path import *
from pdn_reading_tpg import *

for file in path('../games').walkfiles('*.pdn'):
    print file
    text = path(file).text()
    pdn_parse(text)
"""
The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from home.views import homepage

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', homepage),
]
"""project URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    path('admin/', admin.site.urls),
    path('catalog/', include('catalog.urls')),
    path('blog/', include('blog.urls')),
]
# Code mostly from Code Institute
from django.urls import path
from . import views

urlpatterns = [
    path('', views.all_products, name='products'),
    path('<product_id>', views.product_detail, name='product_detail'),
]
from django.urls import path
from mon_app.api.competitor_products_api import api_productcompetitor_id, api_productcompetitor
from mon_app.api.my_products_api import api_productmy_id, api_productmy
from mon_app.api.match_api import api_match_id, api_match
from .views import index, parsing, support

urlpatterns = [
    path('', index, name='index_url'),
    path('parsing/', parsing, name='parsing_url'),
    path('support/', support, name='support_url'),
    path('api/productcompetitor/<id>', api_productcompetitor_id, name='api_productcompetitor_id_url'),
    path('api/productcompetitor/', api_productcompetitor, name='api_productcompetitor_url'),
    path('api/productmy/<id>', api_productmy_id, name='api_productmy_id_url'),
    path('api/productmy/', api_productmy, name='api_productmy_url'),
    path('api/match/<id>', api_match_id, name='api_match_id_url'),
    path('api/match/', api_match, name='api_match_url'),
]
from django.urls import path
from blog.views import (AboutView, PostListView, PostDetailView, CreatePostView,
                        PostUpdateView, PostDeleteView, DraftListView,
                        add_comment_to_post, comment_approve, comment_remove,
                        post_publish, signupview)

urlpatterns = [
    path('', PostListView.as_view(), name='post_list'),
    path('about/', AboutView.as_view(), name='aboutview'),
    path('post/<int:pk>', PostDetailView.as_view(), name='post_detail'),
    path('post/new/', CreatePostView.as_view(), name='new_post'),
    path('post/<int:pk>/edit/', PostUpdateView.as_view(), name='post_edit'),
    path('post/<int:pk>/remove/', PostDeleteView.as_view(), name='post_remove'),
    path('drafts/', DraftListView.as_view(), name='post_draft_list'),
    path('post/<int:pk>/comments/', add_comment_to_post, name='add_comment_to_post'),
    path('comment/<int:pk>/approve/', comment_approve, name='comment_approve'),
    path('comment/<int:pk>/remove/', comment_remove, name='comment_remove'),
    path('post/<int:pk>/publish/', post_publish, name='post_publish'),
    path('signup/', signupview, name='signup'),
]
from django.urls import path
from blog.views import general, category, subcategory, affiliate_program, affiliate_tag, top_money_post, top_money_product

app_name = 'blog'

urlpatterns = [
    path('manage/', general.BlogManage.as_view(), name='manage'),

    # CATEGORY URLS
    path('categories/', category.CategoryListView.as_view(), name='category-list'),
    path('category/<int:pk>/subcategories/', category.CategoryDetailListView.as_view(), name='category-detail-list'),
    path('category-create/', category.CategoryCreateView.as_view(), name='category-create'),
    path('category-update/<int:pk>/', category.CategoryUpdateView.as_view(), name='category-update'),
    path('category-delete/<int:pk>/', category.CategoryDeleteView.as_view(), name='category-delete'),

    # SUBCATEGORY URLS
    path('subcategories/', subcategory.SubCategoryListView.as_view(), name='subcategory-list'),
    path('subcategory/<int:pk>/posts/', subcategory.SubCategoryDetailListView.as_view(), name='subcategory-detail-list'),
    path('subcategory-create/', subcategory.SubCategoryCreateView.as_view(), name='subcategory-create'),
    path('subcategory-update/<int:pk>/', subcategory.SubCategoryUpdateView.as_view(), name='subcategory-update'),
    path('subcategory-delete/<int:pk>/', subcategory.SubCategoryDeleteView.as_view(), name='subcategory-delete'),

    # AFFILIATE PROGRAM URLS
    path('affiliate-programs/', affiliate_program.AffiliateProgramListView.as_view(), name='affiliate-program-list'),
    path('affiliate-program/<int:pk>/', affiliate_program.AffiliateProgramDetailView.as_view(), name='affiliate-program-detail'),
    path('affiliate-program-create/', affiliate_program.AffiliateProgramCreateView.as_view(), name='affiliate-program-create'),
    path('affiliate-program-update/<int:pk>/', affiliate_program.AffiliateProgramUpdateView.as_view(), name='affiliate-program-update'),
    path('affiliate-program-delete/<int:pk>/', affiliate_program.AffiliateProgramDeleteView.as_view(), name='affiliate-program-delete'),

    # AFFILIATE TAG URLS
    path('affiliate-tags/', affiliate_tag.AffiliateTagListView.as_view(), name='affiliate-tag-list'),
    path('affiliate-tag/<int:pk>/', affiliate_tag.AffiliateTagDetailView.as_view(), name='affiliate-tag-detail'),
#!/usr/bin/env python
import tf
import rospy
import roslib
import math
import numpy as np
import time
from controller import *
from path import *

# pull path info and create path
p1 = path()
# get path
path = p1.get_path()
# send path to controller to move jorge
cont = controller(path, True)  # second arg determines whether it gives output
"""step_tours URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ import path as path from django.contrib import admin from django.urls import path from tours.views import MainView, DepartureView, TourView urlpatterns = [ path('', MainView.as_view(), name='main'), path('departure/<str:departure>/', DepartureView.as_view(), name='departure'), path('tour/<int:id>/', TourView.as_view(), name='tour'), path('admin/', admin.site.urls), ]
from django.urls import path
from . import views

urlpatterns = [
    path('/login/', views.login, name='accounts-login_user_view'),
    path('/createaccount/', views.create_account, name='accounts-createaccount'),
]