def install(**kwargs):
    """setup entry point"""
    if USE_SETUPTOOLS:
        if '--force-manifest' in sys.argv:
            sys.argv.remove('--force-manifest')
    # install-layout option was introduced in 2.5.3-1~exp1
    elif sys.version_info < (2, 5, 4) and '--install-layout=deb' in sys.argv:
        sys.argv.remove('--install-layout=deb')
    if subpackage_of:
        package = subpackage_of + '.' + modname
        kwargs['package_dir'] = {package: '.'}
        packages = [package] + get_packages(os.getcwd(), package)
        if USE_SETUPTOOLS:
            kwargs['namespace_packages'] = [subpackage_of]
    else:
        kwargs['package_dir'] = {modname: '.'}
        packages = [modname] + get_packages(os.getcwd(), modname)
    if USE_SETUPTOOLS and install_requires:
        kwargs['install_requires'] = install_requires
        kwargs['dependency_links'] = dependency_links
    kwargs['packages'] = packages
    return setup(name=distname,
                 version=version,
                 license=license,
                 description=description,
                 long_description=long_description,
                 author=author,
                 author_email=author_email,
                 url=web,
                 scripts=ensure_scripts(scripts),
                 data_files=data_files,
                 ext_modules=ext_modules,
                 cmdclass={'install_lib': MyInstallLib},
                 **kwargs)
def create_filetree(path=None, depth=0, max_depth=0):
    tree = None
    if max_depth == 0 or depth < max_depth:
        if path is None:
            path = os.getcwd()
        tree = dict(name=os.path.basename(path), children=[])
        try:
            lst = os.listdir(path)
        except OSError:
            pass  # ignore errors
        else:
            for name in lst:
                fn = os.path.join(path, name)
                if (os.path.isdir(fn) and
                        re.match('^.*(Compiled)$', fn) is None):
                    child = create_filetree(fn, depth + 1, max_depth)
                    if child is not None:
                        tree['children'].append(child)
                elif re.match(r'^.*\.(m|def|txt|csv)$', fn) is not None:
                    tree['children'].append(dict(name=fn.replace(
                        os.getcwd() + os.path.sep, "")))
    return tree
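# A quick way to inspect the nested dict returned by create_filetree
# (output shape is illustrative; json is assumed to be importable here):
import json

tree = create_filetree(max_depth=2)
print(json.dumps(tree, indent=2))
# e.g. {"name": "myproject", "children": [{"name": "data/readings.csv"}, ...]}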
def SanityCheckDirectory():
    if os.getcwd().endswith(SAFE_DIR_SUFFIX):
        return
    logging.error('httpd.py should only be run from the %s', SAFE_DIR_SUFFIX)
    logging.error('directory for testing purposes.')
    logging.error('We are currently in %s', os.getcwd())
    sys.exit(1)
def get_file_list_recursive(extension="*", file_list=None, dir_path=None,
                            recurs_iter=0, recurs_max=8):
    """
    Returns a list of files with a specific extension by analysing
    directories recursively
    """
    # The original default was a mutable list ([]); using None avoids the
    # shared-default-argument pitfall, behaviour is otherwise unchanged.
    if file_list is None:
        file_list = []
    cwd = os.getcwd()
    if dir_path is not None:
        os.chdir(dir_path)
    dir_path_add = os.getcwd()
    for _, dirs, files in os.walk("./"):
        if len(dirs) > 0:
            for dir_recur in dirs:
                file_list_dir = get_file_list_recursive(
                    extension=extension,
                    file_list=file_list,
                    dir_path=dir_recur,
                    recurs_iter=recurs_iter + 1,
                    recurs_max=recurs_max)
        else:
            file_list_dir = glob.glob("*.%s" % (extension))
            # adding directory to the path
            for i in range(len(file_list_dir)):
                file_list_dir[i] = os.path.join(dir_path_add, file_list_dir[i])
            # joining two lists, python makes it so easy :)
            file_list += file_list_dir
    os.chdir(cwd)
    return file_list
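# Usage sketch for the recursive lister above (paths are illustrative).
# Because the function chdirs internally, call it from a stable directory:
matlab_files = get_file_list_recursive(extension="m", dir_path="./src")
for path in matlab_files:
    print(path)   # absolute paths such as /home/user/src/solver.m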
def download_mango(url, path):
    """
    Function: download_mango(url, path)
    Usage: download_mango('http://www.mangareader.net/poyopoyo-kansatsu-nikki/1', os.getcwd())
    Added in version: 0.1 Beta
    """
    if path != os.getcwd():
        pathchange(os.getcwd(), path)
    urlContent = urllib2.urlopen(url).read()
    imgUrls = re.findall('img .*?src="(.*?.jpg)"', urlContent)
    for imgUrl in imgUrls:
        try:
            imgData = urllib2.urlopen(imgUrl).read()
            fileName = basename(urlsplit(imgUrl)[2])
            output = open(fileName, 'wb')
            output.write(imgData)
            output.close()
        except IOError:
            print "File not found or full disk. Try again."
            sys.exit(1)
        except KeyboardInterrupt:
            print "Operation aborted manually."
            sys.exit(1)
        except:
            print "Unknown error. If this persists, contact the author or create a ticket in the bugtracker."
            sys.exit(1)
def filter_files(kernel, is_relative, exclude, write_files):
    extra = ''
    for i in exclude:
        extra += ' -not -path "*' + i + '/*" '
    if kernel:
        cmd = """find {0} \
        -path "$PWD/arch/*" ! -path "$PWD/arch/x86*" -prune -o \
        -path "$PWD/include/asm-*" ! -path "$PWD/include/asm-i386*" -prune -o \
        -path "$PWD/sound*" -prune -o \
        -path "$PWD/firmware*" -prune -o \
        {1} \
        -iname "*.[chxsS]" -print > {2}
        """.format('.' if is_relative else os.getcwd(), extra, write_files)
    else:
        cmd = """find {0} \( \
        -name '*.py' \
        -o -name '*.php' \
        -o -name '*.java' \
        -o -iname '*.[ch]' \
        -o -name '*.cpp' \
        -o -name '*.cc' \
        -o -name '*.hpp' \) \
        {1} \
        > {2}
        """.format('.' if is_relative else os.getcwd(), extra, write_files)
    print cmd
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    out, err = process.communicate()
    print out
def test_run_multiproc_nondaemon_with_flag(nondaemon_flag):
    '''
    Start a pipe with two nodes using the multiproc plugin and passing the
    nondaemon_flag.
    '''
    cur_dir = os.getcwd()
    temp_dir = mkdtemp(prefix='test_engine_')
    os.chdir(temp_dir)

    pipe = pe.Workflow(name='pipe')

    f1 = pe.Node(interface=Function(function=TestInterface.testFunction,
                                    input_names=['sum'],
                                    output_names=['sum_out']),
                 name='f1')
    f2 = pe.Node(interface=Function(function=TestInterface.testFunction,
                                    input_names=['sum'],
                                    output_names=['sum_out']),
                 name='f2')

    pipe.connect([(f1, f2, [('sum_out', 'sum')])])
    pipe.base_dir = os.getcwd()
    f1.inputs.sum = 0

    # execute the pipe using the MultiProc plugin with 2 processes and the
    # non_daemon flag to enable child processes which start other
    # multiprocessing jobs
    execgraph = pipe.run(plugin="MultiProc",
                         plugin_args={'n_procs': 2,
                                      'non_daemon': nondaemon_flag})

    names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()]
    node = execgraph.nodes()[names.index('pipe.f2')]
    result = node.get_output('sum_out')
    yield assert_equal, result, 180  # n_procs (2) * numberOfThreads (2) * 45 == 180
    os.chdir(cur_dir)
    rmtree(temp_dir)
def paipai(self):
    self.parent.cam.saveSnapshot(os.getcwd() + '/image/s2.png',
                                 timestamp=self.timestamp,
                                 boldfont=self.boldfont,
                                 quality=self.quality)
    self.autoCam()
    img = ImageProcess()
    img.thumbnails(os.getcwd() + '/image/s2.png',
                   os.getcwd() + '/image/s3.png', 170, 170)
    self.parent.showimage()
    self.close()
def create_backup():
    cwd = os.getcwd()
    os.chdir(BASE_DIR)
    current_date = datetime.utcnow().strftime(FILE_TIME_FORMAT)
    filename = os.path.join(MEDIA_ROOT, current_date + SQL_FORMAT)
    backup_name = "Ignite_" + current_date + TAR_FORMAT
    tar_name = os.path.join(MEDIA_ROOT, BACKUP, backup_name)
    db_dump(filename)
    file_obj = tarfile.open(tar_name, "w:gz")
    logger.debug("cwd = " + os.getcwd())
    for name in FILE_LIST:
        file_obj.add(MEDIA + "/" + name)
    file_obj.add(MEDIA + "/" + current_date + SQL_FORMAT)
    file_obj.close()
    try:
        os.remove(filename)
    except:
        delete_backup([current_date + TAR_FORMAT])
        raise IgniteException(ERR_FAILED_TO_REMOVE + filename)
    resp = {}
    resp["status"] = "success"
    resp["filename"] = backup_name
    os.chdir(cwd)
    return resp
def download_dir(dsvc, dir_file, depth=0):
    cwd = os.getcwd()
    out_root = os.path.join(cwd, dir_file['title'])
    disp_path = out_root.replace(PROG_ROOT, "")
    print("ChangeDir to %s" % disp_path)
    if not os.path.exists(out_root):
        os.makedirs(out_root)
    os.chdir(out_root)
    file_list = get_file_meta(dsvc, "'%s' in parents" % dir_file['id'])
    counter = 1
    total = len(file_list)
    for file in file_list:
        if file['mimeType'] == "application/vnd.google-apps.folder":
            print("[%s%d/%d] %s (Directory)" % ((depth * "-"), counter, total, file['title']))
            download_dir(dsvc, file, depth + 1)
        else:
            # We have a file.
            fsmb = float(float(file['fileSize']) / (1024 * 1024))
            print("[%s%d/%d] %s (%.2fMB)" % ((depth * "-"), counter, total, file['title'], fsmb))
            download_file(dsvc, file)
        counter += 1
    os.chdir("..")
    cwd = os.getcwd()
    disp_path = cwd.replace(PROG_ROOT, "")
    if disp_path != "":
        print("ChangeDir to %s" % disp_path)
def run(self, args):
    namespace = self.parser.parse_args(args)

    # Get absolute paths
    namespace.pecan_config = os.path.join(os.getcwd(), namespace.pecan_config)
    namespace.api_paste_config = os.path.join(os.getcwd(), namespace.api_paste_config)
    namespace.config_file = os.path.join(os.getcwd(), namespace.config_file)

    # Check conf files exist
    if not os.path.isfile(namespace.pecan_config):
        print("Bad config file: %s" % namespace.pecan_config, file=sys.stderr)
        sys.exit(1)
    if not os.path.isfile(namespace.api_paste_config):
        print("Bad config file: %s" % namespace.api_paste_config, file=sys.stderr)
        sys.exit(2)
    if not os.path.isfile(namespace.config_file):
        print("Bad config file: %s" % namespace.config_file, file=sys.stderr)
        sys.exit(1)

    app.SurveilCommand().run(namespace)
def unzip_input(file):
    print "* Unzipping " + file
    zfile = zipfile.ZipFile(os.getcwd() + "/input/" + file + ".zip")
    for name in zfile.namelist():
        (dirname, filename) = os.path.split(name)
        zfile.extract(name, os.getcwd() + "/input/")
    zfile.close()
def prompt_for_solution_stack(module_name=None):
    solution_stacks = elasticbeanstalk.get_available_solution_stacks()

    # get list of platforms
    platforms = []
    for stack in solution_stacks:
        if stack.platform not in platforms:
            platforms.append(stack.platform)

    cwd = os.getcwd()
    # First check to see if we know what language the project is in
    try:
        fileoperations._traverse_to_project_root()
        platform = heuristics.find_language_type()

        if platform == 'Docker':
            # Check to see if dockerrun is version one or two
            dockerrun_file = dockerrun.get_dockerrun(
                os.path.join(os.getcwd(), 'Dockerrun.aws.json'))
            if dockerrun_file:
                if dockerrun_file.get('AWSEBDockerrunVersion') in (1, '1'):
                    platform = 'Docker'
                else:
                    platform = 'Multi-container Docker'
    finally:
        os.chdir(cwd)

    if platform is not None:
        io.echo()
        io.echo(prompts['platform.validate'].replace('{platform}', platform))
        correct = io.get_boolean_response()

    if not platform or not correct:
        # ask for platform
        io.echo()
        io.echo(prompts['platform.prompt'] if not module_name
                else prompts['platform.prompt.withmodule'].replace('{module_name}', module_name))
        platform = utils.prompt_for_item_in_list(platforms)

    # filter
    solution_stacks = [x for x in solution_stacks if x.platform == platform]

    # get Versions
    versions = []
    for stack in solution_stacks:
        if stack.version not in versions:
            versions.append(stack.version)

    # now choose a version (if applicable)
    if len(versions) > 1:
        io.echo()
        io.echo(prompts['sstack.version'])
        version = utils.prompt_for_item_in_list(versions)
    else:
        version = versions[0]

    return get_latest_solution_stack(version, stack_list=solution_stacks)
def get_extensions():
    from distutils.extension import Extension

    common_extension_args = {
        'include_dirs': get_include_dirs(),
        'library_dirs': get_library_dirs(),
        'libraries': get_libraries()}

    try:
        from Cython.Build import cythonize
        sources = [os.path.join(os.getcwd(), 'pyfftw', 'pyfftw.pyx')]
        have_cython = True
    except ImportError as e:
        # no cython
        sources = [os.path.join(os.getcwd(), 'pyfftw', 'pyfftw.c')]
        if not os.path.exists(sources[0]):
            raise ImportError(
                str(e) + '. ' +
                'Cython is required to build the initial .c file.')
        have_cython = False

    ext_modules = [
        Extension('pyfftw.pyfftw', sources=sources,
                  **common_extension_args)]

    if have_cython:
        return cythonize(ext_modules)
    else:
        return ext_modules
def __upload_file(self, ip="", port=36000, username="", password="", file_name=""):
    """function: upload script file"""
    try:
        result = True
        code = 0
        msg = "OK"
        t = paramiko.Transport((ip, port))
        t.connect(username=username, password=password)
        sftp = paramiko.SFTPClient.from_transport(t)
        #
        try:
            sftp.put(os.getcwd() + "/tools/" + file_name, "/tmp/" + file_name)
            #print "Upload Local File:",
        except Exception, e:
            print "Exception:", str(e)
            print "Try again"
            time.sleep(5)
            try:
                sftp.put(os.getcwd() + "/tools/" + file_name, "/tmp/" + file_name)
            except Exception, e:
                msg += (str(e))
                result = False
                code = 1
                msg = str(e)
def verifyArguments(args):
    # Verify supplied arguments
    if args.application is None or os.path.exists(args.application) is False:
        # Before we error, let's verify if the current parent directory matches
        # the application they were supplying:
        if os.path.basename(os.getcwd()) != args.application:
            print 'You must specify a path to the application you wish to build an SQA documentation for.'
            sys.exit(1)
        else:
            args.application = os.getcwd()
    else:
        if os.path.exists(os.path.join(args.application, 'doc/sqa', args.requirements)) is False:
            print 'I could not find ', os.path.join(os.path.abspath(args.application), 'doc/sqa', args.requirements), 'file.' \
                '\nPlease see the directory:', os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '..', 'framework/doc/sqa'), 'for a working example'
            sys.exit(1)
    args.application_path = os.path.abspath(args.application)
    args.requirements_path = os.path.join(args.application_path, 'doc/sqa', args.requirements)
    args.application_name = os.path.split(args.application_path)[1]

    # Set the current working directory to this script location.
    # We do this _after_ discovering the application path in case
    # the user supplied a relative path instead of an absolute path.
    args.cwd = os.getcwd()
    os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))

    # Set MOOSE_DIR to parent directory (we're sitting
    # in moose/scripts at the moment)
    args.moose_dir = os.path.abspath('..')
    return args
def copy_rails_config_file(directory, filename, flag=0):
    """
    Description:
    Vars:
        1. f_dst: destination file name
        2. f_src: source file name
        3. flag
            1 => copy ".project"
            2 => copy "do_heroku.bat"
        4. content: content of ".project"
        5. fin: file object
        6. fout: file object
        7. directory: full directory path
    """
    f_dst = os.path.join(os.getcwd(), filename)
    f_src = os.path.join(os.path.dirname(inspect.currentframe().f_code.co_filename),
                         directory, filename)
    if flag == 1:
        fin = open(f_src, "r")
        content = fin.read()
        content = content.replace("@project_name@",
                                  os.path.basename(os.getcwd()))
        fout = open(f_dst, "w")
        fout.write(content)
        fin.close()
        fout.close()
    else:
        shutil.copyfile(f_src, f_dst)
    print "File copied"
    print "\t", "From: %s" % f_src
    print "\t", "To: %s" % f_dst
def create(username, site_name):
    import re
    newsite = "bench new-site " + site_name
    installapp = "bench --site " + site_name + " install-app erpnext"
    # NOTETHIS: check if site_name already exists
    print "Creating new site.."
    os.system(newsite)
    print "Installing erpnext to site.."
    os.system(installapp)

    # get database name from site_config.json of new site
    old_dir = os.getcwd()
    path = '/home/testuser/frappe-bench/sites/' + site_name
    os.chdir(path)
    print os.getcwd()
    with open('site_config.json') as f:
        content = f.readlines()
    tmp = content[1].strip()
    tmp = tmp.split(":")[1]
    db_name = re.sub(r'\W+', '', tmp)
    os.chdir(old_dir)

    # add site to database, mapped to owner
    newsite = User_Sites()
    newsite.username = username
    newsite.db_name = db_name
    newsite.site_name = site_name
    newsite.save()
    print "Done!"
def __init__(self, defaults=None, confFile=None, *args, **kwds):
    """Initialize the parser.

    *defaults* -- defaults values.
    *confFile* -- the file (or list of files) to parse."""
    ConfigParser.ConfigParser.__init__(self, defaults=defaults)
    if confFile is None:
        dotFileName = '.' + confFileName
        # Current and home directory.
        confFile = [os.path.join(os.getcwd(), confFileName),
                    os.path.join(os.getcwd(), dotFileName),
                    os.path.join(os.path.expanduser('~'), confFileName),
                    os.path.join(os.path.expanduser('~'), dotFileName)]
        if os.name == 'posix':
            sep = getattr(os.path, 'sep', '/')
            # /etc/ and /etc/conf.d/
            confFile.append(os.path.join(sep, 'etc', confFileName))
            confFile.append(os.path.join(sep, 'etc', 'conf.d', confFileName))
        else:
            # etc subdirectory of sys.prefix, for non-unix systems.
            confFile.append(os.path.join(sys.prefix, 'etc', confFileName))
    for fname in confFile:
        try:
            self.read(fname)
        except (ConfigParser.MissingSectionHeaderError,
                ConfigParser.ParsingError), e:
            _aux_logger.warn('Troubles reading config file: %s' % e)
        # Stop at the first valid file.
        if self.has_section('imdbpy'):
            break
def main():
    description = ("Output an LZW compressed binary file and JSON dict "
                   "containing the keycodes for decompression, or decompress "
                   "a previously LZW compressed file.")
    parser = argparse.ArgumentParser(description=description)
    subparsers = parser.add_subparsers()

    parser_comp = subparsers.add_parser("compress")
    parser_comp.add_argument("--infile", nargs="?",
                             type=argparse.FileType("r"),
                             default=sys.stdin)
    parser_comp.add_argument("--outfile", nargs="?",
                             type=argparse.FileType("wb"),
                             default=os.path.join(os.getcwd(), "output.bin"))
    parser_comp.add_argument("--keycode-file", nargs="?",
                             type=argparse.FileType("w"),
                             default=os.path.join(os.getcwd(), "keycodes.json"))
    parser_comp.set_defaults(func=parse_for_compress)

    parser_decomp = subparsers.add_parser("decompress")
    parser_decomp.add_argument("--infile", nargs="?",
                               type=argparse.FileType("rb"),
                               default=os.path.join(os.getcwd(), "output.bin"))
    parser_decomp.add_argument("--keycode-file", nargs="?",
                               type=argparse.FileType("r"),
                               default=os.path.join(os.getcwd(), "keycodes.json"))
    parser_decomp.add_argument("--outfile", nargs="?",
                               type=argparse.FileType("w"),
                               default=sys.stdout)
    parser_decomp.set_defaults(func=parse_for_decompress)

    args = parser.parse_args()
    args.func(args)
    args.infile.close()
    args.outfile.close()
    args.keycode_file.close()
def load_XRF_data(self):
    file = '%s/XRF_Training_Set.xlsx' % os.getcwd()
    wb = xlrd.open_workbook(filename='XRF_Training_Set.xlsx')
    sh = wb.sheet_by_index(0)
    row_index = 1
    while True:
        try:
            XRF_data_list = []
            column_index = 1
            while True:
                try:
                    XRF_data_list.append(sh.cell_value(row_index, column_index))
                    column_index += 1
                except:
                    break
            with open('%s/controlData_AVG_Files/%s Ref Data/XRF_%s.json' % (
                    os.getcwd(), sh.cell_value(row_index, 0),
                    sh.cell_value(row_index, 0)), 'w') as j:
                j.write(json.dumps(XRF_data_list, sort_keys=True, indent=4,
                                   separators=(',', ': ')))
            with open('%s/controlData_AVG_Files/%s Ref Data/XRF_%s.csv' % (
                    os.getcwd(), sh.cell_value(row_index, 0),
                    sh.cell_value(row_index, 0)), 'w') as c:
                writer = csv.writer(c)
                writer.writerow(XRF_data_list)
            row_index += 1
            self.XRF_listLength = len(XRF_data_list)
        except:
            break
def display_path(path):
    """Gives the display value for a given path, making it relative to cwd
    if possible."""
    path = os.path.normcase(os.path.abspath(path))
    if path.startswith(os.getcwd() + os.path.sep):
        path = '.' + path[len(os.getcwd()):]
    return path
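# Behaviour sketch for display_path (values are illustrative): paths under
# the current working directory are shortened, others are left absolute.
# With cwd == /home/user/project:
#   display_path('/home/user/project/src/app.py')  -> './src/app.py'
#   display_path('/tmp/other.py')                  -> '/tmp/other.py'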
def _quick_savefile(self, PTYPE=IMAGE_EXT):
    '''Save the current display via PyArt interface.'''
    imagename = self.display.generate_filename(
        self.Vfield.value, self.Vtilt.value, ext=IMAGE_EXT)
    self.canvas.print_figure(os.path.join(os.getcwd(), imagename), dpi=DPI)
    self.statusbar.showMessage(
        'Saved to %s' % os.path.join(os.getcwd(), imagename))
def OnButton_textUserButton(self, evt):
    '''Handle the txt user database.'''
    wildcard = "txt (*.txt)|*.txt"
    try:
        Dir = unicode(os.getcwd(), self.textencoding)
    except:
        Dir = os.getcwd()
    # Do not include wx.CHANGE_DIR in the style here: changing it would
    # alter the script's working directory.
    dlg = wx.FileDialog(
        self, message="Choose a txt file",
        defaultDir=Dir,
        defaultFile="",
        wildcard=wildcard,
        style=wx.OPEN
    )
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        self.mainStatusBar.SetLabel(path)
        self.userControlPanelString = path
    dlg.Destroy()
    self.users = txtUserControl(self.userControlPanelString)
    if self.users.userDataInit() == userControlErrValue["OK"]:
        self.Config.setContrlMedia("txt")
        self.Config.setControlMediaPath(self.userControlPanelString)
        self.Config.saveServerConfig()
    else:
        uiDebug("serverManager userDataInit error")
    uiDebug("OnButton_textUserButton()")
def OnButton_xmlUserButton(self, evt):
    '''Handle the xml user database.'''
    wildcard = "xml (*.xml)|*.xml"
    try:
        Dir = unicode(os.getcwd(), self.textencoding)
    except:
        Dir = os.getcwd()
    dlg = wx.FileDialog(
        self, message="Choose a xml file",
        defaultDir=Dir,
        defaultFile="",
        wildcard=wildcard,
        style=wx.OPEN
    )
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        self.mainStatusBar.SetLabel(path)
        self.userControlPanelString = path
    dlg.Destroy()
    self.users = xmlUserControl(self.userControlPanelString)
    if self.users.userDataInit() == userControlErrValue["OK"]:
        self.Config.setContrlMedia("xml")
        self.Config.setControlMediaPath(self.userControlPanelString)
        self.Config.saveServerConfig()
    else:
        uiDebug("serverManager userDataInit error")
    uiDebug("serverManager OnButton_xmlUserButton()")
def _call_avrdude(args):
    if utils.is_windows():
        avr_exe_path = os.path.join(pm.RES_PATH, 'avrdude.exe')
        avr_config_path = os.path.join(pm.RES_PATH, 'avrdude.conf')
    elif utils.is_mac():
        avr_exe_path = os.path.join(pm.RES_PATH, 'avrdude')
        avr_config_path = os.path.join(pm.RES_PATH, 'avrdude.conf')
    elif utils.is_linux():
        avr_exe_path = os.path.join(pm.RES_PATH, 'avrdude64' if utils.is64bits() else "avrdude")
        avr_config_path = os.path.join(pm.RES_PATH, 'avrdude.conf' if utils.is64bits() else "avrdude32.conf")
    else:
        raise Exception("Platform not supported")
    os.chmod(avr_exe_path, int("755", 8))  # force avrdude to have executable rights
    avr_exe_path = os.path.normpath(os.path.relpath(avr_exe_path, os.getcwd()))
    avr_config_path = os.path.normpath(os.path.relpath(avr_config_path, os.getcwd()))

    cmd = [avr_exe_path] + ["-C"] + [avr_config_path] + args.split(" ")
    log.debug("Command executed: {}".format(cmd))
    p = subprocess.Popen(cmd, shell=utils.is_windows(),
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=(not utils.is_windows()))
    output = p.stdout.read()
    err = p.stderr.read()
    log.debug(output)
    log.debug(err)
    return output, err
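# Usage sketch for _call_avrdude (the part/programmer/port values are
# assumptions, not from the source; the flags themselves are standard
# avrdude options: -p part, -c programmer, -P port, -b baud, -U memory op):
out, err = _call_avrdude("-p m328p -c arduino -P /dev/ttyACM0 -b 115200 "
                         "-U flash:w:firmware.hex:i")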
def plot_graphs(self, path, count):
    '''Save graphs'''
    Gs = nx.to_agraph(self.gS)
    Gm = nx.to_agraph(self.gM)
    Gs_w = nx.to_agraph(self.gS_w)

    # add color to main nodes
    for node in self.gM.nodes():
        n = Gs.get_node(node)
        n.attr['shape'] = 'box'
        n.attr['style'] = 'filled'
        n.attr['fillcolor'] = 'turquoise'

    # add weight to edges
    for edge in self.gS_w.edges(data=True):
        ed = Gs_w.get_edge(edge[0], edge[1])
        ed.attr['label'] = edge[2]['weight']

    loc = os.getcwd() + path + '/spanning/gS' + str(count) + '.png'
    loc1 = os.getcwd() + path + '/projection/gM' + str(count) + '.png'
    loc2 = os.getcwd() + path + '/spanning_w/gS_w' + str(count) + '.png'
    Gs.layout(prog='dot')   # use dot
    Gm.layout(prog='dot')   # use dot
    Gs_w.layout(prog='dot')
    Gs.draw(loc)
    Gm.draw(loc1)
    Gs_w.draw(loc2)
    return
def test_absolute_paths(self):
    from certbot.configuration import NamespaceConfig

    config_base = "foo"
    work_base = "bar"
    logs_base = "baz"
    server = "mock.server"

    mock_namespace = mock.MagicMock(spec=['config_dir', 'work_dir',
                                          'logs_dir', 'http01_port',
                                          'tls_sni_01_port',
                                          'domains', 'server'])
    mock_namespace.config_dir = config_base
    mock_namespace.work_dir = work_base
    mock_namespace.logs_dir = logs_base
    mock_namespace.server = server
    config = NamespaceConfig(mock_namespace)

    self.assertTrue(os.path.isabs(config.config_dir))
    self.assertEqual(config.config_dir,
                     os.path.join(os.getcwd(), config_base))
    self.assertTrue(os.path.isabs(config.work_dir))
    self.assertEqual(config.work_dir,
                     os.path.join(os.getcwd(), work_base))
    self.assertTrue(os.path.isabs(config.logs_dir))
    self.assertEqual(config.logs_dir,
                     os.path.join(os.getcwd(), logs_base))
    self.assertTrue(os.path.isabs(config.accounts_dir))
    self.assertTrue(os.path.isabs(config.backup_dir))
    self.assertTrue(os.path.isabs(config.csr_dir))
    self.assertTrue(os.path.isabs(config.in_progress_dir))
    self.assertTrue(os.path.isabs(config.key_dir))
    self.assertTrue(os.path.isabs(config.temp_checkpoint_dir))
def parse_args():
    parser = argparse.ArgumentParser(description='Train a Region Proposal Network')
    parser.add_argument('--image_set', dest='image_set', help='can be trainval or train',
                        default='trainval', type=str)
    parser.add_argument('--year', dest='year', help='can be 2007, 2010, 2012',
                        default='2007', type=str)
    parser.add_argument('--root_path', dest='root_path', help='output data folder',
                        default=os.path.join(os.getcwd(), 'data'), type=str)
    parser.add_argument('--devkit_path', dest='devkit_path', help='VOCdevkit path',
                        default=os.path.join(os.getcwd(), 'data', 'VOCdevkit'), type=str)
    parser.add_argument('--pretrained', dest='pretrained', help='pretrained model prefix',
                        default=os.path.join(os.getcwd(), 'model', 'vgg16'), type=str)
    parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
                        default=1, type=int)
    parser.add_argument('--prefix', dest='prefix', help='new model prefix',
                        default=os.path.join(os.getcwd(), 'model', 'rpn'), type=str)
    parser.add_argument('--gpus', dest='gpu_ids', help='GPU device to train with',
                        default='0', type=str)
    parser.add_argument('--begin_epoch', dest='begin_epoch', help='begin epoch of training',
                        default=0, type=int)
    parser.add_argument('--end_epoch', dest='end_epoch', help='end epoch of training',
                        default=8, type=int)
    parser.add_argument('--frequent', dest='frequent', help='frequency of logging',
                        default=20, type=int)
    parser.add_argument('--kv_store', dest='kv_store', help='the kv-store type',
                        default='device', type=str)
    parser.add_argument('--work_load_list', dest='work_load_list', help='work load for different devices',
                        default=None, type=list)
    parser.add_argument('--finetune', dest='finetune', help='second round finetune',
                        action='store_true')
    parser.add_argument('--resume', dest='resume', help='continue training',
                        action='store_true')
    args = parser.parse_args()
    return args
def render_all(template_path=None, output_path=None, static_path=None):
    if not template_path:
        template_path = 'lae_site/templates/'
    if not output_path:
        output_path = 'lae_rendered/'
    if not static_path:
        static_path = 'content/static/'
    env = Environment(loader=MyLoader(template_path))

    # find all the files inside all the subdirectories of the template path
    all_the_things = os.walk(template_path)
    for root, dirs, files in all_the_things:
        for f in files:
            # check that it's a template file
            if f[-5:] == '.html' and f[:1] != '_':
                full_path = root + '/' + f
                # path relative to template_path
                relative_path = full_path[len(template_path) + 1:]
                print "Rendering " + relative_path
                # render the template
                template = env.get_template(relative_path)
                # calculate directory output should go in
                dirname = os.path.dirname(output_path + '/' + relative_path)
                # and if it doesn't exist yet, create it
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                # make rendered html file
                with codecs.open(output_path + '/' + relative_path, 'w', 'utf-8') as render_file:
                    for line in template.render():
                        render_file.write(line)

    if not os.path.isdir(output_path + 'static'):
        subprocess.check_call(['ln', '-s', '../' + static_path, 'static'],
                              cwd=os.getcwd() + '/' + output_path)
        print "Made symlink to static files."
    subprocess.check_call(['python', '-m', 'SimpleHTTPServer', '8002'],
                          cwd=os.getcwd() + '/' + output_path)
'''
Everything the code does operates through an instance of the Flask class.
We followed this format for our own packages, which perform all their
operations on their respective objects and are linked to the database file,
rather than having all functions live in the same app file. app.py just
talks to functions in other objects and links the results together with the
HTML and CSS.
'''

# This stores the page names and links for our navigation bar.
page_link_dict = {
    "Home": "/home/",
    "About": "/about/",
    "Weather": "/weather/",
    "Polls": "/polls/",
}

my_path = os.getcwd().replace('\\', '/') + "/static/audio"
songs = [i.replace(my_path + "\\", "") for i in glob.glob(my_path + "/*.mp3")]

'''
Flask uses decorators, a special Python tool for linking functions together,
in order to redirect requests to our custom server code. "Endpoints"
(app.route) are the paths that tell the server what the client wants and
thus, what function to run. A client will specify where on the site they
want to go, and if that endpoint is assigned to a function, that function
will tell Flask what information to render back to the client.
'''

def log_user_entrance(user):
    with open("User-Logs.txt", "a+") as file:
        file.write(f"User: {user}\tTime: {datetime.now()}\n")

@app.route("/")
"../../../../../../..", "../../../../../../../..", "../../../../../../../../.." ] head = os.path.dirname(sys.argv[0]) if len(head) > 0: path = [os.path.join(head, p) for p in path] path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ] if len(path) == 0: raise RuntimeError("can't find toplevel directory!") sys.path.append(os.path.join(path[0], "scripts")) import TestUtil router = os.path.join(TestUtil.getCppBinDir(), "glacier2router") # # Generate the crypt passwords file # TestUtil.hashPasswords(os.path.join(os.getcwd(), "passwords"), {"userid": "abc123"}) args = ' --Ice.Warn.Dispatch=0' + \ ' --Ice.Warn.Connections=0' + \ ' --Glacier2.Filter.Category.Accept="c1 c2"' + \ ' --Glacier2.Filter.Category.AcceptUser="******"' + \ ' --Glacier2.SessionTimeout="30"' + \ ' --Glacier2.Client.Endpoints="default -p 12347"' + \ ' --Glacier2.Server.Endpoints="tcp -h 127.0.0.1"' \ ' --Ice.Admin.Endpoints="tcp -h 127.0.0.1 -p 12348"' + \ ' --Ice.Admin.InstanceName=Glacier2' + \ ' --Glacier2.CryptPasswords="' + os.path.join(os.getcwd(), "passwords") + '"' sys.stdout.write("starting router... ") sys.stdout.flush() routerConfig = TestUtil.DriverConfig("server")
num_classes = 2

################################################################################
# HYPERPARAMETERS AND DESIGN CHOICES
num_neurons = 128
batch_size = 64
ACTIV_FN = "relu"
activation_fn = cnn.get_activ_fn(ACTIV_FN)
num_epochs = 50
learn_rate = 0.001
drop_prob = 0.1
optim = "Adam"

# callbacks for Save weights, Tensorboard
# creating a new directory for each run using timestamp
folder = os.path.join(os.getcwd(),
                      datetime.now().strftime("%d-%m-%Y_%H-%M-%S"),
                      str(ACTIV_FN))
tb_callback = TensorBoard(log_dir=folder)

# Build, train, and test model
model = cnn.build_model(input_shape, activation_fn, learn_rate,
                        drop_prob, num_neurons, num_classes)
train_accuracy, train_loss, valid_accuracy, valid_loss = cnn.train_model(
    model, train_images, train_labels, batch_size, num_epochs,
    valid_images, valid_labels, tb_callback)
test_accuracy, test_loss, predictions = cnn.test_model(model, test_images,
                                                       test_labels)

# # # save test set results to csv
# predictions = np.round(predictions)
# -------------
#%% User Edits
# -------------

# Indicate machine to set path
machine = 'pdwang'

# Set directory and load data depending on machine
if machine == 'local-glenn':
    os.chdir('/Users/gliu/Downloads/2020_Fall/6.862/Project/predict_amv/')
    outpath = '/Users/gliu/Downloads/2020_Fall/6.862/Project'
    sst_normed = np.load('../CESM_data/CESM_SST_normalized_lat_weighted.npy').astype(np.float32)
    sss_normed = np.load('../CESM_data/CESM_SSS_normalized_lat_weighted.npy').astype(np.float32)
    psl_normed = np.load('../CESM_data/CESM_PSL_normalized_lat_weighted.npy').astype(np.float32)
else:
    outpath = os.getcwd()
    sst_normed = np.load('../../CESM_data/CESM_sst_normalized_lat_weighted_2deg_NAT_Ann.npy').astype(np.float32)
    sss_normed = np.load('../../CESM_data/CESM_sss_normalized_lat_weighted_2deg_NAT_Ann.npy').astype(np.float32)
    psl_normed = np.load('../../CESM_data/CESM_psl_normalized_lat_weighted_2deg_NAT_Ann.npy').astype(np.float32)

# Data preparation settings
#tunits = 'year'              # Indicate units of time ('month' or 'year')
leads = np.arange(0, 25, 1)   # Time ahead (in tunits) to forecast AMV
tstep = 1032                  # Total number of time units
percent_train = 0.8           # Percentage of data to use for training (remaining for testing)
ens = 40                      # Ensemble members to use

# Select variable
Forked from cluster.py to cluster_mem.py to load dev data in memory
and try other methods (DBSCAN).

Do I need to load the entire dataset into memory?
"""

import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sys
import wandb

PROJECT_DIR = os.path.abspath(os.path.join(os.getcwd(), '../'))
print(PROJECT_DIR)
sys.path.append(PROJECT_DIR)

from project_core.models import model_factory, LinearModel
from project_core.utils import load_image, build_files_dataframe, prune_file_list
from sklearn.cluster import MiniBatchKMeans, DBSCAN
from sklearn.metrics import (adjusted_rand_score, balanced_accuracy_score,
                             normalized_mutual_info_score)
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator


def main(args):
def select_class_file(self):
    options = QFileDialog.Options()
    options |= QFileDialog.DontUseNativeDialog
    fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()",
                                              os.getcwd(),
                                              "Text Files (*.txt);;All Files (*)",
                                              options=options)
    self.system["class_file"] = fileName
    self.tb5.setText(fileName)
    self.system["model"] = self.cb1.currentText()
    self.system["use_gpu"] = self.cb3.currentText()
    self.system["conf_thresh"] = self.e4.text()
    self.system["img_short_side"] = self.e6.text()
    self.system["img_long_side"] = self.e7.text()
    self.system["mean"] = self.e8.text()
    self.system["std"] = self.e9.text()
    with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
        json.dump(self.system, outfile)
import os
from requests import get  # to make GET request
import urllib, tarfile


def download(url, file_name):
    # open in binary mode
    with open(file_name, "wb") as file:
        # get request
        response = get(url)
        # write to file
        file.write(response.content)


path_cwd = os.getcwd()
path = path_cwd + "/extra_data/body_module"

try:
    os.makedirs(path)
except OSError:
    print("Creation of the directory %s failed" % path)
    os.chdir(path)
else:
    print("Successfully created the directory %s " % path)
    os.chdir(path)

url = "http://visiondata.cis.upenn.edu/spin/data.tar.gz"
filename = 'data.tar.gz'
print('Downloading: ' + filename)
download(url, filename)
tf = tarfile.open(filename)
tf.extractall()
def write_2chan_franalyzeFile(xytab, frTrials_dico, ratio, shuffled, nbTrials,
                              filename, simuTime, seed, pathToFile=os.getcwd(),
                              model=None, rezero=False, reversedChans=False):
    print "------------- Saving data -----------"
    # retrieving the string for the xytab values
    xytext = ";".join(map(str, xytab))
    # first writing the model and the antag
    textToWrite = "#%d\nratio=%.3f\nseed=%d\nSimuDuration=%s\nshuffled=%s\nNbTrials=%d\nReZero=%s\nReversedChannels=%s\n\n%s\n\n" % (
        model, ratio, seed, simuTime, shuffled, nbTrials, rezero, reversedChans, xytext)
    directory = os.path.join(pathToFile, "data/files/")
    # writing every trial for each (x,y) tuple
    for keys, values in frTrials_dico.items():
        keyTxt = str(keys[0]) + ";" + str(keys[1]) + ";"
        # values is a list of (3 int and a str) tuples
        valTxt = ";".join(map(lambda x: str(x[0]) + "," + str(x[1]) + "," + str(x[2]) + "," + x[3], values))
        textToWrite += keyTxt + valTxt + "\n"
    try:
        with open(os.path.join(directory, filename), "w") as outFile:
            outFile.write(textToWrite)
    except IOError as e:
        os.system("mkdir -p %s" % directory)
        with open(os.path.join(directory, filename), "w") as outFile:
            outFile.write(textToWrite)
    print "\tData saved in %s file in %s directory" % (filename, directory)
def main():
    """Run when running this module as the primary one."""
    EVALUATION_SCALES = [1.0]  # must be all floats!

    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', type=str, default='pspnet50_ade20k',
                        help='Model/Weights to use',
                        choices=['pspnet50_ade20k',
                                 'pspnet101_cityscapes',
                                 'pspnet101_voc2012'])
    parser.add_argument('-i', '--input_path', type=str, default='../example_images',
                        help='Path to the input images')
    parser.add_argument('-o', '--output_path', type=str, default='../example_results',
                        help='Path to output')
    parser.add_argument('-g', '--groundtruth_path', type=str, default='../example_groundtruth',
                        help='Path to groundtruth')
    parser.add_argument('--id', default="0")
    parser.add_argument('-s', '--sliding', action='store_true',
                        help="Whether the network should be slided over the original image for prediction.")
    parser.add_argument('-f', '--flip', action='store_true',
                        help="Whether the network should predict on both image and flipped image.")
    parser.add_argument('-ms', '--multi_scale', action='store_true',
                        help="Whether the network should predict on multiple scales.")
    parser.add_argument('-hm', '--heat_maps', action='store_true',
                        help="Whether the network should display heatmaps.")
    parser.add_argument('-v', '--vis', action='store_false',
                        help="Whether an interactive plot should be displayed.")
    parser.add_argument('-cci', '--complete_coarse_image', action='store_true',
                        help="Whether a coarse image should be completed with predictions.")
    parser.add_argument('-e', '--evaluate', action='store_true',
                        help="Whether an evaluation against groundtruth should be attempted.")
    args = parser.parse_args()

    environ["CUDA_VISIBLE_DEVICES"] = args.id

    sess = tf.Session()
    K.set_session(sess)

    with sess.as_default():
        print(args)
        import os
        cwd = os.getcwd()
        print("Running in %s" % cwd)

        image_paths = []
        if isfile(args.input_path):
            image_paths.append(args.input_path)
        elif isdir(args.input_path):
            file_types = ('png', 'jpg')
            for file_type in file_types:
                image_paths.extend(glob.glob(join(args.input_path + '/**/*.' + file_type),
                                             recursive=True))
            image_paths = sorted(image_paths)
        # print(image_paths)

        if "pspnet50" in args.model:
            pspnet = PSPNet50(nb_classes=150, input_shape=(473, 473),
                              weights=args.model)
            if "ade20k" in args.model:
                from ade20k_labels import id2label, name2label
        elif "pspnet101" in args.model:
            if "cityscapes" in args.model:
                pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
                                   weights=args.model)
                from cityscapes_labels import id2label, name2label
            if "voc2012" in args.model:
                pspnet = PSPNet101(nb_classes=21, input_shape=(473, 473),
                                   weights=args.model)
                from pascal_voc_labels import id2label, name2label
        else:
            print("Network architecture not implemented.")

        if args.multi_scale:
            EVALUATION_SCALES = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]  # original implementation, must be all floats!
        for image_path in image_paths:
            image_name, ext = splitext(os.path.basename(image_path))
            image_name = image_name.replace('_leftImg8bit', '')  # strip leftImg8bit tag for gt matching and producing groundtruth
            print("Predicting image name: %s" % (image_name + ext))
            img = misc.imread(image_path)
            class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES,
                                               args.sliding, args.flip)
            if args.heat_maps:
                # show_class_heatmap(class_scores, 'person')
                show_class_heatmaps(class_scores)
            # visualize_prediction(img, class_scores, id2label)

            class_image = np.argmax(class_scores, axis=2)

            output_path, _ = splitext(args.output_path)
            if not os.path.exists(output_path):
                os.makedirs(output_path)
            output_path = join(output_path, image_name)
            print("Writing results to %s" % (output_path + ext))

            # probability of the most likely class, a vague measure of the network's confidence
            confidence_map = np.max(class_scores, axis=2)
            colored_class_image = color_class_image(class_image, id2label)
            # colored_class_image is [0.0-1.0], img is [0-255]
            alpha_blended = 0.5 * colored_class_image + 0.5 * img

            if "cityscapes" in args.model:
                class_image = trainid_to_class_image(class_image)
            misc.imsave(output_path + "_gtFine_labelIds" + ext, class_image)
            misc.imsave(output_path + "_seg" + ext, colored_class_image)
            misc.imsave(output_path + "_probs" + ext, confidence_map)
            misc.imsave(output_path + "_seg_blended" + ext, alpha_blended)

            gt_path = find_matching_gt(args.groundtruth_path, image_name,
                                       args.model, verbose=True)
            if gt_path is not None:
                if args.complete_coarse_image:  # only for cityscapes
                    try:
                        coarse_image = misc.imread(gt_path)
                        class_image = complete_coarse_image(coarse_image, class_image)
                        misc.imsave(output_path + "_gtFine_labelIds" + ext, class_image)
                    except AttributeError as err:
                        print("Warning: Could not read groundtruth: %s" % err)
                if args.evaluate:
                    if "cityscapes" in args.model:
                        evaluate_iou([class_image], [misc.imread(gt_path)], classes=35)
                    else:
                        # gt_image to class image
                        gt_image = misc.imread(gt_path)
                        gt_class_image = gt_image_to_class_image(gt_image, id2label)
                        evaluate_iou([class_image], [gt_class_image],
                                     classes=pspnet.nb_classes)
            else:
                print("Could not find groundtruth for %s" % image_name)
import os
import random
import unittest

from MyFunction import Search_Website, Login_Admin
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import pytest
import allure

current_url = os.getcwd()
screenshot_folder = current_url + '/images'
if not os.path.exists(screenshot_folder):
    os.mkdir(screenshot_folder)
admin_images = current_url + '/images/Admin'
if not os.path.exists(admin_images):
    os.mkdir(admin_images)


class TestAdmin(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome(current_url + '/chromedriver.exe')
        self.start_url = "https://admin.myreales.tk/login"
        self.driver.maximize_window()
#!/usr/bin/env python3
from __future__ import unicode_literals, absolute_import, print_function, division

import sys, re, time, traceback
import socket, asyncore, asynchat, ssl, select
import os, codecs
import errno
import tools

IRC_CODES = ('001', '354', '002', '003', '004', '005', '253', '251', '252',
             '254', '255', '265', '266', '250', '315', '328', '333', '352',
             '353', '354', '366', '372', '375', '376', 'QUIT', 'NICK',
             'JOIN')
cwd = os.getcwd()


class Origin(object):
    source = re.compile(r'([^!]*)!?([^@]*)@?(.*)')

    def __init__(self, bot, source, args):
        match = Origin.source.match(source or '')
        self.nick, self.user, self.host = match.groups()

        target = mode = mode_target = names = other = other2 = other4 = rest = other5 = other6 = None
        arg_len = len(args)
        if arg_len > 1:
            target = args[1]
        if arg_len > 2:
            mode = args[2]
        if arg_len > 3:
            mode_target = args[3]
        if arg_len > 4:
def write_2chan_chanChoicefile(xytab, trials_dico, ratio, shuffled, filename,
                               simuTime, pathToFile=os.getcwd(), model=0):
    print "------------- Saving data -----------"
    # retrieving the string for the xytab values
    xytext = ";".join(map(str, xytab))
    # first writing the model and the antag
    textToWrite = "#%d\nratio=%.3f\nSimuDuration=%s\nshuffled=%s\n%s\n\n" % (
        model, ratio, simuTime, shuffled, xytext)
    directory = os.path.join(pathToFile, "data/files/")
    # writing every trial for each (x,y) tuple
    for keys, values in trials_dico.items():
        keyTxt = str(keys[0]) + ";" + str(keys[1]) + ";"
        # values is already a list of strings ["1", "2", "0", "3", ...]
        valTxt = ";".join(values)
        textToWrite += keyTxt + valTxt + "\n"
    try:
        with open(os.path.join(directory, filename), "w") as outFile:
            outFile.write(textToWrite)
    except IOError as e:
        os.system("mkdir -p %s" % directory)
        with open(os.path.join(directory, filename), "w") as outFile:
            outFile.write(textToWrite)
    print "\tData saved in %s file in %s directory" % (filename, directory)
import json
import os

f = open('city.list.json', 'r')
saveFileName = 'sorted_CN_city.txt'

dic = {}
for line in f:
    if '"CN"' in line:
        j = json.loads(line)
        key = j["name"]
        dic[key] = line

# sort by city name (renamed from `dict` to avoid shadowing the builtin)
sorted_items = sorted(dic.iteritems(), key=lambda d: d[0])

for fileName in os.listdir(os.getcwd()):
    if fileName == saveFileName:
        os.remove(fileName)

g = open(saveFileName, 'w')
for item in sorted_items:
    g.write(item[1])
g.close()
f.close()
import os
import sys

sys.path.extend([os.path.dirname(os.getcwd())])

from torch.utils.data import DataLoader
from torchvision import transforms
from Jizong_code import medicalDataLoader
from Jizong_code.network import UNet, SegNet
from Jizong_code.enet import Enet
import numpy as np, matplotlib.pyplot as plt
from Jizong_code.criterion import CrossEntropyLoss2d, partialCrossEntropyLoss2d, logBarrierLoss, dice_loss
import pandas as pd, torch, torch.nn as nn, torch.nn.functional as F
from tqdm import tqdm
from torchnet.meter import AverageValueMeter
from Jizong_code.utils import pred2segmentation, dice_loss, Colorize
from Jizong_code.visualize import Dashboard

board_train_image = Dashboard(server='http://turing.livia.etsmtl.ca', env="train_image")
board_val_image = Dashboard(server='http://turing.livia.etsmtl.ca', env="vali_image")
board_loss = Dashboard(server='http://turing.livia.etsmtl.ca', env="loss")

cuda_device = "0"
batch_size = 1
batch_size_val = 1
num_workers = 0
lr = 0.0002
max_epoch = 100
root_dir = '../ACDC-2D-All'
model_dir = 'model'

os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
--------------------------------
'''

# Import the required python modules
import arcpy
import os
import sys
import re

############################ VARIABLES ####################################

# Database connection to ArcGIS Server containing floor maps for buildings
database_conn = "ehsprod2.ehs.unr.edu.sde"
# The current building to process
buildings = ["sde.BUILDING.ansari_business_building"]

############################ CONSTANTS ####################################

# Output location to create the intermediate files and output protobuffer files
current_working_dir = os.getcwd()
outputPath = current_working_dir + os.sep + "Output"
# The output projected co-ordinate system
outSpatialReference = "NAD 1983 UTM Zone 11N"
# Features from building dataset to process
features = ["rooms", "doors"]
# The maximum distance that will be used to combine adjacent features for navigable space
aggregationDist = "4 Feet"
# Query used to find open spaces
rooms_query = "(UPPER([ROOM_TYPE]) LIKE '%CORRIDOR%' OR UPPER([ROOM_TYPE]) LIKE '%VESTIBULE%' OR UPPER([ROOM_TYPE]) LIKE '%LOBBY%')"
# Query used to find the elevators and stairs on a floor
stairs_query = "(UPPER([ROOM_TYPE]) LIKE '%ELEVATOR%' OR UPPER([ROOM_TYPE]) LIKE '%STAIRS%')"

# set arcpy environment variable for overwriting database outputs
arcpy.env.overwriteOutput = True

#############################################################################
# visualize
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.style as style
import seaborn as sns
from matplotlib import pyplot
from matplotlib.ticker import ScalarFormatter
sns.set_context("talk")
style.use('seaborn-colorblind')

# custom
from lgb_param_models import lgb_model
from xgb_param_models import xgb_model
from catb_param_models import catb_model
from lin_param_models import lin_model
from nn_param_models import nn_model

mypath = os.getcwd()
sys.path.append(mypath + '/code/')
from train_helper import get_oof_ypred
from cv_methods import GroupKFold, StratifiedGroupKFold


class RunModel(object):
    """
    Model Fitting and Prediction Class:

    :INPUTS:

    :train_df: train pandas dataframe
    :test_df: test pandas dataframe
    :target: target column name (str)
    :features: list of feature names
    :categoricals: list of categorical feature names. Note that categoricals
        need to be in 'features'
import os
import glob

from tqdm import tqdm
import pandas as pd
from joblib import Parallel, delayed

num_loc = 3
PWD = os.getcwd()
csvs = glob.glob('./csv/*/*')


def raster(category):
    df = pd.read_csv(category, index_col=0)
    csv_num = int(len(df) / num_loc)
    bar = tqdm(desc=str(category), total=csv_num, leave=False)
    for i in range(csv_num):
        for j in range(num_loc - 1):  # bubble sort
            for k in range(num_loc - 1, j, -1):
                if df.loc[num_loc*i+k-1, 'y'] > df.loc[num_loc*i+k, 'y']:
                    temp_x = df.loc[num_loc*i+k-1, 'x']
                    temp_y = df.loc[num_loc*i+k-1, 'y']
                    df.loc[num_loc*i+k-1, 'x'] = df.loc[num_loc*i+k, 'x']
                    df.loc[num_loc*i+k-1, 'y'] = df.loc[num_loc*i+k, 'y']
                    df.loc[num_loc*i+k, 'x'] = temp_x
                    df.loc[num_loc*i+k, 'y'] = temp_y
                elif df.loc[num_loc*i+k-1, 'y'] == df.loc[num_loc*i+k, 'y']:
    t.cell(0, 2).paragraphs[0].clear()     # clear the original 'name' string
    t.cell(0, 2).paragraphs[0].add_run(c)  # replace it with the 'serial number'


# helper that (re)creates a folder from scratch
def make_folder(p):
    if os.path.exists(p):   # check whether the folder exists
        shutil.rmtree(p)    # if it does, remove it together with all its files
    os.mkdir(p)


if __name__ == '__main__':
    # global variables: c holds the 'serial number' label, a is a counter
    c = '序号'
    a = 1
    file_path = os.getcwd()  # get the path of the current .py file
    local_path = file_path + '/tihuanmingzi'
    make_folder(local_path)
    # iterate over all .docx files in the current path
    for file_name in glob.glob(os.path.join(file_path, '*.docx')):
        file = docx.Document(file_name)
        tables = file.tables  # read all tables
        name = {}             # dict that stores name -> serial number
        # iterate over all tables, one at a time
        for i in range(len(tables)):
            t = tables[i]     # hand each table to the parameter t
            if t.cell(0, 0).text == '编号':  # find the target table
                if t.cell(0, 3).text + t.cell(0, 1).text not in name:
                    # check whether this person's info was already recorded
                    p = Process(target=replace_name_and_count(t))
                    # a Process object is just a subtask; running it makes the
                    # system spawn a child process
# implemented as the corresponding functions,
# and imported into this file from easy.py

import os, shutil
import easy

while True:
    print('1. Change to a folder')
    print('2. List the contents of the current folder')
    print('3. Delete a folder')
    print('4. Create a folder')
    print('5. Exit')

    command = input('>>> ')

    if command == '5':
        break
    if command == '2':
        print('Current folder: ', os.getcwd())
        list = easy.view_dir()
    if command == '1':
        new_dir = input('Enter a directory name: ')
        if new_dir == '/':
            a = os.getcwd().split('\\')[:-1]
            new_dir = '\\'.join(i for i in os.getcwd().split('\\')[:-1])
        try:
            os.chdir(new_dir)
            print('changed directory')
            print('Current folder: ', os.getcwd())
        except FileNotFoundError:
            print('cannot change directory - invalid path')
def main(argv):
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose", "-v", action="count", default=1,
                        help="more verbosity")
    parser.add_argument("--no-build", "-n", action="store_true", default=False,
                        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only", "-b", action="store_true", default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests", action="store_true", default=False,
                        help="Run doctests in module")
    parser.add_argument("--coverage", action="store_true", default=False,
                        help=("report coverage of project code. HTML output goes "
                              "under build/coverage"))
    parser.add_argument("--gcov", action="store_true", default=False,
                        help=("enable C code coverage via gcov (requires GCC). "
                              "gcov output goes to build/**/*.gc*"))
    parser.add_argument("--lcov-html", action="store_true", default=False,
                        help=("produce HTML for C code coverage information "
                              "from a previous run with --gcov. "
                              "HTML output goes to build/lcov/"))
    parser.add_argument("--mode", "-m", default="fast",
                        help="'fast', 'full', or something that could be "
                             "passed to nosetests -A [default: fast]")
    parser.add_argument("--submodule", "-s", default=None,
                        help="Submodule whose tests to run (cluster, constants, ...)")
    parser.add_argument("--pythonpath", "-p", default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests", "-t", action='append',
                        help="Specify tests to run")
    parser.add_argument("--python", action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython", "-i", action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell", action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug", "-g", action="store_true",
                        help="Debug build")
    parser.add_argument("--show-build-log", action="store_true",
                        help="Show build output rather than using a log file")
    parser.add_argument("--bench", action="store_true",
                        help="Run benchmark suite instead of test suite")
    parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
                        help="Arguments to pass to Nose, Python or shell")
    args = parser.parse_args(argv)

    if args.lcov_html:
        # generate C code coverage output
        lcov_generate()
        sys.exit(0)

    if args.pythonpath:
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)

    if args.gcov:
        gcov_reset_counters()

    if not args.no_build:
        site_dir = build_project(args)
        sys.path.insert(0, site_dir)
        os.environ['PYTHONPATH'] = site_dir

    extra_argv = args.args[:]
    if extra_argv and extra_argv[0] == '--':
        extra_argv = extra_argv[1:]

    if args.python:
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = imp.new_module('__main__')
            ns = dict(__name__='__main__',
                      __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)

    if args.ipython:
        import IPython
        IPython.embed(user_ns={})
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        os.execv(shell, [shell] + extra_argv)
        sys.exit(1)

    if args.coverage:
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cover-html',
                       '--cover-html-dir=' + dst_dir]

    test_dir = os.path.join(ROOT_DIR, 'build', 'test')

    if args.build_only:
        sys.exit(0)
    elif args.submodule:
        modname = PROJECT_MODULE + '.' + args.submodule
        try:
            __import__(modname)
            if args.bench:
                test = sys.modules[modname].bench
            else:
                test = sys.modules[modname].test
        except (ImportError, KeyError, AttributeError) as e:
            print("Cannot run tests for %s (%s)" % (modname, e))
            sys.exit(2)
    elif args.tests:
        def fix_test_path(x):
            # fix up test path
            p = x.split(':')
            p[0] = os.path.relpath(os.path.abspath(p[0]), test_dir)
            return ':'.join(p)

        tests = [fix_test_path(x) for x in args.tests]

        def test(*a, **kw):
            extra_argv = kw.pop('extra_argv', ())
            extra_argv = extra_argv + tests[1:]
            kw['extra_argv'] = extra_argv
            from numpy.testing import Tester
            if args.bench:
                return Tester(tests[0]).bench(*a, **kw)
            else:
                return Tester(tests[0]).test(*a, **kw)
    else:
        __import__(PROJECT_MODULE)
        if args.bench:
            test = sys.modules[PROJECT_MODULE].bench
        else:
            test = sys.modules[PROJECT_MODULE].test

    # Run the tests under build/test
    try:
        shutil.rmtree(test_dir)
    except OSError:
        pass
    try:
        os.makedirs(test_dir)
    except OSError:
        pass

    shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
                    os.path.join(test_dir, '.coveragerc'))

    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        if args.bench:
            result = test(args.mode,
                          verbose=args.verbose,
                          extra_argv=extra_argv)
        else:
            result = test(args.mode,
                          verbose=args.verbose,
                          extra_argv=extra_argv,
                          doctests=args.doctests,
                          coverage=args.coverage)
    finally:
        os.chdir(cwd)

    if isinstance(result, bool):
        sys.exit(0 if result else 1)
    elif result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
## License: GPL v.2
##
## Purpose: Post processing script
################################################################################

import fileinput
import os
import sys
import shutil

cmake_src_path = os.path.join(sys.argv[1], 'CMakeLists.txt')

if not os.path.exists(cmake_src_path):
    exit('Parse path not exists')

utilfile = os.path.join(os.getcwd(), os.pardir, 'cmake', 'util.cmake')

# Get values
ft_major = "0"
ft_minor = "0"
ft_patch = "0"

major_get = False
minor_get = False
patch_get = False


def extract_value(text):
    val_text = text.split("\"")
    return val_text[1]
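# extract_value() simply returns the first double-quoted token on a line,
# e.g. for a CMake line (input is illustrative):
#   extract_value('set(VERSION_MAJOR "2")')  ->  '2'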
from transformers.configuration_bert import BertConfig
from tokenization_numerical import BertNumericalTokenizer
from transformers.optimization import get_linear_schedule_with_warmup

import wandb
from metrics import log_wandb, summarize_metrics, anomaly_evaluation
from metrics import (exponent_metrics, loss_metrics, mantissa_metrics,
                     regression_metrics, numeracy_metrics, flow_metrics,
                     log_metrics)
from metrics import anomaly_sample
from metrics import save_metrics, save_results, save_args

PREGENERATED_DATA = {
    "fin-all": 'news',
    "fin-dol": 'news_dollar',
    "sci-doc": 'scidocs'
}

CHECKPOINT_PATH = os.getcwd() + '/checkpoints/'

log_format = '%(asctime)-10s: %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)


def get_model(args):
    if args.model_name == 'GMMBert':
        NumberBertModel = GMMBert
        args.do_gmm = True
    elif args.model_name == 'LogBert':
        NumberBertModel = LogBert
        args.do_log = True
    elif args.model_name == 'ExpBert':
        NumberBertModel = ExponentBert
        args.do_exp = True
""" Created on Thu Sep 23 2019 @author: fkaragul """ import osmnx as ox import networkx as nx import numpy as np import pandas as pd import matplotlib.cm as cm import os # os.chdir('C:\\giraffe\\viasat_data\\gv_net') os.chdir('C:\\python\\projects\\giraffe\\viasat_data\\reti_VALENTI\\gv_net') os.getcwd() #load Graph Bracciano = ox.load_graphml('network_Bracciano_6km_epgs4326.graphml') # way class: mean, max, min (make a DICTIONARY) these are the "keys" # these numbers are the speeds on different type of road way_dict={ "residential" : [ 30 , 50 , 10 ], "secondary" : [ 40 , 90 , 30 ], "primary" : [ 50 , 70 , 20 ], "tertiary" : [ 35 , 70 , 10 ], "unclassified" :[ 40 , 60 , 10 ], "secondary_link": [ 40 , 55 , 30 ], "trunk" : [ 70 , 90 , 40 ], "tertiary_link": [ 35 , 50 , 30 ],
def url_for_version(self, version):
    return "file://{0}/maker-{1}.tgz".format(os.getcwd(), version)
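# url_for_version() is the hook a Spack package can override to tell Spack
# where each version's archive lives; here it points at a tarball sitting in
# the directory spack is run from. A standalone check of the string it builds
# (the directory and version values are illustrative):
#   "file://{0}/maker-{1}.tgz".format("/home/user/downloads", "2.31.10")
#   -> "file:///home/user/downloads/maker-2.31.10.tgz"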
def load(filename):
    input_data = {}

    if os.path.isfile(os.path.join(os.getcwd(), 'input', filename)):
        if filename.split(".")[-1] == 'xls':
            workbook = xlrd.open_workbook(
                os.path.join(os.getcwd(), 'input', filename))
        elif filename.split(".")[-1] == 'xlsx':
            # Note: openpyxl has no open_workbook(); load_workbook() is its
            # reader. The cell access below still uses the xlrd API, so the
            # xlsx branch needs a matching accessor to work end to end.
            workbook = openpyxl.load_workbook(
                os.path.join(os.getcwd(), 'input', filename))
        else:
            print('Extension of file not allowed')
            pass
    else:
        print('File does not exist')
        pass

    worksheet = workbook.sheet_by_index(0)

    # Refrigerant
    input_data["refrigerant"] = worksheet.cell_value(22, 3)

    # Evaporator
    input_data["ev_temperature"] = worksheet.cell_value(5, 3) + k        # [C] Evaporator temperature
    input_data["ev_super_heat"] = worksheet.cell_value(6, 3)             # [K] Evaporator outlet super heating
    input_data["ev_pressure_drop"] = worksheet.cell_value(7, 3) * 1e3    # [Pa] Evaporator pressure drop

    # Suction line
    input_data["sl_temperature_change"] = worksheet.cell_value(8, 3)     # [K] Superheat (suction line)
    input_data["sl_pressure_drop"] = worksheet.cell_value(9, 3) * 1e3    # [Pa] Pressure drop (suction line)

    # Compressor
    input_data["capacity_volumetric"] = worksheet.cell_value(10, 3)      # [-] Volumetric capacity
    input_data["efficiency_isentropic"] = worksheet.cell_value(11, 3)    # [-] Isentropic efficiency
    input_data["efficiency_volymetric"] = worksheet.cell_value(12, 3)    # [-] Volumetric efficiency

    # Discharge line
    input_data["dl_temperature_change"] = worksheet.cell_value(13, 3)    # [K] Temperature change (discharge line)
    input_data["dl_pressure_drop"] = worksheet.cell_value(14, 3) * 1e3   # [Pa] Pressure drop (discharge line)

    # Condenser
    input_data["co_temperature"] = worksheet.cell_value(15, 3) + k       # [C] Temperature
    input_data["co_sub_cooling"] = worksheet.cell_value(16, 3)           # [K] Outlet sub cooling
    input_data["co_pressure_drop"] = worksheet.cell_value(17, 3) * 1e3   # [Pa] Pressure drop

    # Liquid line
    input_data["ll_temperature_change"] = worksheet.cell_value(18, 3)    # [K] Temperature change (liquid line)
    input_data["ll_pressure_drop"] = worksheet.cell_value(19, 3) * 1e3   # [Pa] Pressure drop (liquid line)

    return input_data
import sys, os
sys.path.append(os.path.dirname(os.getcwd()))

from bs4 import BeautifulSoup
from beautifultable import BeautifulTable
from _tools import *
from _helpers import *

STARTING_URL = "https://www.tripadvisor.fr/Restaurants-g187147-Paris_Ile_de_France.html"

restaurants = {}
raw_html = simple_get(STARTING_URL)
if raw_html:
    print('Connection successful...')
    html = BeautifulSoup(raw_html, 'html.parser')
    # TripAdvisor restaurant blocks carry ids of the form "eatery_<key>"
    restaurant_nodes = html.select('[id^="eatery_"]')
    for r in restaurant_nodes:
        key = get_key(r)
        restaurants[key] = {
            'name': get_name(r),
            'rating': get_rating(r),
            'reviews': get_reviews(r),
            'rank': get_rank(r),
        }
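# BeautifulTable is imported above but unused in this excerpt; a hedged sketch
# of rendering the scraped dict with it (the attribute names follow the
# beautifultable >= 1.0 API, which differs from older releases):
table = BeautifulTable()
table.columns.header = ['name', 'rating', 'reviews', 'rank']
for key, info in restaurants.items():
    table.rows.append([info['name'], info['rating'], info['reviews'], info['rank']])
print(table)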
def writeFrame(w2if, lwr, n):
    """Re-capture the current window contents and write frame n as a PNG."""
    w2if.Modified()  # force the window-to-image filter to re-render
    filename = os.path.join(os.getcwd(), "frames", "vd_lin50%05d.png" % n)
    lwr.SetFileName(filename)
    lwr.Write()
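# A hedged usage sketch for writeFrame(), assuming the VTK pipeline the
# parameter names suggest: w2if is a vtkWindowToImageFilter attached to an
# existing render window, and lwr is a vtkPNGWriter reading from it. The
# render_window object and the frame count are placeholders.
import vtk

w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(render_window)  # render_window: an existing vtkRenderWindow
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
for n in range(100):
    # ... advance the simulation and re-render here ...
    writeFrame(w2if, lwr, n)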
def reference(filename):
    """Read the reference cycle states from the input spreadsheet.

    Each row of the returned array is one thermodynamic state; the columns
    are temperature [K], pressure [Pa], enthalpy [J/kg], entropy [J/(kg K)],
    density [kg/m3] and vapor mass quality [-].
    """
    # numpy.float was removed in NumPy >= 1.20; plain float is equivalent
    reference_data = numpy.zeros((10, 6), dtype=float)

    # Different reader functions depending on the Excel file version
    if filename.split(".")[-1] == 'xls':
        workbook = xlrd.open_workbook(
            os.path.join(os.getcwd(), 'input', filename))
    elif filename.split(".")[-1] == 'xlsx':
        # openpyxl has no open_workbook(); load_workbook() is the correct
        # call (as in load(), the xlrd-style cell access below still only
        # works for the .xls branch)
        workbook = openpyxl.load_workbook(
            os.path.join(os.getcwd(), 'input', filename))
    else:
        raise ValueError('Extension of file not allowed')

    # Set working sheet
    worksheet = workbook.sheet_by_index(0)

    # Full states: (row in reference_data, first worksheet row, worksheet column).
    # Each state's properties sit in five consecutive worksheet rows: T, p, h, s, rho.
    states = [
        (0, 108, 3),  # after suction line / before compressor
        (1, 108, 4),  # after compressor / before discharge line
        (2, 124, 3),  # after discharge line / condenser inlet
        (5, 124, 4),  # condenser outlet / before liquid line
        (6, 141, 3),  # after liquid line / before expansion valve
        (7, 47, 3),   # after expansion valve / evaporator inlet
        (9, 47, 4),   # evaporator outlet / before suction line
    ]
    for i, row, col in states:
        reference_data[i, 0] = worksheet.cell_value(row, col) + k        # Temperature
        reference_data[i, 1] = worksheet.cell_value(row + 1, col) * 1e3  # Pressure
        reference_data[i, 2] = worksheet.cell_value(row + 2, col) * 1e3  # Enthalpy
        reference_data[i, 3] = worksheet.cell_value(row + 3, col) * 1e3  # Entropy
        reference_data[i, 4] = worksheet.cell_value(row + 4, col)        # Density

    # Saturation points (temperature only) and the evaporator inlet quality
    reference_data[3, 0] = worksheet.cell_value(130, 3) + k  # Condenser dew point
    reference_data[4, 0] = worksheet.cell_value(130, 4) + k  # Condenser bubble point
    reference_data[8, 0] = worksheet.cell_value(53, 4) + k   # Evaporator dew point
    reference_data[7, 5] = worksheet.cell_value(52, 3)       # Vapor mass quality

    return reference_data
import os
import argparse

import cv2
from imageai.Detection import ObjectDetection
from tqdm import tqdm

execution_path = os.getcwd()

parser = argparse.ArgumentParser(description="Detect and crop cars from an image dataset.")
parser.add_argument("--data_dir", type=str, help="The location of the input dataset")
parser.add_argument("--output_dir", type=str, help="Where to write the cropped images")
args = parser.parse_args()

if __name__ == '__main__':
    failed_filenames = []
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(os.path.join(execution_path, "object-detection-models/resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel(detection_speed="fastest")
    # Restrict detection to the vehicle classes of interest
    custom_objects = detector.CustomObjects(car=True, truck=True)
    categories = os.listdir(args.data_dir)
    categories.sort()
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
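    # A hedged sketch of the per-image loop that likely follows this setup:
    # detectCustomObjectsFromImage() is the imageai call that pairs with
    # CustomObjects(), and extract_detected_objects=True makes it write the
    # cropped detections. The directory layout and error handling are assumptions.
    for category in tqdm(categories):
        src_dir = os.path.join(args.data_dir, category)
        dst_dir = os.path.join(args.output_dir, category)
        if not os.path.exists(dst_dir):
            os.mkdir(dst_dir)
        for fname in os.listdir(src_dir):
            try:
                detector.detectCustomObjectsFromImage(
                    custom_objects=custom_objects,
                    input_image=os.path.join(src_dir, fname),
                    output_image_path=os.path.join(dst_dir, fname),
                    extract_detected_objects=True)
            except Exception:
                failed_filenames.append(fname)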
filler = np.zeros(difference)
self.num_steps = max_steps
self.xcom = np.concatenate((self.xcom, filler))
self.xfbk = np.concatenate((self.xfbk, filler))
self.ycom = np.concatenate((self.ycom, filler))
self.yfbk = np.concatenate((self.yfbk, filler))
self.errors = np.zeros((2, self.num_steps))

actions = [
    2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1, .9, .8, .7, .6, .5,
    .4, .3, .2, .1, 0, -.1, -.2, -.3, -.4, -.5, -.6, -.7, -.8, -.9, -1,
    -1.1, -1.2, -1.3, -1.4, -1.5, -1.6, -1.7, -1.8, -1.9, -2
]  # fixed a missing comma: "-1.3 - 1.4" had collapsed two actions into -2.7
action_size = len(actions)

dirname = os.path.join(os.getcwd(), 'data')
data_parser = parser()

# Collect all .DAT recordings, holding one file out as the test set
files = [file for file in os.listdir(dirname) if file.endswith(".DAT")]
chosen_test_set = 0
file_data_train = []
file_data_test = []
for i in range(len(files)):
    data_parser.parse_data(os.path.join(dirname, files[i]))
    temp = data_parser.get_all_com_fbk().values
    if i == chosen_test_set:
        file_data_test.append(temp)
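# Hand-typing the actions grid above is what made the original comma bug easy
# to miss; a sketch that generates the same 41-value grid programmatically:
actions = [round(2 - 0.1 * i, 1) for i in range(41)]  # 2.0 ... -2.0 in steps of 0.1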