def __init__(self, parent=None):
    """Build the update-register dialog and connect its buttons."""
    super(UpdateRegisterClass, self).__init__(parent)
    # Paths are filled in later by the UI handlers; start them empty.
    self.root = p('')
    self.source = p('')
    self.r = None
    # Qt Designer setup, then wire button signals to slots.
    self.setupUi(self)
    self.button_actions()
def generate_spx_layer(path_tgt, if_top=False):
    """Construct a sphinx-ready file tree rooted at *path_tgt*.

    Writes an index ``.rst`` that lists each sub-directory's own index
    plus the doc-source files of this directory, then recurses into
    every sub-directory.

    :param path_tgt: ``pathlib.Path`` of the directory to process.
    :param if_top: True only for the tree root, whose index is named
        ``index.rst`` instead of ``<dirname>.rst``.
    """
    # List immediate sub-directories and documentation source files.
    list_dir = listup_dir(path_tgt)
    list_file = listup_docsrc(path_tgt)

    # Build toctree entries with a join instead of repeated += concat;
    # each directory entry points at that directory's own index file.
    entries = ["{0}/{0}.rst\n".format(dr) for dr in list_dir]
    entries += ["{}\n".format(f) for f in list_file]
    tocs = "".join("    " + line for line in entries)

    index_body = generate_toc(path_tgt.stem, tocs)

    # The top level gets "index.rst"; nested levels use the dir name.
    idx_name = "index.rst" if if_top else path_tgt.stem + ".rst"
    (path_tgt / idx_name).write_text(index_body, encoding="utf-8")

    # Recurse into each sub-directory.
    for dr in list_dir:
        print(p().cwd())
        print(dr)
        generate_spx_layer(path_tgt / dr)
def __create_pickles(self):
    """Convert every loaded graph file to a pickled DataFrame in
    ``self.output``.

    Progress (index, total, file) is printed per file.
    """
    # Create the output dir portably. The original shelled out with
    # `sb.call('mkdir ' + self.output, shell=True)`, which breaks on
    # paths containing spaces and is shell-injection prone.
    p(self.output).mkdir(parents=True, exist_ok=True)
    for i, f in enumerate(self.files, 1):
        print(i, len(self.files), f)
        name = p(f).stem
        create_pickle(Graph2Pandas(f).df, p(self.output) / name)
def __init__(self, parent=None):
    """Build the register-compile dialog and connect its buttons."""
    super(RegisterCompileClass, self).__init__(parent)
    # Paths chosen later through the UI; initialised empty.
    self.register = p('')
    self.project = p('')
    self.destination_Folder = p('')
    # Companion progress dialog used while compiling.
    self.update = RegisterCompileUpdate.Ui_Dialog()
    # Qt Designer setup, then wire button signals to slots.
    self.setupUi(self)
    self.button_actions()
def add_files_to_zip(files, root):
    """Zip *files* into a new archive under ``ROOT/COMBINED``.

    :param files: iterable of objects exposing ``full_path`` (source on
        disk) and ``name`` (arcname inside the zip).
    :param root: directory that contains (or will contain) COMBINED.
    """
    combined = p(root) / "COMBINED"
    # Idempotent replacement for the is_dir()/mkdir() pair.
    combined.mkdir(exist_ok=True)
    # file_name() supplies the archive's (date-stamped) file name;
    # the original's str(root) round-trip was redundant.
    save_path = combined / file_name()
    with ZipFile(str(save_path), 'w') as myzip:
        for file in files:
            myzip.write(file.full_path, file.name)
def main():
    """Copy the project's build output to the publish destination."""
    build_dir = p(project).absolute() / "build"
    print("copy from: " + str(build_dir))
    print("copy to : " + str(publish_to))
    # dir_util.copy_tree merges into an existing destination tree.
    du.copy_tree(str(build_dir), str(publish_to))
def tilda(obj):
    """Expand a leading '~' in a string, or in each string of a list.

    Non-string items (and non-str/list inputs) pass through unchanged.
    """
    if isinstance(obj, str):
        return str(p(obj).expanduser())
    if isinstance(obj, list):
        return [str(p(item).expanduser()) if isinstance(item, str) else item
                for item in obj]
    return obj
def name_builder(self, wbs, disp, finish=False):
    """Build the date-stamped output PDF path for *disp*.

    :param wbs: sub-folder name used for intermediate (non-final) output.
    :param disp: display name that prefixes the file name.
    :param finish: if True the file goes directly in ``self.date_folder``;
        otherwise it is nested under the *wbs* sub-folder.
    :return: the full path as a string.
    """
    # date.today() is the idiomatic equivalent of
    # date.fromtimestamp(time.time()); note month/day are NOT
    # zero-padded, matching the original naming scheme.
    today = datetime.date.today()
    file_name = "{} ({}-{}-{}).pdf".format(disp, today.year, today.month,
                                           today.day)
    if finish:
        return str(p(self.date_folder, file_name))
    return str(p(self.date_folder, wbs, file_name))
def __find_ontologies(self):
    """Collect ontology file paths from the configured wildcards.

    Directories are searched recursively for .ttl files; plain files
    are kept when their suffix is .owl, .ttl, or .pickle.
    """
    found = []
    for wc in self.wildcard:
        candidate = p(wc)
        if candidate.is_dir():
            found.extend(glob(wc + '/**/*.ttl', recursive=True))
        elif candidate.is_file() and candidate.suffix in ('.owl', '.ttl',
                                                          '.pickle'):
            found.append(wc)
    return found
def execute_spx():
    """Run the sphinx HTML build from the project dir, after removing
    any stale build output."""
    path_spx = p(spx_prj_dir)
    os.chdir(path_spx)
    print(p().cwd())
    # Clean out a previous build so the result is fresh.
    build_dir = path_spx / "build"
    if build_dir.exists():
        shutil.rmtree(build_dir)
    # Windows batch driver for `sphinx-build -b html`.
    subprocess.call("make.bat html")
def __fix_path(path):
    """Normalise *path*: expand a leading '~', or resolve relative to
    the home directory; unknown paths fall through unchanged.

    Non-string inputs are returned as-is.
    """
    if not isinstance(path, str):
        return path
    elif '~' == path[0]:
        tilda_fixed_path = tilda(path)
        if is_file(tilda_fixed_path):
            return tilda_fixed_path
        else:
            # BUG FIX: exit() accepts a single argument; the original
            # exit(path, ': does not exit.') raised TypeError instead of
            # terminating with a message (also fixes the "exit" typo).
            exit(str(path) + ': does not exist.')
    elif is_file(p.home() / path):
        return str(p().home() / path)
    elif is_dict(p.home() / path):
        # NOTE(review): `is_dict` looks like a typo for `is_dir` —
        # confirm against the helper's definition before changing.
        return str(p().home() / path)
    else:
        return path
def main():
    """Copy the local integration files into gym-retro's
    SuperMarioBros-Nes data directory."""
    import shutil
    from pathlib import Path as p

    import retro

    retro_dir = p(
        retro.__file__).parents[0] / 'data/stable/SuperMarioBros-Nes/'
    # The three integration assets gym-retro expects for this game.
    for name in ('data.json', 'metadata.json', 'scenario.json'):
        src = p('./integration/' + name).resolve()
        shutil.copy(str(src), str(retro_dir))
def get_srcs(build_dirs):
    """Return the set of every path found recursively under each of
    *build_dirs*."""
    return {entry
            for build_dir in build_dirs
            for entry in p(build_dir).glob("**/*")}
def main():
    """Build the sphinx docs from a throw-away copy of ./spx_source."""
    path_doc_src = p("./spx_source")
    # Work on a temporary copy ("spx_source_") so the original source
    # tree is never modified by the layer generation below.
    path_tmp_ws = p(str(path_doc_src) + "_")
    print(path_tmp_ws)
    if path_tmp_ws.exists():
        shutil.rmtree(path_tmp_ws)
    shutil.copytree(path_doc_src, path_tmp_ws)

    generate_spx_layer(path_tmp_ws, if_top=True)
    update_spx_source(path_tmp_ws)
    execute_spx()
def go():
    """Copy videos whose IMDb rating meets the user's threshold from the
    source folder to the destination folder (Tk GUI callback)."""
    #print(current_data.get(),destination_data.get(),sep='\n')
    # Validate both folder paths typed into the GUI.
    if not (os.path.exists(current_data.get()) and os.path.exists(destination_data.get())):
        windoww = Tk()
        windoww.title("ERROR!")
        c = Label(
            windoww,
            text="Enter valid location or use forward slash instead of backward"
        )
        c.config(font=(1))
        c.grid(row=0, column=0, pady=2)
        # NOTE(review): execution continues after showing the error
        # window — a `return` appears to be missing here.
        window.destroy()
    try:
        i = float(imdb_data.get())
    except:
        # NOTE(review): if float() fails, `i` stays unbound and the
        # `if i < 0` test below raises NameError; a `return` after the
        # error window is probably intended. Bare `except:` also hides
        # unrelated errors.
        windoww = Tk()
        windoww.title("ERROR!")
        c = Label(windoww, text="Enter a number")
        c.config(font=(1))
        c.grid(row=0, column=0, pady=2)
        window.destroy()
    make = []
    if i < 0 or i > 10:
        windoww = Tk()
        windoww.title("ERROR!")
        c = Label(windoww, text="Enter rating in range 0 to 10")
        c.config(font=(1))
        c.grid(row=0, column=0, pady=2)
        window.destroy()
    # Collect the full path of every file under the source folder.
    for file, folder, sub in os.walk(current_data.get()):
        for s in sub:
            make.append(file + '\\' + s)
    for j in make:
        temp = p(j)
        # Only look up video files.
        if temp.suffix == ".mkv" or temp.suffix == ".mp4" or temp.suffix == ".avi":
            # First Google hit for "<title> + imdb"; pause throttles requests.
            for k in googlesearch.search(temp.stem + "+ imdb", tld='com',
                                         num=1, stop=1, pause=2):
                try:
                    res = requests.get(k)
                except:
                    break
                # Scrape the rating value from the IMDb page markup.
                no = bs4.BeautifulSoup(res.text, 'html.parser')
                tt = no.find(itemprop='ratingValue')
                if tt == None:
                    break
                t = tt.get_text()
                t = float(t)
                if t >= i:
                    shutil.copy(j, destination_data.get() + '/')
    window.destroy()
def create_date_folder(root):
    """Create (if needed) and return ``ROOT/<YYYY-M-D>`` for today.

    Month and day are not zero-padded, matching the original scheme.

    :param root: base directory (str or Path).
    :return: ``pathlib.Path`` of the date folder.
    """
    # date.today() is equivalent to date.fromtimestamp(time.time()).
    today = datetime.date.today()
    folder = p(root, "{}-{}-{}".format(today.year, today.month, today.day))
    # Idempotent replacement for the original is_dir()/mkdir() pair.
    folder.mkdir(exist_ok=True)
    return folder
def find_pdf(root):
    """Yield Path objects for matching files anywhere under *root*.

    Matching is delegated to find_file_type() on the file suffix.
    """
    # Distinct names avoid the original's shadowing of `root`.
    for dirpath, _dirs, filenames in os.walk(root):
        for name in filenames:
            candidate = p(os.path.join(dirpath, name))
            if find_file_type(candidate.suffix):
                yield candidate
def create_combine(self):
    """Ensure ``ROOT/COMBINED`` exists and remember it in ``self.combine``."""
    folder = p(self.root_folder, "COMBINED")
    # Both branches of the original if/else assigned self.combine;
    # only the mkdir needs to be conditional.
    if not folder.is_dir():
        folder.mkdir()
    self.combine = folder
def find_root_dir(self, root):
    """Locate the "Pdf's" directory relative to *root* and store it in
    ``self.root_folder`` (as a Path)."""
    root = p(root)
    # self.root_folder temporarily holds the *name* being searched for;
    # it is replaced by the found Path below.
    self.root_folder = "Pdf's"
    if self.root_folder in root.parts:
        # *root* is already inside the Pdf's tree: truncate its parts
        # at the Pdf's component.
        print(root.parts)
        i = root.parts.index(self.root_folder)
        print(i)
        # NOTE(review): the comprehension variable shadows the index
        # `i` — it works only because the slice is evaluated first.
        self.root_folder = p(*[i for i in root.parts[:i+1]])
        print(self.root_folder.absolute())
    else:
        # Search downward for a directory named "Pdf's".
        # NOTE(review): `root` shadows the parameter, and the walk has
        # no break — if several matches exist, the last one wins.
        for root, dirs, files in os.walk(str(root)):
            for d in dirs:
                d = p(root, d)
                if d.name == self.root_folder:
                    self.root_folder = d
    return
def __init__(self, email, password, config_path, debug):
    """Authenticate with Google and prime the location-sharing state.

    :param email: Google account email.
    :param password: account password, consumed by startup().
    :param config_path: path to the module's config directory.
    :param debug: passed through to Config for verbose behaviour.
    """
    log.debug('Initializing GoogleMaps module.')
    self.path = p(config_path)
    self.config = Config(self.path, debug)
    # Log in before creating the browser/location helpers — the call
    # order here appears intentional; do not reorder without checking.
    self.startup(email, password)
    self.browser = Browser()
    self.location = Location()
    # Populated by update() on first refresh.
    self.people = None
    self.update()
def create_date(self):
    """Create (if needed) today's ``YYYY-M-D`` folder under
    ``self.combine`` and store it in ``self.date_folder``."""
    # date.today() is equivalent to date.fromtimestamp(time.time()).
    today = datetime.date.today()
    folder = p(self.combine,
               "{}-{}-{}".format(today.year, today.month, today.day))
    # Idempotent replacement for the original is_dir()/mkdir() pair.
    folder.mkdir(exist_ok=True)
    self.date_folder = folder
def update_spx_source(src=""):
    """Move a file tree into the sphinx source dir; old sources are deleted.

    :param src: path (str or Path) of the new doc-source tree.
    """
    # BUG FIX: the default "" is a plain str, which has no .glob();
    # coerce so both str and Path callers work.
    src = p(src)

    # delete old spx_prj source
    p_spxsrc = p(spx_src_dir)
    if p_spxsrc.is_dir():
        shutil.rmtree(p_spxsrc)
    p_spxsrc.mkdir()

    # seed the fresh source dir from the static "<dir>_org" skeleton
    p_spxsrc_org = p(str(p_spxsrc) + "_org")
    _listprint(p_spxsrc_org.glob("**/*"))
    du.copy_tree(str(p_spxsrc_org), str(p_spxsrc))

    # copy the new doc source on top
    _listprint(src.glob("**/*"))
    du.copy_tree(str(src), str(p_spxsrc))
def path_walk(path):
    """Yield matching files under *path*, never descending into
    COMBINED directories."""
    excluded = {"COMBINED"}
    for dirpath, dirs, filenames in os.walk(path):
        # Prune in place so os.walk skips the excluded directories,
        # exactly like the original's dirs.remove() calls.
        dirs[:] = [d for d in dirs if d not in excluded]
        for name in filenames:
            candidate = p(os.path.join(dirpath, name))
            if find_file_type(candidate.suffix):
                yield candidate
def main():
    """Delete the generated sphinx project named in settings.json,
    force-unlocking read-only files first."""
    # read paths
    with open("settings.json", 'r', encoding='utf-8') as f:
        settings = json.load(f)
    project = settings["project"]

    p_prj = p(project).absolute()
    if p_prj.is_dir():
        # chmod everything so rmtree can remove read-only entries
        # (sphinx output on Windows is often write-protected).
        for entry in p_prj.glob("**/*"):
            os.chmod(entry, 0o777)
        shutil.rmtree(p_prj)
def startup(self):
    """Read config.conf from whichever install layout is present:
    hass.io, docker, or a manual install under self.path."""
    hassio = '/srv/homeassistant/lib/python3.6/site-packages/gmapslocsharing/core/config.conf'
    docker = '/config/deps/lib/python3.7/site-packages/gmapslocsharing/core/config.conf'
    if p(hassio).exists():
        log.info('Loading hass.io config path.')
        log.debug('Config Path: {}'.format(hassio))
        self.config.read(hassio)
    elif p(docker).exists():
        log.info('Loading docker config path.')
        log.debug('Config Path: {}'.format(docker))
        self.config.read(docker)
    else:
        # Manual install: find the deps pythonX.Y dir(s) under
        # self.path and append the fixed config suffix.
        suffix = '/site-packages/gmapslocsharing/core/config.conf'
        log.info('Loading manual install config path.')
        candidates = (self.path / 'deps/lib').glob('python*')
        c = ''.join(d.as_posix() for d in candidates) + suffix
        log.debug('Config Path: {}'.format(c))
        self.config.read(c)
def Graph2Pandas_converter(self):
    '''Load the graph from self.path (or use an existing self.g) and
    return its SPARQL dataframe; only one of path/graph may be set.'''
    if isinstance(self.path, str) or isinstance(self.path, p):
        self.path = str(self.path)
        filetype = p(self.path).suffix
        if filetype == '.pickle':
            # NOTE(review): pickle.load executes code on load — only
            # open trusted pickles. The file handle is never closed.
            self.g = pickle.load(open(self.path, 'rb'))
            if isinstance(self.g, rdflib.graph.Graph):
                return self.get_sparql_dataframe()
            else:
                # Pickle held something other than a Graph (e.g. a
                # pre-built dataframe): hand it back as-is.
                print(
                    'WARNING:: function df() wont work unless an ontology source is loaded'
                )
                return self.g
        elif filetype == '.ttl' or filetype == '.rdf':
            self.g = rdflib.Graph()
            self.g.parse(self.path, format='turtle')
            return self.get_sparql_dataframe()
        elif filetype == '.nt':
            self.g = rdflib.Graph()
            self.g.parse(self.path, format='nt')
            return self.get_sparql_dataframe()
        elif filetype == '.owl' or filetype == '.xrdf':
            self.g = rdflib.Graph()
            try:
                self.g.parse(self.path, format='xml')
            except:
                # some owl formats are more rdf than owl
                self.g.parse(self.path, format='turtle')
            return self.get_sparql_dataframe()
        else:
            exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
        # NOTE(review): dead code — every branch above returns or
        # exits, so this try never runs; `self.path = None` after the
        # `return` inside it could never execute either.
        try:
            return self.get_sparql_dataframe()
            self.path = None
        except:
            exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
    elif isinstance(self.g, rdflib.graph.Graph):
        # Graph supplied directly: clear path so the two never conflict.
        self.path = None
        return self.get_sparql_dataframe()
    else:
        exit('Obj given is not str, pathlib obj, or an rdflib.Graph()')
def run(self):
    """Compile register PDFs, merge the TSD set, and write a report."""
    print('I am running')
    files = Compile.run(str(self.register), str(self.project))
    folder = Compile.create_date_folder(str(self.destination_Folder))
    Compile.copy_files(files['normal'], str(folder))

    # Reduce the TSD set: newest revision only, then grouped and sorted.
    pdfs = [merge.PDF(p(item.path)) for item in files['tsd']]
    pdfs = merge.newest_file(pdfs)
    pdfs = merge.group_files(pdfs)
    pdfs = merge.sort_files(pdfs)

    print('Merging files')
    merge.FinishPDF(folder, pdfs).run(finish=True)
    print(str(folder))

    # Summarise errors / progress / ignored items next to the output.
    report = TextReport(files['error'], files['progress'],
                        files['ignore'], str(folder))
    report.run()
    print('Finished')
# if(len(DATE)>430 or max(DATE)-min(DATE)> td(days=2.5)): # yield from delData(engine) for name in ATOS.keys(): fig=plt.figure() plt.plot(DATE, ATOS[name]) fig.autofmt_xdate() plt.savefig('./imagines/ATOS_'+ name+'.jpg') plt.close() for x in p('./imagines').iterdir(): if x.is_file(): src='.\\imagines\\'+x.name dst='..\\HotIO\\public\\private_images\\' sh.copy(src,dst+x.name) pass sh.copy('.\\Calendar.txt', '..\\HotIO\\public\\Calendar.txt') if __name__ == '__main__': for x in p('./imagines').iterdir(): if x.is_file(): src='.\\imagines\\'+x.name print(src) dst='..\\HotIO\\public\\private_images\\' sh.copy(src,dst+x.name) pass sh.copy('.\\Calendar.txt', '..\\HotIO\\public\\Calendar.txt') # os.system('copy Calendar.txt ..\\\\HotIO\\public\\') # os.system('copy imagines\\*.* ..\\HotIO\\public\\private_images')
import datetime
from pprint import pprint as pp
import time

import make_html
import publish

# ##################################
# Read the build configuration from settings.json.
# NOTE(review): `json` and `p` are used below but not imported in this
# visible header — presumably imported in an earlier part of the file.
with open("settings.json", 'r', encoding='utf-8') as f:
    d = json.load(f)
project = d["project"]

# set build target directory
curdir = p().cwd().absolute()
build_src_dirs = d["doc_source"]

# ##################################
def get_srcs(build_dirs):
    """Return the set of every path found recursively under each of
    *build_dirs*."""
    build_files = []
    for build_dir in build_dirs:
        build_files += list(p(build_dir).glob("**/*"))
    return set(build_files)

# NOTE(review): definition continues beyond this chunk.
def main():
#!/usr/bin/python3 # -*- coding: utf-8 -*- import pathlib import shutil from pathlib import Path as p from distutils import dir_util as du # cwd path_current = pathlib.Path().cwd() print(path_current) # cwd shortened path_current = p().cwd() print(path_current) # parent path_parent = path_current.parent print(path_parent) # only current # only suffix # recursive list_dir = path_current.glob("*") list_dir = path_current.glob("*.py") list_dir = path_current.glob("**/*") # path concatinate # rm dir # mkdir path_new = path_current / p("new_dir")
import joblib
from pathlib import Path as p
import os

# Artefacts live in <repo root>/artefacts, two levels above this file.
base_path = p(__file__).parent.parent.absolute()
# Path division is the idiomatic form of p.joinpath(base_path, ...),
# which relied on an unbound-method call.
artefact_path = base_path / "artefacts"


def load_artefacts(model_name, encoder_name):
    """Load artefacts saved during R&D, such as the model and encoder.

    :param model_name: file name of the saved model inside artefacts/.
    :param encoder_name: file name of the saved encoder.
    :return: list ``[model, encoder]``.
    """
    model = joblib.load(artefact_path / model_name)
    encoder = joblib.load(artefact_path / encoder_name)
    return [model, encoder]
def Draw(engine):
    """Coroutine: read XM and ATOS quote history from the DB, plot one
    time-series chart per symbol into ./imagines, then publish the
    images and calendar into the HotIO site.

    :param engine: awaitable (aiopg-style) yielding a DB connection.
    """
    # Per-symbol value buffers, one list per instrument.
    XM={ 'EURUSD':[], 'USDJPY':[], 'GBPUSD':[], 'XAUUSD':[], 'XAGUSD':[], 'OIL':[], 'US30':[], 'JP225':[], 'EURJPY':[], 'GBPJPY':[], 'GER30':[] }
    ATOS={ 'EURUSD':[], 'USDJPY':[], 'GBPUSD':[], 'XAUUSD':[], 'XAGUSD':[], 'OIL':[], 'US30':[], 'AUDUSD':[], 'HKG50':[] }
    DATE=[]
    with (yield from engine) as conn:
        # XM rows newer than `thrashold` (module global), in date order.
        res = yield from conn.execute(xm.select().where(xm.c.DATE>thrashold).order_by(xm.c.DATE))
        for row in res:
            DATE.append(row.DATE)
            for name in XM.keys():
                XM[name].append(row[name])
        # One chart per XM symbol.
        for name in XM.keys():
            fig=plt.figure()
            plt.plot(DATE, XM[name])
            fig.autofmt_xdate()
            plt.savefig('./imagines/XM_'+ name+'.jpg')
            plt.close()
        # Reset the date axis for the ATOS series.
        DATE=[]
        res = yield from conn.execute(atos.select().where(atos.c.DATE>thrashold).order_by(atos.c.DATE))
        for row in res:
            DATE.append(row.DATE)
            for name in ATOS.keys():
                ATOS[name].append(row[name])
        # if(len(DATE)>430 or max(DATE)-min(DATE)> td(days=2.5)):
        #     yield from delData(engine)
        # One chart per ATOS symbol.
        for name in ATOS.keys():
            fig=plt.figure()
            plt.plot(DATE, ATOS[name])
            fig.autofmt_xdate()
            plt.savefig('./imagines/ATOS_'+ name+'.jpg')
            plt.close()
        # Publish rendered charts and the calendar into the HotIO site.
        for x in p('./imagines').iterdir():
            if x.is_file():
                src='.\\imagines\\'+x.name
                dst='..\\HotIO\\public\\private_images\\'
                sh.copy(src,dst+x.name)
            pass
        sh.copy('.\\Calendar.txt', '..\\HotIO\\public\\Calendar.txt')
# NOTE(review): `outfile`/`texto` come from earlier in the file (this is
# the tail of the write-out step of a function defined above this chunk).
with open(outfile,'w',encoding='utf-8') as f:
    f.write(texto)
pass

if __name__ == "__main__":
    os.chdir(sys.path[0])
    from pathlib import Path as p
    # Read the target repo name from filename.txt, or prompt for it
    # (prompt is Chinese: "enter the GitHub repository name") and cache
    # the answer for next time.
    try:
        with open('filename.txt','r',encoding='utf-8') as f:
            ss=f.read().strip()
    except:
        ss=input('请输入github对应仓库名!\n')
        with open('filename.txt','w',encoding='utf-8') as f:
            f.write(ss)
    # Create the output folder; ignore "already exists".
    try:
        p('./'+ss).mkdir()
    except:
        pass
    # Encrypt every source-like file in the current directory into ./<ss>/.
    for x in p('.').iterdir():
        if x.is_file():
            plist=['*.py', '*.txt', '*.js', '*.cpp', '*.c', '*.html','*.json', '*.bat','*.sql']
            match=False
            for m in plist:
                if x.match(m):
                    match=True
                    break
            if match:
                readFile(str(x))
                getPW()
                encrypt('./'+ss+'/'+str(x))
# NOTE(review): `outfile`/`texto` come from earlier in the file (this is
# the tail of the write-out step of a function defined above this chunk).
with open(outfile,'w',encoding='utf-8') as f:
    f.write(texto)
pass

if __name__ == "__main__":
    import os
    import sys
    os.chdir(sys.path[0])
    from pathlib import Path as p, PurePath as pp
    # NOTE(review): duplicate chdir — harmless but redundant.
    os.chdir(sys.path[0])
    # Read the target folder name from filename.txt, or prompt for it
    # (prompt is Chinese: "enter the folder name to decrypt") and cache
    # the answer for next time.
    try:
        with open('filename.txt','r',encoding='utf-8') as f:
            ss=f.read().strip()
    except:
        ss=input('请输入要解密文件夹名称:\n')
        with open('filename.txt','w',encoding='utf-8') as f:
            f.write(ss)
    # Decrypt every source-like file found inside ./<ss>/.
    if p('./'+ss).is_dir():
        for x in p('./'+ss).iterdir():
            if x.is_file():
                plist=['*.py', '*.txt', '*.js', '*.cpp', '*.c', '*.html','*.json', '*.bat','*.sql']
                match=False
                for m in plist:
                    if x.match(m):
                        match=True
                        break
                if match:
                    readFile(str(x))
                    getPW()
                    decrypt(str(x.name))