def music(file, output_dir, filetags, move, use_over, filename):
    """Copy or move an audio *file* into a tag-templated directory tree.

    Parameters:
        file: source audio file path.
        output_dir: directory template expanded with *filetags*.
        filetags: mapping used by TemplateP.safe_substitute.
        move: move instead of copy when truthy.
        use_over: overwrite an existing destination file when truthy.
        filename: destination name template (extension appended).

    Returns True on success, False when the destination exists and
    *use_over* is false.  Relies on module-level TemplateP, mv, copy2
    and the `verbose` flag.
    """
    # BUG FIX: the original called os.path.normpath/os.path.abspath and
    # discarded the results — these functions return new strings.
    path = os.path.abspath(
        os.path.normpath(TemplateP(output_dir).safe_substitute(filetags)))
    # exist_ok also removes the isdir/makedirs race of the original.
    os.makedirs(path, exist_ok=True)
    if filename != os.path.basename(file):
        # BUG FIX: 'ext' was left unbound (NameError) when the source had
        # an unrecognized suffix; default to the file's real extension.
        ext = os.path.splitext(file)[1].lower()
        for e in ('.mp3', '.oga', '.ogg', '.opus', '.wav', '.flac',
                  '.wma', '.m4b', '.m4a', '.mp4'):
            if os.path.basename(file).lower().endswith(e):
                ext = e
                break
        filename = TemplateP(filename).safe_substitute(filetags) + ext
    file_path = os.path.normpath(os.path.join(path, filename))
    if os.path.isfile(file_path) and not use_over:
        return False
    if move:
        mv(file, file_path)
        m_or_c = 'Moved'
    else:
        copy2(file, file_path)
        m_or_c = 'Copied'
    if verbose:  # NOTE(review): module-level flag — confirm it is always defined
        print(m_or_c, os.path.basename(file), 'to', file_path)
    return True
def station_down(self):
    '''Decrement the station number by 1, log the change, and persist the
    new number into line 16 of lobos.cfg (via a temp file swap).'''
    try:
        self.txt_inpt.text = str(int(self.txt_inpt.text)-1)
        self.textinput.text += 'Station is '+self.txt_inpt.text+'\n'
    except:
        # Field was non-numeric: fall back to the configured default.
        # (old station_start.txt-based fallback kept for reference)
        # with open('station_start.txt','rb') as f:
        #    st=str(f.read()).split('\n')[0]
        #    f.close()
        st = STATION_START
        self.txt_inpt.text = str(int(st)-1)
        self.textinput.text += 'Station is '+self.txt_inpt.text+'\n'
    self.txt_inpt.foreground_color = self.textinput.foreground_color
    # # get the last site visited and add 1, write to station file
    # fsite = open('station_start.txt','wb')
    # fsite.write(str(int(self.txt_inpt.text)+1))
    # fsite.close()
    # Rewrite the config, replacing line 16 with the new station number.
    countmax=16; counter=0
    with open('lobos.cfg','rb') as oldfile, open('lobos_new.cfg','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                #newfile.write(" text: '"+st+"'\n")
                newfile.write('station_start = '+self.txt_inpt.text+'\n')
            else:
                newfile.write(line)
    # Atomically replace the old config with the rewritten one.
    mv('lobos_new.cfg','lobos.cfg')
def buildlib(self, libfile, sources=(), depdirs=(), include_paths=(),
             define_symbols=(), undefine_symbols=(), extra_opts=(),
             working_dir=None, export_dirs=(), cmd=(), force_rebuild=False,
             extra_mappers=()):
    """Build *libfile* inside *working_dir*, merging the per-call options
    with the instance-level defaults.

    Raises Exception when *working_dir* is not given.  Returns the result
    of dumb_get (explicit *cmd*) or smart_get (source-driven build).
    """
    if working_dir is None:
        raise Exception("Must specify working_dir.")
    if isinstance(sources, basestring):
        sources = [sources]
    # Default the export dirs to the working dir when none were given.
    if not export_dirs and working_dir:
        export_dirs = (working_dir,)
    export_dirs = allabs(export_dirs)
    # Merge per-call settings with instance defaults.
    depdirs = allabs(list(depdirs) + list(self.depdirs))
    include_paths = allabs(list(include_paths) + list(self.include_paths))
    define_symbols = list(define_symbols) + list(self.define_symbols)
    undefine_symbols = list(undefine_symbols) + list(self.undefine_symbols)
    extra_opts = list(extra_opts) + list(self.extra_opts)
    type_mappers = list(extra_mappers) + list(self.type_mappers)
    # working_dir cannot be None here (checked above), so the old dead
    # "working_dir = self.working_dir" fallback branch was removed.
    working_dir = os.path.abspath(working_dir)
    with chdir(working_dir):
        if force_rebuild and os.path.exists(libfile):
            # BUG FIX: shutil has no 'mv' attribute; the API is
            # shutil.move — force_rebuild raised AttributeError before.
            shutil.move(libfile, libfile + '.bkp')
        if cmd:
            return self.dumb_get(libfile, cmd, depdirs)
        return self.smart_get(libfile, sources, depdirs, include_paths,
                              define_symbols, undefine_symbols, extra_opts,
                              working_dir, export_dirs,
                              type_mappers=type_mappers)
def station_up(self):
    '''Increment the station number by 1, log it, and persist it into
    lobos.cfg (line 16) and lobos_test.kv (line 33).'''
    # Cycle the log-widget counter 0..4.  NOTE(review): in this variant
    # every step sets the same black color — looks like the color cycling
    # was neutralized for debugging; compare the colored build of this
    # method elsewhere in the codebase.
    if self.textinput.counter==0:
        self.textinput.foreground_color = (0.0,0.0,0.0,1.0)
        self.textinput.counter = self.textinput.counter+1
    elif self.textinput.counter==1:
        self.textinput.foreground_color = (0.0,0.0,0.0,1.0)
        self.textinput.counter = self.textinput.counter+1
    elif self.textinput.counter==2:
        self.textinput.foreground_color = (0.0,0.0,0.0,1.0)
        self.textinput.counter = self.textinput.counter+1
    elif self.textinput.counter==3:
        self.textinput.foreground_color = (0.0,0.0,0.0,1.0)
        self.textinput.counter = self.textinput.counter+1
    else:
        self.textinput.foreground_color = (0.0,0.0,0.0,1.0)
        self.textinput.counter=0
    # try:
    self.txt_inpt.text = str(int(self.txt_inpt.text)+1)
    self.textinput.text += '\n'+'___________ '+self.txt_inpt.text+' ___________'+'\n'
    # except:
    #   (old STATION_START fallback kept commented out upstream)
    self.txt_inpt.foreground_color = self.textinput.foreground_color
    # (retired) station_start.txt persistence:
    # fsite = open('station_start.txt','wb')
    # fsite.write(str(int(self.txt_inpt.text)+1))
    # fsite.close()
    # Rewrite lobos.cfg, replacing line 16 with the new station number.
    countmax=16; counter=0
    with open('lobos.cfg','rb') as oldfile, open('lobos_new.cfg','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                #newfile.write(" text: '"+st+"'\n")
                newfile.write('station_start = '+self.txt_inpt.text+'\n')
                print "leicester"+self.txt_inpt.text
            else:
                newfile.write(line)
    mv('lobos_new.cfg','lobos.cfg')
    # Overwrite the station line of the kv file with the new number
    # (counter targets line 33 despite the old "line 34" note).
    countmax=33; counter=0
    with open('lobos_test.kv','rb') as oldfile, open('lobos_new.kv','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                newfile.write(" text: '"+self.txt_inpt.text+"'\n")
                print "leicester"+self.txt_inpt.text
            else:
                newfile.write(line)
    mv('lobos_new.kv','lobos_test.kv')
def TakePictureGravelSand(self, *args):
    '''Capture a gravel/sand camera image named with station number, GPS
    easting/northing, elevation and depth, then file it away.  If the
    vessel has moved farther than the threshold (textinput4) from the
    station start, the station number is auto-incremented first.'''
    # (retired) reminder popup when no image was taken for a while:
    #if hasattr(self, 'laststationchange') & hasattr(self, 'lastimage'):
    #   #self.textinput.text += 'Elapsed: '+str(abs(self.laststationchange - self.lastimage))+'\n'
    #   if abs(self.laststationchange - self.lastimage) > 30:
    #      content1 = Button(text = 'Close')
    #      popup = Popup(title = 'It has been a while ... consider changing station? ', size_hint = (None, None), size = (400,400), auto_dismiss=True, content = content1)
    #      content1.bind(on_press = popup.dismiss)
    #      popup.open()
    # Parse easting/northing/elevation/depth out of the GPS text widget.
    # NOTE(review): assumes textinput3 holds colon/comma separated fields
    # in a fixed order — confirm against the GPS formatter.
    e = self.textinput3.text.split(':')[1].split(',')[0]
    n = self.textinput3.text.split(':')[2].split(',')[0]
    z = self.textinput3.text.split(':')[3].split(',')[0]
    d = self.textinput3.text.split(':')[4].split(',')[0]
    if hasattr(self, 'st_n'):
        # distance from station start
        dist = sqrt((float(n)-float(self.st_n))**2 + (float(e)-float(self.st_e))**2)
        if dist > float(self.textinput4.text):
            # moved beyond threshold: increment station
            self.station_up()
    self.export_to_png = export_to_png
    now = time.asctime().replace(' ','_').replace(':','_')
    self.lastimage = time.clock()
    filename = 'st'+self.txt_inpt.text+'_gravel_sand_'+now+'_e'+e+'_n'+n+'_z'+z+'_d'+d+'.png'
    self.export_to_png(self.ids.camera, filename=filename)
    self.textinput.text += 'Gravel/Sand image collected'+'\n' #: '+filename+'\n'
    try:
        mv(filename,'gravelsandimages')
    except:
        # best-effort: leave the image in the cwd but tell the operator
        self.textinput.text += 'ERROR: image could not be stored'
def move_files():
    """Sort the .mp3 files in source_path into per-person output folders.

    Files named "A&B_rest.mp3" (doubles) are copied into A's folder and
    moved into B's; files named "A_rest.mp3" (singles) are moved into
    A's folder.  Relies on module-level source_path, output_path, cp, mv.
    """
    for track in os.listdir(source_path):
        if not track.endswith(".mp3"):
            continue
        src = source_path + "/" + track
        amp = track.find("&")
        underscore = track.find("_")
        if amp > 0:
            # doubles: names are "<p1>&<p2>_..."
            first = track[:amp]
            second = track[amp + 1:underscore]
            dest1 = output_path + "/" + first
            if not os.path.exists(dest1):
                os.makedirs(dest1)
            cp(src, dest1 + "/" + track)
            dest2 = output_path + "/" + second
            if not os.path.exists(dest2):
                os.makedirs(dest2)
            mv(src, dest2 + "/" + track)
        else:
            # singles: names are "<p1>_..."
            first = track[:underscore]
            dest = output_path + "/" + first
            if not os.path.exists(dest):
                os.makedirs(dest)
            mv(src, dest + "/" + track)
def on_stop(self):
    '''App shutdown hook: dump the session log to a timestamped file in
    the home directory, restore the station line of eyedaq.kv from
    station_start.txt, and close the GPS serial port.'''
    # write session log to file
    with open(os.path.expanduser("~")+os.sep+'log_'+time.asctime().replace(' ','_').replace(':','_')+'.txt','wb') as f:
        f.write(self.textinput.text)
        f.close()
    # read back the persisted starting station number
    with open('station_start.txt','rb') as f:
        st=str(f.read()).split('\n')[0]
        f.close()
    # rewrite eyedaq.kv, replacing line 22 with the station text widget value
    countmax=22; counter=0
    with open('eyedaq.kv','rb') as oldfile, open('eyedaq_new.kv','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                newfile.write(" text: '"+st+"'\n")
            else:
                newfile.write(line)
    mv('eyedaq_new.kv','eyedaq.kv')
    ## close the csv results files
    #bedf_csv.close()
    #print "output files closed"
    # close the serial port (self.ser stays 0 when it was never opened)
    if self.ser!=0:
        self.ser.close()
        print "================="
        print "GPS is closed"
        print "================="
def dump_result(
    results_base_dir,
    subtest_name,
    time_taken,
    data_sent,
    iterations,
):
    """Archive the bwm-ng capture for one subtest and emit its report.

    Parameters:
        results_base_dir: parent directory for all subtest results.
        subtest_name: name of the subdirectory to create (must not exist).
        time_taken: elapsed time in nanoseconds.
        data_sent: bytes sent per iteration.  # TODO confirm unit (bytes)
        iterations: number of iterations performed.
    """
    subtest_dir = os.path.join(results_base_dir, subtest_name)
    os.mkdir(subtest_dir)
    bwmng_results_file = os.path.join(subtest_dir, BWM_NG_LOGS_FILENAME)
    # Move the raw bwm-ng capture into the per-subtest directory.
    mv(TestUtils.BWMNG_OUTPUT_FILE, bwmng_results_file)
    time_taken = time_taken / 1000000000  # ns -> Seconds
    goodput = 1.0*(data_sent)*(iterations) / time_taken/1024/1024  # MBytes/s
    bwm_parser = BwmParser(bwmng_results_file, PLOT_GRAPH)
    # Convert per-interface byte totals to MBytes/s rates.
    throughputs = deepcopy(bwm_parser.transfers)
    for direction, thruput in throughputs.items():
        for iface, val in thruput.items():
            throughputs[direction][iface] = val / time_taken / (1024 * 1024)
    TestUtils.generate_report(time_taken, goodput, throughputs, iterations,
                              data_sent, subtest_dir, bwm_parser.transfers)
def MOCK_Writer(input_file, parameter, parameter_value):
    """
    Set the value for any of the input parameters within the
    'USER INPUT - START' and 'USER INPUT - END' block.

    The file is rewritten in place (via a '.new' temp file): the first
    non-comment line inside the block containing *parameter* is replaced
    with "parameter = value" (strings are single-quoted).

    Raises AssertionError if the block markers or the parameter are not
    found.
    """
    with open(input_file, 'r') as old_file:
        input_block_start = False
        input_block_end = False
        parameter_updated = False
        with open(input_file + '.new', 'w') as new_file:
            for l in old_file:
                if 'USER INPUT - START' in l:
                    input_block_start = True
                if 'USER INPUT - END' in l:
                    input_block_end = True
                # Only rewrite non-comment lines inside the block.
                if (input_block_start and not input_block_end
                        and '#' not in l and parameter in l):
                    parameter_updated = True
                    if isinstance(parameter_value, str):
                        new_file.write('{} = \'{}\'\n'.format(parameter, parameter_value))
                    else:
                        new_file.write('{} = {}\n'.format(parameter, parameter_value))
                else:
                    new_file.write(l)
    assert input_block_start, '{} not found in input file: {}'.format('USER INPUT - START', input_file)
    assert input_block_end, '{} not found in input file: {}'.format('USER INPUT - END', input_file)
    # BUG FIX: the original asserted `parameter_value` (truthiness of the
    # NEW value), so writing 0/''/False failed spuriously and a missing
    # parameter passed silently.  Assert the update actually happened.
    assert parameter_updated, '{} not found in input file: {}'.format(parameter, input_file)
    mv(input_file + '.new', input_file)
def TakePictureSandGravel(self, *args):
    '''Capture a sand/gravel camera image named with station number, GPS
    easting/northing and a timestamp, log it, and file it away into the
    sandgravelimages directory.'''
    self.export_to_png = export_to_png
    # Parse easting/northing out of the GPS text widget.
    # NOTE(review): assumes textinput3 holds colon/comma separated fields
    # in a fixed order — confirm against the GPS formatter.
    e = self.textinput3.text.split(':')[1].split(',')[0]
    n = self.textinput3.text.split(':')[2].split(',')[0]
    # Timestamp made filesystem-safe (no spaces or colons).
    now = time.asctime().replace(' ','_').replace(':','_')
    filename = 'st'+self.txt_inpt.text+'_sand_gravel_'+now+'_e'+e+'_n'+n+'.png'
    self.export_to_png(self.ids.camera, filename=filename)
    self.textinput.text += 'Sand/Gravel image collected: '+filename+'\n'
    mv(filename,'sandgravelimages')
def placedb(self): from shutil import move as mv try: mv(raw_input(INFO + 'Enter db file path: '), lc()['databases_root']) print YES + 'Database placed!' except Exception as e: print ERR + 'Error placing database file! Details: ' + e exit(1)
def recursive_rename(path, new_canonical_name, modify=False): sub_files = os.listdir(path) for sub_file in sub_files: target = os.path.join(path, sub_file) #rename directory and recurse down a level if os.path.isdir(target): old_name = sub_file.split('_') new_name = '_'.join([new_canonical_name] + old_name[1:]) if modify: shutil.mv('{}'.format(os.path.join(path, sub_file), os.path.join(path, new_name))) else: print 'Moving:', os.path.join(path, sub_file), 'to', os.path.join( path, new_name) recursive_rename(target, new_canonical_name, modify=modify) #otherwise it is a file that needs to be renamed: two different procedures for #.xyz or for .json else: ext_str = os.path.splitext(sub_file)[1] if ext_str == '.xyz': old_name = sub_file.split('_') new_name = '_'.join([new_canonical_name] + old_name[1:]) if modify: shutil.mv('{}'.format(os.path.join(path, sub_file), os.path.join(path, new_name))) else: print 'Moving:', os.path.join( path, sub_file), 'to', os.path.join(path, new_name) elif ext_str == '.json': with open(os.path.join(path, 'subgb.json'), 'r') as f: subgb_dict = json.load(f) try: subgb_dict['old_gbid'] = subgb_dict['gbid'] except KeyError: subgb_dict['old_gbid'] = subgb_dict['name'] old_name = subgb_dict['old_gbid'] subgb_dict['gbid'] = new_canonical_name + '_' + '_'.join( old_name[1:]) if modify: with open(os.path.join(path, 'subgb.json')) as f: json.dump(subgb_dict, f, indent=2) else: print subgb_dict['gbid'], subgb_dict['old_gbid'] elif ext_str == '.idx': pass elif ext_str == '.pbs': os.remove(os.path.join(path, sub_file)) else: print 'unknown file type', path, sub_file, os.path.splitext( sub_file)
def toPDF(chapRoot,chaps,bookName):
    '''Convert each chapter HTML page (chapRoot + chapter) to a numbered
    Part-NN.pdf with wkhtmltopdf inside a temp dir, join the parts with
    Ghostscript into "<bookName>.pdf", and move the result back to the
    directory the caller started in.'''
    with tDir() as t:
        startDir = getcwd()
        chdir(t)  # work in the temp dir so the *.pdf glob only sees parts
        for c,i in zip(chaps,count()):
            s_call(["echo","Converting " + chapRoot + c])
            s_call(["wkhtmltopdf", chapRoot + c, "Part-{:02d}.pdf".format(i)])
        pdfName = "{0}.pdf".format(bookName)
        # Ghostscript merge; shell=True so the "*.pdf" glob expands.
        args = ["gs","-dBATCH","-dNOPAUSE","-q",
                "-sDEVICE=pdfwrite","-dPDFSETTINGS=/prepress",
                "-sOutputFile="+pdfName,"*.pdf"]
        s_call(["echo","\nJoining PDF's..."])
        s_call(" ".join(args),shell=True)
        mv(pdfName,startDir)
def main():
    """
    Main program: distribute raw images into fixed-size folders, generate
    a CSV per folder, optionally convert to greyscale, then zip each
    folder for annotation and stage it for extraction.

    :return: void.
    """
    curr_folder = None
    i = 0
    imgs = list_files(RAW_IMAGES_DIR, IMG_EXT)
    for img in imgs:
        # Create a new empty directory every FOLDER_CONFIG['items'] images.
        if i % FOLDER_CONFIG['items'] == 0:
            curr_folder = create_dir(parent_dir=FOLDERS_DIR,
                                     img_dir=FOLDER_CONFIG['img_dir'],
                                     chars=FOLDER_CONFIG['chars'],
                                     size=FOLDER_CONFIG['size'])
            print('Generating folder {folder}...'.format(
                folder=curr_folder['name']))
        copy_file(img, curr_folder)
        i += 1
        # If the folder reaches its max size or if it is the last image.
        # NOTE(review): imgs.index(img) is O(n) per iteration and assumes
        # no duplicate paths in imgs — confirm list_files guarantees that.
        if i % FOLDER_CONFIG['items'] == 0 or imgs.index(img) == len(imgs) - 1:
            print(
                'Folder {folder} is full !'.format(folder=curr_folder['name']))
            # Generate the CSV file.
            generate_csv(curr_folder)
            # If greyscale mode is enabled.
            if args.greyscale:
                # Copy folder for future extraction.
                cpt(src=curr_folder['root_path'],
                    dst=os.path.join(FINAL_FOLDERS_DIR, curr_folder['name']))
                # Convert images.
                convert_folder_greyscale(curr_folder)
                # Zip directory for annotation.
                zip_directory(curr_folder)
                # Remove remaining folder.
                rt(curr_folder['root_path'])
            else:
                # Zip directory for annotation.
                zip_directory(curr_folder)
                # Move file for future extraction.
                mv(curr_folder["root_path"], FINAL_FOLDERS_DIR)
def img_portioner(train_dir, ratio):
    """Move a random *ratio* fraction of hr/lr training image pairs from
    *train_dir* into the module-level `test_dir`.

    Assumes train_dir/hr and train_dir/lr contain identically-named
    files.  NOTE(review): relies on the module-level globals `test_dir`,
    `random`, `os` and `mv` — confirm they are defined at import time.
    """
    # List the directory once.  The original called os.listdir twice
    # (once for the count, once for the names), which risked an index
    # mismatch if files appeared/disappeared in between; it also wrapped
    # one path in a redundant nested os.path.join.
    images = os.listdir(os.path.join(train_dir, 'hr'))
    test_num = int(len(images) * ratio)
    for rand in random.sample(range(len(images)), test_num):
        print('movin', images[rand], 'to test folder')
        mv(os.path.join(train_dir, 'hr', images[rand]),
           os.path.join(test_dir, 'hr', images[rand]))
        mv(os.path.join(train_dir, 'lr', images[rand]),
           os.path.join(test_dir, 'lr', images[rand]))
def __modifyfile(filename, tmpfilename, processline_Fn):
    """Rewrite *filename* line-by-line through *processline_Fn* into
    *tmpfilename*, convert the result to unix line endings with cygwin's
    dos2unix, then replace the original file.

    processline_Fn(line, newf, persistentdata) is called once per line
    (and once more with the empty EOF string); raising StopIteration from
    it signals "file already correct", in which case the temp file is
    discarded and () is returned.
    """
    try:
        origf = open(filename, "r")
        newf = open(tmpfilename, "w+")
        persistentdata = {}  # scratch dict shared across processline_Fn calls
        while True:
            line = origf.readline()
            if len(line) == 0:  # EOF
                processline_Fn(line, newf, persistentdata)
                break
            else:
                processline_Fn(line, newf, persistentdata)
    except StopIteration:
        # processline_Fn decided no modification is needed.
        print("Verified File: {}".format(filename))
        newf.close()
        newf = None
        del_file(tmpfilename)
        return ()
    except Exception as err:
        eprint(err)
        newf.close()
        newf = None
        del_file(tmpfilename)  # delete tmp file
    finally:
        origf.close()
        if newf is not None:
            newf.close()
    # Translate the Windows temp path into its cygwin equivalent
    # (C:/... -> /cygdrive/c/...), normalizing backslashes first.
    pathsplitter = regex(r'\\')
    pathmapper = regex(r'^C:/(.*)$')
    cygwin_filename = pathmapper.sub(  # swap filename to path from inside cygwin
        '/cygdrive/c/\\1', '/'.join(pathsplitter.split(tmpfilename)))
    try:
        # convert file type with dos2unix
        exitcode = __runcygcmd(
            "/usr/bin/dos2unix.exe {file}".format(file=cygwin_filename))
        if exitcode != 0:
            raise (Exception("Failed to convert file to unix file."))
        # replace original file
        mv(tmpfilename, filename)
    except Exception as err:
        eprint(err)
        raise (err)
    else:
        print("Modified: {}".format(filename))
def update(context, request):
    """Rename an archive: move <archives>/<name> to <archives>/<new name>.

    Raises HTTPNotFound when the file cannot be moved, ParamsError when a
    required parameter is missing, and HTTPNoContent on success (the
    pyramid idiom of raising the success response).
    """
    try:
        name = request.matchdict['name']
        archives = Environment.settings['paths.archives']
        new_name = request.params['name']
        # BUG FIX: shutil has no 'mv' attribute; shutil.move is the API —
        # every rename raised AttributeError before.
        shutil.move(os.path.join(archives, name),
                    os.path.join(archives, new_name))
    except IOError as e:
        raise HTTPNotFound()
    except KeyError as e:
        raise ParamsError(e)
    else:
        raise HTTPNoContent()
def copy_file(src, dest, move=None):
    """Copy *src* to *dest*, or relocate it when *move* is truthy.

    Returns the destination path.
    """
    if move:
        from shutil import move as mv
        mv(src, dest)
    else:
        from shutil import copyfile
        copyfile(src, dest)
    return dest
def station_up(self):
    '''Increment the station number by 1, log it with a rotating text
    color, and persist the new number into line 16 of lobos.cfg.'''
    # Rotate the log-widget color through 5 states so consecutive
    # stations are visually distinguishable.
    if self.textinput.counter==0:
        self.textinput.foreground_color = (0.6,0.5,0.0,1.0)
        self.textinput.counter = self.textinput.counter+1
    elif self.textinput.counter==1:
        self.textinput.foreground_color = (0.0,0.5,0.5,1.0)
        self.textinput.counter = self.textinput.counter+1
    elif self.textinput.counter==2:
        self.textinput.foreground_color = (0.0,0.0,1.0,1.0)
        self.textinput.counter = self.textinput.counter+1
    elif self.textinput.counter==3:
        self.textinput.foreground_color = (1.0,0.0,0.5,1.0)
        self.textinput.counter = self.textinput.counter+1
    else:
        self.textinput.foreground_color = (0.0,0.0,0.0,1.0)
        self.textinput.counter=0
    try:
        self.txt_inpt.text = str(int(self.txt_inpt.text)+1)
        self.textinput.text += 'Station is '+self.txt_inpt.text+'\n'
    except:
        # Field was non-numeric: fall back to the configured default.
        # with open('station_start.txt','rb') as f:
        #    st=str(f.read()).split('\n')[0]
        #    f.close()
        st = STATION_START
        self.txt_inpt.text = str(int(st)+1)
        self.textinput.text += 'Station is '+self.txt_inpt.text+'\n'
    self.txt_inpt.foreground_color = self.textinput.foreground_color
    # (retired) station_start.txt persistence:
    # fsite = open('station_start.txt','wb')
    # fsite.write(str(int(self.txt_inpt.text)+1))
    # fsite.close()
    # Rewrite lobos.cfg, replacing line 16 with the new station number.
    countmax=16; counter=0
    with open('lobos.cfg','rb') as oldfile, open('lobos_new.cfg','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                #newfile.write(" text: '"+st+"'\n")
                newfile.write('station_start = '+self.txt_inpt.text+'\n')
            else:
                newfile.write(line)
    mv('lobos_new.cfg','lobos.cfg')
def point_four_upgrade(self):
    """Migrate a pre-0.4 data layout: move every per-issue folder into a
    'tickets' subdirectory and rename each issue.yaml to ticket.yaml."""
    # NOTE(review): compiled but never used here — kept for interface
    # stability in case subclasses/callers rely on its side effects.
    issueid_regex = re.compile("[0-9a-f]{8}-[0-9a-f]{8}")
    folders = glob.glob("%s%s*-*" % (tkt.config.datapath(), os.sep))
    ticketsfolder = os.path.join(tkt.config.datapath(), "tickets")
    if not os.path.exists(ticketsfolder):
        os.makedirs(ticketsfolder)
    for oldfold in folders:
        newfold = os.path.join(ticketsfolder, os.path.basename(oldfold))
        # BUG FIX: shutil has no 'mv' attribute; shutil.move is the API —
        # the migration raised AttributeError before.
        shutil.move(oldfold, newfold)
    issuefiles = glob.glob(os.path.join(ticketsfolder, "*", "issue.yaml"))
    for oldfile in issuefiles:
        newfile = os.path.join(os.path.dirname(oldfile), "ticket.yaml")
        # BUG FIX: same shutil.mv -> shutil.move correction.
        shutil.move(oldfile, newfile)
def buildlib(self, libfile, sources=(), depdirs=(), include_paths=(),
             define_symbols=(), undefine_symbols=(), extra_opts=(),
             working_dir=None, export_dirs=(), cmd=(), force_rebuild=False,
             extra_mappers=()):
    """Build *libfile* inside *working_dir*, merging the per-call options
    with the instance-level defaults.

    Raises Exception when *working_dir* is not given.  Returns the result
    of dumb_get (explicit *cmd*) or smart_get (source-driven build).
    """
    if working_dir is None:
        raise Exception("Must specify working_dir.")
    if isinstance(sources, basestring):
        sources = [sources]
    # Default the export dirs to the working dir when none were given.
    if not export_dirs and working_dir:
        export_dirs = (working_dir, )
    export_dirs = allabs(export_dirs)
    # Merge per-call settings with instance defaults.
    depdirs = allabs(list(depdirs) + list(self.depdirs))
    include_paths = allabs(list(include_paths) + list(self.include_paths))
    define_symbols = list(define_symbols) + list(self.define_symbols)
    undefine_symbols = list(undefine_symbols) + list(self.undefine_symbols)
    extra_opts = list(extra_opts) + list(self.extra_opts)
    type_mappers = list(extra_mappers) + list(self.type_mappers)
    # working_dir cannot be None here (checked above), so the old dead
    # "working_dir = self.working_dir" fallback branch was removed.
    working_dir = os.path.abspath(working_dir)
    with chdir(working_dir):
        if force_rebuild and os.path.exists(libfile):
            # BUG FIX: shutil has no 'mv' attribute; the API is
            # shutil.move — force_rebuild raised AttributeError before.
            shutil.move(libfile, libfile + '.bkp')
        if cmd:
            return self.dumb_get(libfile, cmd, depdirs)
        return self.smart_get(libfile, sources, depdirs, include_paths,
                              define_symbols, undefine_symbols, extra_opts,
                              working_dir, export_dirs,
                              type_mappers=type_mappers)
def m_(probes, subd, rrtype, do=True, ppi=True): measures = [] # Launch capture print 'Launching capture...' x(cap + out + amp) sleep(30) # sleep 30 seconds print 'Capture running' # Launch measurements thed = dns(dn, rrtype.upper(), do=do, prepend_probe_id=ppi) if subd is not None: thed = dns(subd + '.' + dn, rrtype.upper(), do=do, prepend_probe_id=ppi) thep = probes them = atlas.create(thed, thep) measures += [them['measurements'][0]] print 'Measurement launched' # Wait for results print 'Waiting for results...' sleep(5 * 60) # sleep 5 minutes chkct = 0 chk = list(measures) while len(chk) > 0: for mid in chk: if atlas.measurement(mid).next()['status']['id'] > 2: chk.remove(mid) if chkct < 5: sleep(60) # sleep 1 minute else: sleep(10) # sleep 10 seconds chkct += 1 print str(len(measures) - len(chk)) + ' measurement(s) done' print 'Measurement done: ' + str(measures[0]) # Stop capture print 'Stopping capture...' x('kill $(pidof tshark)') sleep(30) x(outdir + perms) # Get results if subd is None: subd = 'apex' f = outdir + subd + '-' + rrtype.lower() + '-' + str(measures[0]) + '.pcap' mv(out, f) print 'Measurements done'
def on_stop(self):
    '''App shutdown hook: write session log to file, restore the station
    line of lobos.kv from station_start.txt, and close the GPS, camera
    and echosounder serial ports.'''
    outfile = os.path.expanduser("~")+os.sep+'log_'+time.asctime().replace(' ','_').replace(':','_')+'.txt'
    with open(outfile,'wb') as f:
        f.write(self.textinput.text)
        f.close()
    #read the last station number
    with open('station_start.txt','rb') as f:
        st=str(f.read()).split('\n')[0]
        f.close()
    #overwrite the kv file on line 34 with the new station number
    countmax=34; counter=0
    with open('lobos.kv','rb') as oldfile, open('lobos_new.kv','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                newfile.write(" text: '"+st+"'\n")
            else:
                newfile.write(line)
    mv('lobos_new.kv','lobos.kv')
    # close the serial port for gps (ports stay 0 when never opened)
    if self.ser!=0:
        self.ser.close()
        print "================="
        print "GPS is closed"
        print "================="
    # close the serial port for camera
    if self.ser2!=0:
        self.ser2.close()
        print "================="
        print "serial camera is closed"
        print "================="
    # close the serial port for echosounder
    if self.ser3!=0:
        self.ser3.close()
        print "================="
        print "echosounder is closed"
        print "================="
def move_patchs(
    test_df,
    selected_patchs_dir='./selected/selected_patchs/',
    gbm_val_dir='./selected_patches/val_GBM',
    lgg_val_dir='./selected_patches/val_LGG',
    gbm_train_dir='./selected_patches/train_GBM',
    lgg_train_dir='./selected_patches/train_LGG',
):
    """Distribute selected patch images into GBM/LGG train directories
    according to the case label in *test_df*.

    test_df: frame with 'Case ID' and 'TypeName' (values "GBM"/"LGG").
    NOTE(review): gbm_val_dir / lgg_val_dir are accepted but never used —
    confirm whether a validation split was intended here.
    """
    ids = test_df['Case ID']
    labels = test_df['TypeName']
    assert len(ids) == len(labels)
    # case id -> label lookup
    hash_dict = {}
    for i in range(len(ids)):
        hash_dict[ids[i]] = labels[i]
    patchs_list = os.listdir(selected_patchs_dir)
    print(len(hash_dict), len(patchs_list))
    # assert len(hash_dict) == len(patchs_list)
    full_paths = [
        os.path.join(selected_patchs_dir, item) for item in patchs_list
    ]
    # The first 12 characters of a patch filename are its TCGA case id.
    patchs_id_list = [patch_case_id[:12] for patch_case_id in patchs_list]
    for i in range(len(full_paths)):
        patch_src_path = full_paths[i]
        patch_id = patchs_id_list[i]
        # NOTE(review): every patch of a case maps to the same destination
        # name, so later patches overwrite earlier ones — confirm intended.
        patch_name = patch_id + '.jpg'
        if patch_id not in hash_dict.keys():
            continue  # patch belongs to a case not in this split
        hash_res = hash_dict[patch_id]
        if hash_res == "GBM":
            mv(patch_src_path, os.path.join(gbm_train_dir, patch_name))
        elif hash_res == "LGG":
            mv(patch_src_path, os.path.join(lgg_train_dir, patch_name))
        else:
            raise KeyError
def on_stop(self):
    '''App shutdown hook: dump the session log to a timestamped file in
    the home directory and restore the station line (line 22) of
    eyedaq.kv from station_start.txt.'''
    # write session log to file
    with open(os.path.expanduser("~")+os.sep+'log_'+time.asctime().replace(' ','_').replace(':','_')+'.txt','wb') as f:
        f.write(self.textinput.text)
        f.close()
    # read back the persisted starting station number
    with open('station_start.txt','rb') as f:
        st=str(f.read()).split('\n')[0]
        f.close()
    # rewrite eyedaq.kv, replacing line 22 with the saved station number
    countmax=22; counter=0
    with open('eyedaq.kv','rb') as oldfile, open('eyedaq_new.kv','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                newfile.write(" text: '"+st+"'\n")
            else:
                newfile.write(line)
    mv('eyedaq_new.kv','eyedaq.kv')
def m_(probes,subd,rrtype,do=True,ppi=True): measures=[] # Launch capture print 'Launching capture...' x(cap+out+amp) sleep(30) # sleep 30 seconds print 'Capture running' # Launch measurements thed=dns(dn,rrtype.upper(),do=do,prepend_probe_id=ppi) if subd is not None: thed=dns(subd+'.'+dn,rrtype.upper(),do=do,prepend_probe_id=ppi) thep=probes them=atlas.create(thed,thep) measures+=[them['measurements'][0]] print 'Measurement launched' # Wait for results print 'Waiting for results...' sleep(5*60) # sleep 5 minutes chkct=0 chk=list(measures) while len(chk)>0: for mid in chk: if atlas.measurement(mid).next()['status']['id']>2: chk.remove(mid) if chkct<5: sleep(60) # sleep 1 minute else: sleep(10) # sleep 10 seconds chkct+=1 print str(len(measures)-len(chk))+' measurement(s) done' print 'Measurement done: '+str(measures[0]) # Stop capture print 'Stopping capture...' x('kill $(pidof tshark)') sleep(30) x(outdir+perms) # Get results if subd is None: subd='apex' f=outdir+subd+'-'+rrtype.lower()+'-'+str(measures[0])+'.pcap' mv(out,f) print 'Measurements done'
def change_st(self):
    ''' Log the currently entered station number and persist it into
    line 16 of lobos.cfg (via a temp file swap). '''
    self.textinput.text += 'Station is '+self.txt_inpt.text+'\n'
    # # get the last site visited and add 1, write to station file
    # fsite = open('station_start.txt','wb')
    # fsite.write(str(int(self.txt_inpt.text)+1))
    # fsite.close()
    # Rewrite the config, replacing line 16 with the new station number.
    countmax=16; counter=0
    with open('lobos.cfg','rb') as oldfile, open('lobos_new.cfg','wb') as newfile:
        for line in oldfile:
            counter += 1
            if counter==countmax:
                #newfile.write(" text: '"+st+"'\n")
                newfile.write('station_start = '+self.txt_inpt.text+'\n')
            else:
                newfile.write(line)
    mv('lobos_new.cfg','lobos.cfg')
def move(src, dst):
    """Relocate *src* to *dst*, first removing any existing entry at
    *dst* (lexists also catches dangling symlinks).  Returns *dst*."""
    destination_occupied = lexists(dst)
    if destination_occupied:
        rm(dst)
    mv(src, dst)
    return dst
def reconstruct_tomo(path, name, dfix, init, volt=300, rotate_X=True):
    """ Reconstruct a tomogram with IMOD-com scripts.

    This also applies mtffilter after ctfcorrection. A reconstruction log
    will be placed in the reconstruction-directory.

    Parameters
    ----------
    path : str
        Path to the reconstruction-directory.
    name : str
        Name of the tomogram (the prefix).
    dfix : float
        dfixed parameter of mtffilter: Fixed dose for each image of the
        input file, in electrons/square Angstrom
    init : float
        initial parameter of mtffilter: Dose applied before any of the
        images in the input file were taken
    volt : int
        volt parameter of mtffilter. Microscope voltage in kV; must be
        either 200 or 300.
        Default: ``300``
    rotate_X : bool
        If the reconstructed tomogram should be rotated 90 degree about X.
        Default: ``True``
    """
    with cd(path):
        mrc_files = glob('*.mrc')
        mrc_files.sort()
        with open(name + "_reconstruction.log", "a") as log:
            # Stack the raw frames into a single tilt series (<name>.st).
            cmd = ['newstack'] + mrc_files + [name + '.st']
            print(" ".join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # X-ray eraser; produces <name>_fixed.st.
            cmd = ['submfg', 'eraser.com']
            print(" ".join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # Keep the original stack, promote the fixed one.
            mv(name + '.st', name + '_orig.st')
            mv(name + '_fixed.st', name + '.st')
            cmd = ['submfg', 'newst.com']
            print(" ".join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            cmd = ['submfg', 'ctfcorrection.com']
            print(" ".join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # Dose-weight the CTF-corrected aligned stack.
            cmd = [
                'mtffilter', '-dfixed', str(dfix), '-initial', str(init),
                '-volt', str(volt), name + '_ctfcorr.ali', name + '.ali'
            ]
            print(" ".join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            cmd = ['submfg', 'tilt.com']
            print(" ".join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            if rotate_X:
                # trimvol reads _full.rec and writes .rec, leaving the
                # input file in place.
                cmd = ['trimvol', '-rx', name + '_full.rec', name + '.rec']
                print(" ".join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
            else:
                print('mv {0}_full.rec {0}.rec'.format(name))
                mv(name + '_full.rec', name + '.rec')
        # Clean up intermediates and restore the original stack.
        os.remove(name + '.st')
        # BUG FIX: in the non-rotate_X branch _full.rec was already moved
        # to .rec, so unconditionally removing it raised
        # FileNotFoundError; only remove it if it still exists.
        if os.path.exists(name + '_full.rec'):
            os.remove(name + '_full.rec')
        mv(name + '_orig.st', name + '.st')
        os.remove(name + '.ali')
        os.remove(name + '_ctfcorr.ali')
        # Tag the final tomogram with its directory name.
        tomName = name + '.rec'
        split_name = os.path.basename(os.path.normpath(path))
        tomRename = name + '_' + split_name + '.rec'
        mv(tomName, tomRename)
def merge_wallpapers(self):
    """Move the downloaded wallpaper identified by the CLI id argument
    from download_path into wallpaper_path, then drop its entry from the
    'meta' section of the config file (removing 'meta' when empty)."""
    with open(self.config_path, 'r') as raw_config:
        config = json.load(raw_config)
    # Validate the request against the recorded download metadata.
    if not 'meta' in config:
        error('Nothing to merge')
    if not self.app.args.id in config['meta']:
        error('Invalid id to merge')
    # Downloads are stored as "<id>.<ext>"; the meta value is the final name.
    wall_ext = config['meta'][self.app.args.id]
    wall_ext = wall_ext.split('.')[-1]
    wall_name = self.app.args.id + '.' + wall_ext
    wall_uri = (config['download_path'] + wall_name)
    new_wall_uri = (config['wallpaper_path'] + config['meta'][self.app.args.id])
    info('Moving the wallpaper ' + config['meta'][self.app.args.id],
         'from ' + wall_uri,
         'to ' + new_wall_uri)
    mv(wall_uri, new_wall_uri)
    success('Moved the wallpaper', config['meta'][self.app.args.id])
    info('Refreshing merge meta information', 'into your config file:',
         self.config_path)
    # Drop the merged entry; remove the whole section once it is empty.
    del config['meta'][self.app.args.id]
    if len(config['meta']) == 0:
        del config['meta']
    with open(self.config_path, 'w') as raw_config:
        json.dump(config, raw_config)
    success('Refreshed your merge config', 'Merged the wallpaper with id',
            self.app.args.id)
def mov(source, dest, flag):
    """Move *source* to *dest* when *flag* equals 1.

    Returns True when the move was performed, False otherwise.
    """
    if flag != 1:
        return False
    mv(source, dest)
    return True
def _rename(src: str, dest: str) -> None:
    """Move *src* to *dest*, creating *dest*'s parent directory (with the
    module-level FOLDER_MODE permissions) if it does not exist yet."""
    target_dir = dirname(dest)
    makedirs(target_dir, mode=FOLDER_MODE, exist_ok=True)
    mv(src, dest)
def OpenFOAM_Writer_0_File(file_name, boundaryFace, valueFace, isScalar=True, axis_val=0):
    """ Write input parameter into any of the variables in the 0
    directory.  The value of a boundary face must be provided, and where
    the parameter is a vector quantity, the element of the vector should
    be specified.  This is currently limited to boundary faces with type
    fixedValue. """
    # Update the value through the PyFoam parsed-file interface.
    f = ppf(file_name)
    if isScalar:
        f['boundaryField'][boundaryFace]['value'] = 'uniform {}'.format(valueFace)
    else:
        f['boundaryField'][boundaryFace]['value']['uniform'][axis_val] = valueFace
    f.writeFile()
    # Format the written file to similar format as previous: re-emit the
    # OpenFOAM banner, then copy the parsed file back re-indented.
    with open(file_name + '.new', 'w') as nf:
        nf.write('{}\n'.format(r'/*--------------------------------*- C++ -*----------------------------------*\ '))
        nf.write('{}\n'.format(r'| ========= | | '))
        nf.write('{}\n'.format(r'| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox | '))
        nf.write('{}\n'.format(r'| \\ / O peration | Version: 2.2.2 | '))
        nf.write('{}\n'.format(r'| \\ / A nd | Web: www.OpenFOAM.org | '))
        nf.write('{}\n'.format(r'| \\/ M anipulation | | '))
        nf.write('{}\n'.format(r'\*---------------------------------------------------------------------------*/ '))
        with open(file_name, 'r') as of:
            Foamfile_block = False
            boundaryField_block = False
            for l in of:
                if 'FoamFile' in l:
                    Foamfile_block = True
                if Foamfile_block:
                    # Re-indent the FoamFile header block.
                    if l.startswith(' '):
                        nf.write(' ' + l.lstrip())
                    else:
                        nf.write(l)
                    if l.startswith('}'):
                        nf.write('{}\n'.format(r'// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //'))
                        nf.write('\n')
                        Foamfile_block = False
                if 'dimensions' in l:
                    nf.write(l)
                    nf.write('\n')
                if 'internalField' in l:
                    nf.write(l)
                    nf.write('\n')
                if 'boundaryField' in l:
                    boundaryField_block = True
                if boundaryField_block:
                    # NOTE(review): the two startswith(' ') prefixes below
                    # were most likely different indent widths (e.g. 8 vs
                    # 4 spaces) that were lost in reformatting — as
                    # written the second branch is unreachable; confirm
                    # against the original file.
                    if l.startswith(' '):
                        nf.write(' ' + ' ' + l.lstrip())
                    elif l.startswith(' '):
                        nf.write(' ' + l.lstrip())
                    elif l.startswith('}'):
                        nf.write('}\n')
                        nf.write('{}\n'.format(r'// ************************************************************************* //'))
                        boundaryField_block = False
                    else:
                        nf.write(l)
    # Atomically replace the PyFoam-written file with the formatted one.
    mv(file_name + '.new', file_name)
def _cut(src: str, dest: str) -> None: mv(src, dest)
# Persist the test set and copy it to the data lake (Databricks notebook
# cells; relies on notebook-scope names test_set, datafile, dirPath,
# dataLake, fig_acc, log_dir and the dbutils runtime object).
test_set.to_csv(datafile, index=None)
dbutils.fs.mv(dirPath + datafile, dataLake)

# COMMAND ----------

display(fig_acc)

# COMMAND ----------

## Stop Tensorboard
dbutils.tensorboard.stop()

# COMMAND ----------

# dbutils.fs.ls('File:/tmp/tensorflow_log_dir')

# COMMAND ----------

import shutil
# BUG FIX: shutil has no 'mv' attribute; shutil.move is the correct API —
# the log export raised AttributeError before.
shutil.move(log_dir, "/dbfs/tensorflow/logs")
# dbutils.fs.mv("File:/tmp/tensorflow_log_dir/events.out.tfevents.1548840731.0122-055838-bets960-10-139-64-8", "dbfs:/mnt/Exploratory/WCLD/tensorflow/logs/events.out.tfevents.1548840731.0122-055838-bets960-10-139-64-8")

# COMMAND ----------

# dbutils.fs.ls("/mnt/Exploratory/WCLD")

# COMMAND ----------

# dbutils.fs.mv("file:/databricks/driver/LSTM_REG_submit_train.csv", "dbfs:/mnt/Exploratory/WCLD/")
# dbutils.fs.mv("file:/databricks/driver/LSTM_REG_model_regression_loss.png", "dbfs:/mnt/Exploratory/WCLD/")
def OpenFOAM_Writer_BlockMeshDict(file_name, domainSize, gridConfig):
    """
    Write input parameters for domain size and grid configuration in the
    blockMeshDict file.

    Parameters
    ----------
    file_name : str
        Path to the blockMeshDict file; it is rewritten in place (a .bak
        copy is made and removed at the end).
    domainSize : sequence of 6 numbers
        (xlo, xhi, ylo, yhi, zlo, zhi) bounds of the rectangular domain.
    gridConfig : sequence of 3 numbers
        Number of cells along x, y and z.
    """
    # Save a backup of blockMeshDict (the boundary section is copied back
    # from it verbatim later).
    cp(file_name, file_name + '.bak')
    # Read blockMeshDict
    f = ppf(file_name)
    # Set the vertices of the domain
    xlo = domainSize[0]
    xhi = domainSize[1]
    ylo = domainSize[2]
    yhi = domainSize[3]
    zlo = domainSize[4]
    zhi = domainSize[5]
    f['vertices'][0] = [xlo, ylo, zlo]
    f['vertices'][1] = [xhi, ylo, zlo]
    f['vertices'][2] = [xhi, yhi, zlo]
    f['vertices'][3] = [xlo, yhi, zlo]
    f['vertices'][4] = [xlo, ylo, zhi]
    f['vertices'][5] = [xhi, ylo, zhi]
    f['vertices'][6] = [xhi, yhi, zhi]
    f['vertices'][7] = [xlo, yhi, zhi]
    # Set the specified grid configuration
    f['blocks'][2][0] = gridConfig[0]
    f['blocks'][2][1] = gridConfig[1]
    f['blocks'][2][2] = gridConfig[2]
    f.writeFile()
    # Format the written file to similar format as previous.
    with open(file_name + '.new', 'w') as nf:
        nf.write('{}\n'.format(r'/*--------------------------------*- C++ -*----------------------------------*\ '))
        nf.write('{}\n'.format(r'| ========= | | '))
        nf.write('{}\n'.format(r'| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox | '))
        nf.write('{}\n'.format(r'| \\ / O peration | Version: 2.2.2 | '))
        nf.write('{}\n'.format(r'| \\ / A nd | Web: www.OpenFOAM.org | '))
        nf.write('{}\n'.format(r'| \\/ M anipulation | | '))
        nf.write('{}\n'.format(r'\*---------------------------------------------------------------------------*/ '))
        with open(file_name, 'r') as of:
            Foamfile_block = False
            for l in of:
                if 'FoamFile' in l:
                    Foamfile_block = True
                if Foamfile_block:
                    if l.startswith(' '):
                        nf.write(' ' + l.lstrip())
                    else:
                        nf.write(l)
                    if l.startswith('}'):
                        nf.write('{}\n'.format(r'// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //'))
                        nf.write('\n')
                        Foamfile_block = False
                if 'convertToMeters' in l:
                    nf.write(l)
                    nf.write('\n')
                if 'vertices' in l:
                    nf.write(l)
                    nf.write('(\n')
                    # BUG FIX: `of.next()` is Python-2-only; the builtin
                    # next(of) works on both Python 2 and Python 3 file
                    # iterators, with identical behavior.
                    for i in range(10):
                        l = next(of)
                        if i > 1:
                            if i == 9:
                                # Last vertex line: drop the trailing ');'.
                                nf.write(' ' + l[:-3] + '\n')
                            else:
                                nf.write(' ' + l)
                    nf.write(');\n')
                    nf.write('\n')
                if 'blocks' in l:
                    nf.write(l)
                    nf.write('(\n')
                    nf.write(' ' + 'hex (0 1 2 3 4 5 6 7) ({:.0f} {:.0f} {:.0f}) simpleGrading (1 1 1)\n'.format(
                        gridConfig[0], gridConfig[1], gridConfig[2]))
                    nf.write(');\n')
                    nf.write('\n')
                if 'edges' in l:
                    nf.write(l)
                    nf.write('(\n')
                    nf.write(');\n')
                    nf.write('\n')
                if 'boundary' in l:
                    # Copy the boundary section verbatim from the backup.
                    with open(file_name + '.bak', 'r') as bak:
                        boundaryBlock = False
                        for lb in bak:
                            if 'boundary' in lb:
                                boundaryBlock = True
                            if 'mergePatchPairs' in lb:
                                boundaryBlock = False
                            if boundaryBlock:
                                nf.write(lb)
                if 'mergePatchPairs' in l:
                    nf.write(l)
                    nf.write('(\n')
                    nf.write(');\n')
                    nf.write('\n')
                    nf.write('{}\n'.format(r'// ************************************************************************* //'))
    # Replace the original and drop the backup.
    mv(file_name + '.new', file_name)
    rm(file_name + '.bak')
def _rename(src: PurePath, dest: PurePath) -> None:
    """Move *src* to *dest*; the parent folder of *dest* is created on demand."""
    parent_dir = dest.parent
    makedirs(parent_dir, mode=_FOLDER_MODE, exist_ok=True)
    source_path = str(src)
    target_path = str(dest)
    mv(source_path, target_path)
def _rename(src: str, dest: str) -> None:
    """Move *src* to *dest*, ensuring the destination directory exists first."""
    makedirs(dirname(dest), mode=folder_mode, exist_ok=True)
    mv(src, dest)
def _cut(src: PurePath, dest: PurePath) -> None: mv(str(src), str(dest))
import segmentation
from glob import glob as ls
from shutil import move as mv

# Load the master segmentation model and its cached training state.
master = segmentation.Segmentation('datasets/dataset-main')
print('Loading master\'s cache')
master.Load('master-main')

# Glob once and reuse the list (the original called glob twice and computed
# an unused `session_file` local).
session_paths = ls('sessions/*.csv')
total_sessions = len(session_paths)
for filenum, session_path in enumerate(session_paths, start=1):
    print('Learning session ' + str(filenum) + '/' + str(total_sessions))
    master.LearningSession(session_path)
    # Archive the processed session so it is not learned twice.
    mv(session_path, './sessions/sessions.old')

print('Storing training to master\'s cache')
master.Store()
def move(srcs, dest):
    """Relocate every file listed in *srcs* into the destination folder *dest*."""
    for path in srcs:
        mv(path, dest)
import shutil
import os

# Move all .npy files from the fundus folder into the thickness-maps folder.
src_f = "/media/olle/Seagate/thickness_map_prediction/fundus"
dest_f = "/media/olle/Seagate/thickness_map_prediction/thickness_maps"
src_files = os.listdir(src_f)
for sf in src_files:
    if ".npy" in sf:
        # BUG FIX 1: shutil has no `mv` attribute; shutil.move is correct.
        # BUG FIX 2: the original destination was os.path.join(dest_f, src_f),
        # which collapses to src_f because src_f is absolute -- the intended
        # target is the file's own name under dest_f.
        shutil.move(os.path.join(src_f, sf), os.path.join(dest_f, sf))
def reconstruct_tomo(path, name, outname='half-tomo', eraseGold=False, SIRT=False, correctCTF=False):
    """
    Reconstruct a tomogram with IMOD-com scripts.

    This also applies mtffilter after ctfcorrection. A reconstruction log
    will be placed in the reconstruction-directory.

    Parameters
    ----------
    path : str
        Path to the reconstruction-directory.
    name : str
        Basename of the stack files inside the reconstruction-directory.
    outname : str
        Name of the output tomogram.
    eraseGold : bool
        Whether or not to remove gold fiducials from the aligned stack
        prior to reconstruction.
    SIRT : bool
        Use SIRT for tomogram reconstruction.
    correctCTF : bool
        Apply (currently only simple) CTF correction to the aligned stack.
    """
    with cd(path):
        with open(name + "_reconstruction.log", "a") as log:
            # make aligned stack
            print('making aligned stack...')
            cmd = ['submfg', 'newst.com']
            print(" ".join(cmd))
            result = subprocess.run(cmd, stdout=log, stderr=log)
            result.check_returncode()
            # correct CTF (currently only simple correction is supported)
            if correctCTF:
                print('correcting CTF in aligned stack (simple correction)...')
                # Modify (only if needed) the ctfcorrection.com file to be
                # compatible with older imod version on hpc04.
                make_legacy_ctfcorrection('ctfcorrection.com')
                cmd = ['submfg', 'ctfcorrection.com']
                print(" ".join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
                mv(name + '.ali', name + '_no_ctfcorr.ali')  # rename the old aligned stack
                mv(name + '_ctfcorr.ali', name + '.ali')  # we just change the name back for clarity.
            # remove gold
            if eraseGold:
                print('removing gold fiducials from aligned stack...')
                # Modify (only if needed) the golderaser.com file to be
                # compatible with older imod version on hpc04.
                make_legacy_golderaser('golderaser.com')
                # run the gold eraser process, creates a gold-free aligned
                # stack called '{name}_erase.ali'
                cmd = ['submfg', 'golderaser.com']
                print(" ".join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
                mv(name + '.ali', name + '_with_gold.ali')  # rename the old aligned stack
                mv(name + '_erase.ali', name + '.ali')  # we just change the name back for clarity.
                os.remove(name + '_with_gold.ali')  # removed old aligned stack to save drive space
            if SIRT:
                # prep the SIRT files (-sync.com files and -finish.com)
                cmd = ['submfg', 'sirtsetup.com']
                print(" ".join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
                # run the SIRT process (i.e. run the -sync.com files)
                SIRT_runfiles = sorted(glob('*sync.com'))
                temp = []  # store old file names
                # BUG FIX: the original printed `runfile[-10]` -- a single
                # character of the filename -- as the progress counter; use a
                # real 1-based index instead.
                for run_no, runfile in enumerate(SIRT_runfiles, start=1):
                    print('Running SIRT [' + str(run_no) + '/' + str(len(SIRT_runfiles)) + ']')
                    cmd = ['submfg', runfile]
                    print(" ".join(cmd))
                    result = subprocess.run(cmd, stdout=log, stderr=log)
                    result.check_returncode()
                    # rotate the tomogram so it opens in logical fashion (cosmetic)
                    SIRT_file_name = sorted(glob('*.srec*'), key=os.path.getmtime)[-1]
                    temp.append(SIRT_file_name)
                    new_SIRT_name = outname + '_SIRT_iter_' + SIRT_file_name[-2:] + '.rec'
                    print('saving as: ' + new_SIRT_name)
                    cmd = ['trimvol', '-rx', SIRT_file_name, new_SIRT_name]
                    result = subprocess.run(cmd, stdout=log, stderr=log)
                    result.check_returncode()
                # remove the old files (.srecXX)
                for srec in temp:
                    os.remove(srec)
                # cleanup the -sync.com files (not essential)
                cmd = ['submfg', 'tilt_sirt-finish.com']
                print(" ".join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
            else:
                print('Running regular tomogram reconstruction')
                # regular tomogram reconstruction
                cmd = ['submfg', 'tilt.com']
                print(" ".join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
                # rotate the tomogram so it opens in logical fashion (cosmetic)
                cmd = ['trimvol', '-rx', name + '_full.rec', name + '.rec']
                print(" ".join(cmd))
                result = subprocess.run(cmd, stdout=log, stderr=log)
                result.check_returncode()
                os.remove(name + '_full.rec')  # this should no longer exist, but to be safe
                mv(name + '.rec', outname + '.rec')
def update_plgs_peaks_fastas(db_path, species2urls, verbose=False):
    """Update fasta files.

    Downloads the Uniprot fastas for each species and writes plain,
    pipeline-friendly, contaminant-extended, and reversed variants under a
    timestamped folder in ``db_path/latest``, after archiving any previous
    'latest' files into 'previous'.

    Args:
        db_path (str): Path to the folder where we will store the files.
        species2urls (dict, or iterable of tuples): Each entry maps a species
            name to its (potentially several) Uniprot urls,
            e.g. ('hye', (<url>, ...)).
        verbose (boolean): increase verbosity.

    Returns:
        None.  NOTE(review): the original docstring claimed a Fastas of
        contaminants was returned, but the function has no return statement;
        ``contaminants`` is a module-level object used below.
    """
    if db_path == '.':
        db_path = Path.cwd()
    db_path = Path(db_path).expanduser()
    latest = db_path / 'latest'
    previous = db_path / 'previous'
    if latest.exists():
        # Archive whatever is currently in 'latest' into 'previous'.
        previous.mkdir(exist_ok=True, parents=True)
        for f in latest.iterdir():
            # NOTE(review): f is already a full Path from iterdir(), so
            # `latest / f` collapses to f itself -- harmless but redundant.
            mv(src=str(latest / f), dst=str(previous))
    NOW = now()
    # Timestamped output directory for this run.
    # NOTE(review): latest_NOW is never mkdir'ed here -- presumably
    # Fastas.write creates parent directories; confirm.
    latest_NOW = latest / NOW
    species2urls = dict(species2urls)
    logger.info("Downloading files.")
    # De-duplicate urls across species before downloading.
    urls = {url for urls in species2urls.values() for url in urls}
    if verbose:
        print('Going to download:')
        pprint(urls)
    # Fetch all urls concurrently.
    with ThreadPoolExecutor() as e:
        raw_fastas = list(e.map(download, urls))
    if verbose:
        print("Downloaded. Parsing fastas.")
    url2fs = {}
    for url, raw in zip(urls, raw_fastas):
        fs = Fastas()
        fs.parse(raw)
        url2fs[url] = fs
    if verbose:
        print("Parsed. Updating files.")
    for name, urls in species2urls.items():
        if verbose:
            print("\tUpdating {}.".format(name))
        # Concatenate the parsed fastas for this species.
        fs = Fastas()
        for url in urls:
            fs += url2fs[url]
        stem = f"{name}_{len(fs)}_{NOW}"
        # Plain and NCBI-general ("pipeline friendly") variants.
        fs.write(latest_NOW / (stem + ".fasta"))
        FS = fs.to_ncbi_general()
        FS.write(latest_NOW / (stem + "_pipelineFriendly.fasta"))
        # Contaminant-extended variants.
        fs.extend(contaminants)
        stem += f"_contaminants_{len(contaminants)}"
        fs.write(latest_NOW / (stem + ".fasta"))
        FS.extend(contaminants.to_ncbi_general())
        FS.write(latest_NOW / (stem + "_pipelineFriendly.fasta"))
        # Reversed (decoy) variants.
        fs.reverse()
        stem += f"_reversed"
        fs.write(latest_NOW / (stem + ".fasta"))
        FS.reverse()
        FS.write(latest_NOW / (stem + "_pipelineFriendly.fasta"))
    logger.info("Succeeeded!")
#gets called by lobos.py to crop and fresize and image in a different processing # makes tings faster # daniel buscombe april 2016 from shutil import move as mv from scipy.misc import imresize, imread, imsave import sys, getopt argv = sys.argv[1:] filename = ''; outdirec = '' opts, args = getopt.getopt(argv,"hi:o:") for opt, arg in opts: if opt == '-h': print 'resize_n_move.py -i <filename> -o <outdirec>' sys.exit() elif opt in ("-i"): filename = arg elif opt in ("-o"): outdirec = arg try: imsave(filename, imresize(imread(filename)[100:-100,:,:],2.0)) mv(filename,outdirec) except: print "error"
def deploy(self):
    """
    Build documentation in directory `root/target` and pushes it to `repo`.

    :Inputs:

        `root`: root directory where the `conf.py` file is.

        `repo`: remote repository where generated HTML content should be
        pushed to.

        `target`: directory relative to `root`, where generated HTML content
        should be written to. This directory **must** be added to the
        repository's `.gitignore` file. (default: `"site"`)

        `doc_branch`: branch where the generated documentation is pushed.
        (default: `"gh-pages"`)

        `latest`: branch that "tracks" the latest generated documentation.
        (default: `"develop"`)

        `local_upstream`: remote repository to fetch from. (default: `None`)

        `make`: list of commands to be used to convert the markdown files to
        HTML. (default: ['make', 'html'])
    """
    # Identify the commit being documented and the CI branch that built it.
    sha = log_and_execute(["git", "rev-parse", "HEAD"]).strip()
    current_branch = environ['GIT_BRANCH']
    logging.debug('current branch: %s' % current_branch)
    host_user, host_repo = get_github_username_repo(self.repo)
    logging.debug('host username: %s, host repo: %s', host_user, host_repo)
    self.upstream = "git@%s:%s/%s.git" % (HOST_URL[self.host], host_user, host_repo)
    logging.debug('upstream: %s' % self.upstream)
    if self.is_pull_request():
        # Docs are deployed only for branch builds, never for pull requests.
        print_with_color("Skipping documentation deployment", 'magenta')
        return
    if self.local_upstream is not None:
        # Pull the documentation branch to avoid conflicts
        log_and_execute(["git", "checkout", self.doc_branch])
        log_and_execute(["git", "branch"])
        log_and_execute(["git", "pull", "origin", self.doc_branch])
        log_and_execute(["git", "checkout", "-f", sha])
        log_and_execute(["git", "branch"])
    # Decode the encrypted deploy key shipped as docs/.documenter.enc.
    enc_key_file = abspath(joinpath(self.root, "docs", ".documenter.enc"))
    # NOTE(review): has_ssh_key is computed but never used in this method.
    has_ssh_key = isfile(enc_key_file)
    with open(enc_key_file, "r") as enc_keyfile:
        enc_key = enc_keyfile.read()
    self.key_file, _ = splitext(enc_key_file)
    with open(self.key_file, "w") as keyfile:
        # NOTE(review): b64decode returns bytes while the file is opened in
        # text mode -- on Python 3 this raises TypeError; confirm whether
        # this code targets Python 2 or should use mode "wb".
        keyfile.write(b64decode(enc_key))
    # Give READ/WRITE permissions
    chmod(self.key_file, stat.S_IREAD | stat.S_IWRITE)
    self.create_ssh_config()
    # Build the docs into `target` inside the docs directory.
    tmp_dir = tempfile.mkdtemp()
    logging.debug("temporary directory is: %s" %tmp_dir)
    docs = joinpath(self.root, "docs")
    cd(docs)
    if not exists(self.target):
        mkdir(self.target)
    log_and_execute(self.make)
    # Versioned docs directories.
    latest_dir = joinpath(self.dirname, self.latest_dir)
    stable_dir = joinpath(self.dirname, self.stable_dir)
    target_dir = joinpath(docs, self.target)
    unstable_dir = joinpath(self.dirname, self.unstable_dir)
    # Setup git.
    cd(tmp_dir)
    log_and_execute(["git", "init"])
    log_and_execute(["git", "config", "user.name", "'autodocs'"])
    log_and_execute(["git", "config", "user.email", "'autodocs'"])
    # Fetch from remote and checkout the branch.
    if self.local_upstream is not None:
        log_and_execute(["git", "remote", "add", "local_upstream", self.local_upstream])
    log_and_execute(["git", "remote", "add", "upstream", self.upstream])
    log_and_execute(["git", "fetch", "upstream"])
    try:
        log_and_execute(["git", "checkout", "-b", self.doc_branch, "upstream/" + self.doc_branch])
    except RuntimeError:
        # The branch does not exist upstream yet: start it as an orphan.
        try:
            log_and_execute(["git", "checkout", "--orphan", self.doc_branch])
            log_and_execute(["git", "rm", "--cached", "-r", "."])
        except:
            raise RuntimeError("could not checkout remote branch.")
    # Copy docs to `latest`, or `stable`, `<release>`, and `<version>`
    # directories.
    destination_dir = None
    if current_branch == 'origin/' + self.latest:
        destination_dir = latest_dir
    elif current_branch == 'origin/' + self.stable:
        destination_dir = stable_dir
    else:
        destination_dir = unstable_dir
    if destination_dir is not None:
        if exists(destination_dir):
            rm(destination_dir)
        logging.debug("Copying HTML folder to %s", destination_dir)
        mv(joinpath(target_dir, "html"), destination_dir)
    if 'GIT_TAG_NAME' in environ.keys():
        logging.debug("This commit (%s) was tagged. A copy of the doc will be stored at %s.", sha, environ['GIT_TAG_NAME'])
        cp(destination_dir, environ['GIT_TAG_NAME'])
    # Create a .nojekyll file so that Github pages behaves correctly with
    # folders starting with an underscore.
    touch('.nojekyll')
    with open('index.html', 'w') as f:
        f.write('<meta http-equiv="refresh" content="0; url=http://%s.github.io/%s/stable"/>' % (host_user, host_repo))
    # Add, commit, and push the docs to the remote.
    log_and_execute(["git", "add", "-A", "."])
    log_and_execute(["git", "commit", "-m", "'build based on %s'" % sha])
    log_and_execute(["git", "push", "-q", "upstream", "HEAD:%s" % self.doc_branch])
    # Clean up temporary directories
    rm(target_dir)
    rm(tmp_dir)
def main(argv):
    """Parse the audiosort command line and sort the music library.

    argv: command-line arguments (sys.argv[1:]).
    Exits with status 0 on success and 2 on usage errors.
    """
    global verbose
    # Characters that cannot appear in file/directory names on this platform.
    if sys.platform.startswith('freebsd') or sys.platform.startswith(
            'linux') or sys.platform.startswith('darwin'):
        illegal_chars = ['.', '/']
    elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
        illegal_chars = ['<', '>', ':', '\"', '/', '\\', '|', '?', '*', '.']
    else:
        # ROBUSTNESS FIX: the original left illegal_chars unbound on any
        # other platform, causing a NameError later.
        illegal_chars = []
    input_dir, output_dir, filename = None, None, None
    verbose, move, copy_or_move_used, use_file, use_nuke, auto_yes, use_over, use_filename = False, True, False, False, False, False, False, False
    try:
        opts, args = getopt.getopt(argv, "cmthyvf:n:i:o:", [
            "yes", "help", "legal", "copy", "move", "verbose", "file=", "nuke",
            "thwomp", "overwrite", "filename=", "input=", "output="
        ])
    except getopt.GetoptError:
        print('Invalid syntax. Use -h or --help')
        sys.exit(2)
    # First pass: options that must take effect before everything else
    # (help/legal exit immediately; -v/-y change how later options behave).
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(
                'Audiosort is a command line utility to sort your music library\n\nMinimum usage of audiosort:\naudiosort -i [unsorted library path] -o [output path]\n\nIf one of the tags contains an illegal character for your OS it will be replaced by a "#"\n\nPatterns can be used in the output path and filename like so:\n%{tag}\n\nUsable tags:\n\talbum\n\talbumartist, artist, realartist (use artist rather than realartist)\n\taudio_offset\n\tbitrate\n\tcomment\n\tcomposer\n\tdisc\n\tdisc_total\n\tduration\n\tfilesize\n\tgenre\n\tsamplerate\n\ttitle\n\ttrack\n\ttrack_total\n\tyear\n\nOptions:\n\t-c or --copy:\n\t\tCopies the files from [input] to [output]\n\t-f or --file\n\t\tNeeds a path !\n\t\tTake care of unsuported files\n\t-h or --help:\n\t\tShows this message\n\t-i or --input:\n\t\tNeeds a path !\n\t\tIndicates the root directory of the audio library to sort\n\t--legal:\n\t\tShows the license of the program\n\t-m or --move:\n\t\tUsed by default\n\t\tMoves the file from [input] to [output]\n\t-n or --filename:\n\t\tNeeds a filename pattern (with tags)\n\t\tRenames the sorted files to the pattern\n\t--nuke:\n\t\tRemoves the input directory after sorting it\n\t-t or --overwrite or --thwomp:\n\t\tIf a file exist already then it\'s overwritten\n\t-o or --output:\n\t\tNeeds a path !\n\t\tIndicates the directory of the audio library output\n\t-v or --verbose:\n\t\tShows information for everything the program does\n\t-y or --yes:\n\t\tAutomatically respond yes to any query'
            )
            sys.exit(0)
        elif opt in ('-v', '--verbose'):
            verbose = True
            print('Using -v or --verbose')
        elif (opt == '--legal'):
            print(legal)
            sys.exit(0)
        elif opt in ('-y', '--yes'):
            auto_yes = True
            print('Using -y or --yes')
    # Second pass: all remaining options.
    for opt, arg in opts:
        # BUG FIX: getopt reports long options with their leading dashes, so
        # the original tuple entry "thwomp" (without "--") never matched.
        if opt in ("-t", "--overwrite", "--thwomp"):
            really_over = input(
                'Are you sure you want to overwrite files ?(Y/n)')
            if (auto_yes):
                really_over = 'Y'
            if (really_over == 'Y'):
                use_over = True
                if (verbose):
                    print('Using -t or --overwrite or --thwomp')
            else:
                if (verbose):
                    print('Not Using -t or --overwrite or --thwomp then')
        elif opt in ('-c', '--copy'):
            if (not copy_or_move_used):
                move = False
                copy_or_move_used = True
                if (verbose):
                    print('Using -c or --copy')
            else:
                print(
                    'Both -c and -m or --copy and --move are used. Using --move by default'
                )
                move = True
        elif opt in ('-m', '--move'):
            if (not copy_or_move_used):
                move = True
                copy_or_move_used = True
                if (verbose):
                    print('Using -m or --move')
            else:
                print(
                    'Both -c and -m or --copy and --move are used. Using --move by default'
                )
                move = True
        elif opt in ('-f', '--file'):
            # Unsupported (non-audio) files are collected in this folder.
            use_file = True
            other_files_path = os.path.abspath(arg)
            if (not os.path.isdir(other_files_path)):
                os.makedirs(other_files_path)
            if (verbose):
                print('Using -f or --file')
        elif opt in ('-n', '--filename'):
            filename = arg
            use_filename = True
            if (verbose):
                print('Using -n or --filename')
        elif (opt == '--nuke'):
            really_nuke = input(
                'Are you sure you want to nuke the input directory? (Y/n)')
            if (auto_yes):
                really_nuke = 'Y'
            if (really_nuke == 'Y'):
                use_nuke = True
                if (verbose):
                    print('Using --nuke')
            else:
                use_nuke = False
                if (verbose):
                    print('Not using -n or --nuke then')
        elif opt in ("-i", "--input"):
            input_dir = os.path.abspath(os.path.normpath(arg))
            if (os.path.isfile(input_dir)):
                print('Error, the input path is pointing to a file')
                sys.exit(2)
            elif (not os.path.isdir(input_dir)):
                print('Error, the input path isn\'t real.')
                sys.exit(2)
        elif opt in ("-o", "--output"):
            output_dir = os.path.abspath(arg)
    if (input_dir == None or output_dir == None):
        print(
            'Invalid syntax. You at least need to use -i and -o or --input and --output'
        )
        sys.exit(2)
    if (verbose):
        print('input directory:', input_dir)
        print('output directory:', output_dir)
    # Walk the whole input library and sort each file.
    for root, dirs, files in os.walk(input_dir):
        for entry in files:
            file, path = entry, root
            os.chdir(path)
            # Placeholder values used whenever a tag is missing.
            filetags = {
                'album': '###UNKNOWN_ALBUM##',
                'albumartist': '###UNKNOWN_ALBUM_ARTIST###',
                'realartist': '###UNKNOWN_ARTIST###',
                'artist': '###UNKNOWN_ARTIST###',
                'audio_offset': '###UNKNOWN_AUDIO_OFFSET###',
                'bitrate': '###UNKNOWN_BITRATE###',
                'comment': '###UNKNOWN_COMMENT###',
                'composer': '###UNKNOWN_COMPOSER###',
                'disc': '###UNKNOWN_DISC###',
                'disc_total': '###UNKNOWN_DISC_TOTAL###',
                'duration': '###UNKNOWN_DURATION###',
                'filesize': '###UNKNOWN_FILESIZE###',
                'genre': '###UNKNOWN_GENRE###',
                'samplerate': '###UNKNOWN_SAMPLERATE###',
                'title': '###UNKNOWN_TITLE###',
                'track': '###UNKNOWN_TRACK###',
                'track_total': '###UNKNOWN_TRACK_TOTAL###',
                'year': '###UNKNOWN_YEAR###'
            }
            if (verbose):
                print('--------------------')
            if (not TinyTag.is_supported(file)):
                # Not an audio file: optionally park it in the --file folder.
                if (use_file):
                    file_path = os.path.join(other_files_path, file)
                    os.path.normpath(file_path)
                    if (not os.path.isfile(file_path) or use_over):
                        m_or_c = ''
                        if (move):
                            mv(os.path.join(root, file), file_path)
                            m_or_c = 'Moved'
                        else:
                            copy2(os.path.join(root, file), file_path)
                            m_or_c = 'Copied'
                        if (verbose):
                            print(m_or_c, os.path.basename(file), 'to', file_path)
                    else:
                        print(
                            'File skipped because it\'s already in the library'
                        )
                else:
                    if (verbose):
                        print('File skipped because it\'s not an audio file')
                if (verbose):
                    print('--------------------\n')
                continue
            # Pull the tags, falling back to the placeholders above.
            f = TinyTag.get(file)
            if (verbose):
                print('file:', file)
            if (not f.album == None):
                filetags['album'] = f.album
            if (not f.albumartist == None):
                filetags['artist'] = f.albumartist
                filetags['albumartist'] = f.albumartist
            if (not f.artist == None):
                if (f.albumartist == None):
                    filetags['artist'] = f.artist
                filetags['realartist'] = f.artist
            if (not f.year == None):
                filetags['year'] = str(f.year)
            if (not f.genre == None):
                filetags['genre'] = f.genre
            if (not f.audio_offset == None):
                filetags['audio_offset'] = str(f.audio_offset)
            if (not f.bitrate == None):
                filetags['bitrate'] = str(f.bitrate)
            if (not f.comment == None):
                filetags['comment'] = f.comment
            if (not f.composer == None):
                filetags['composer'] = f.composer
            if (not f.disc == None):
                filetags['disc'] = f.disc
            if (not f.disc_total == None):
                filetags['disc_total'] = str(f.disc_total)
            if (not f.duration == None):
                filetags['duration'] = str(f.duration)
            if (not f.filesize == None):
                filetags['filesize'] = str(f.filesize)
            if (not f.samplerate == None):
                filetags['samplerate'] = str(f.samplerate)
            if (not f.title == None):
                filetags['title'] = f.title
            if (not f.track == None):
                filetags['track'] = str(f.track)
            if (not f.track_total == None):
                filetags['track_total'] = str(f.track_total)
            if (verbose):
                for x, y in filetags.items():
                    print('[{0}]:{1}'.format(x, y))
            # BUG FIX: str.replace returns a new string; the original
            # discarded the result, so illegal characters were never actually
            # replaced with '#'. Store the sanitized value back into the dict.
            for c in illegal_chars:
                for key in filetags:
                    filetags[key] = filetags[key].replace(c, '#')
            if (not use_filename):
                filename = file
            if ((not music(entry, output_dir, filetags, move, use_over, filename)) and verbose):
                print('File skipped because it\'s already in the library')
            if (verbose):
                print('--------------------\n')
    if (use_nuke):
        rmtree(input_dir)
        if (verbose):
            print('Nuked the input directory')
    sys.exit(0)