def update(servername):
    mirrorpath = '/data/contents/' + servername
    # the mode flag belongs inside the os.access() call; the original
    # misplaced parenthesis produced an always-truthy tuple
    if os.access(mirrorpath, os.W_OK):
        os.chdir(mirrorpath)  # os.cwd() does not exist; os.chdir() changes directory
        print os.getcwd()
    else:
        return
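# A note on the recurring bug in the snippets below: the os module has no
# os.cwd(). os.getcwd() *returns* the current working directory, while
# os.chdir(path) *changes* it. A minimal sketch of the two calls (the /tmp
# path is just an example):
import os

old_dir = os.getcwd()   # remember where we started
os.chdir('/tmp')        # move into /tmp
print(os.getcwd())      # now reports the new working directory
os.chdir(old_dir)       # restore the original directory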
def main():
    args = parse_args()
    # version and version_pre are assumed to be set at module level
    if version == "":
        exit(1)
    # re.search() returns None on no match, so guard before calling group();
    # the trailing 'u' is made optional so that plain "jdk8" still matches
    match = re.search(r'jdk(\d{1,2}u?)', version)
    if match is None:
        exit(1)
    version_nr = match.group(1)
    config_params = "--with-version-opt={} --with-version-pre={} --disable-warnings-as-errors".format(
        datetime.now().strftime("%Y-%m-%d-%H-%M-%S"), version_pre)
    package_name = "openjdk-{}-{}".format(version_nr, version_pre)
    if version == "jdk8" or version == "jdk8u":
        config_params = "--with-milestone={} --disable-debug-symbols --disable-zip-debug-info".format(
            version_pre)
        package_name = "openjdk-1.8.0-{}".format(version_pre)
    makedirs("/{}/usr/lib/jvm/{}".format(package_name, package_name))
    chdir("/build")  # cwd() cannot change directory; chdir (from os) can
    call([
        "bash", "configure", "-q", config_params,
        "--with-native-debug-symbols=none"
    ])
    if args.clean:
        call(["make", "clean"])
    call(["make", "images"])
    if args.nopack:
        exit(0)
def comWrite(self, args):
    if self.serialRunning == False:
        print "Must initialize serial port(s) first!"
        return
    if len(args) == 1:
        if self.selectedFilePath == "":
            print "Must choose a file to write first (use the 'choose' command)."
            return
    else:
        if len(args) == 2:
            # write a specified file; elif prevents a relative path from
            # being written twice (once bare, once prefixed with the cwd)
            if os.path.isfile(args[1]):
                self.flashReadWriter.writeData(args[1])
            elif os.path.isfile(os.getcwd() + "\\" + args[1]):
                self.flashReadWriter.writeData(os.getcwd() + "\\" + args[1])
            else:
                print "Not a valid file: " + args[1]
            return
        if len(args) >= 3:
            if args[1] == "-h":
                for i in range(len(args) - 2):
                    return  # write each hex value (stub)
            if args[1] == "-s":
                for i in range(len(args) - 2):
                    return  # write each character (stub)
    self.flashReadWriter.writeData(self.selectedFilePath)
def initStockData(stockName):
    # os.path.join supplies the separator the bare string concatenation dropped
    stock_dir = os.path.join(os.getcwd(), stockName)
    if not os.path.exists(stock_dir):
        try:
            os.mkdir(stock_dir)
        except OSError:
            raise
        else:
            print("Created directory for " + stockName + " data")
def mysqlbinlog_bak(args):
    '''Monitor MySQL binary-log backups (for the backup server only).'''
    errorlist = {}
    binlogbak = "/binlogbak"
    os.chdir(binlogbak)  # os.cwd() does not exist; os.chdir() changes directory
    for bakdir in os.listdir(binlogbak):
        lastfile = getoutput("ls -t %s/mysql-bin*|head -1" % bakdir)
        lastfile_mtime = int(os.path.getmtime(lastfile))
        nowtime = int(time.time())
        t = 400 - (nowtime - lastfile_mtime)
        if t < 0:  # newest backup file is older than 400s, so flag it as failed
            errorlist[bakdir] = ['backup failed', '']
    if errorlist:
        return WarningData(errorlist)
    return InfoData(errorlist)
class GzipFileExtractor(FileExtractor):
    def __init__(self, uri):
        FileExtractor.__init__(self, uri)

    def extract(self, path=None):
        try:
            gz = gzip.GzipFile(self.uri, 'r')
        except Exception, e:
            raise FileExtractorError(
                "FileExtractor Error: Opening gzip %s: %s" % (self.uri, str(e)))
        if path is None:
            path = os.getcwd()
        try:
            path = os.path.join(path,
                                self.uri.split("/")[-1].replace(".gz", ""))
            f = open(path, "w")
            f.write(gz.read())
            f.close()
        except Exception, e:
            raise FileExtractorError(
                "FileExtractor Error: Extracting gzip %s: %s" % (self.uri, str(e)))
        finally:
            gz.close()  # close the archive on success as well as on error
def main(filename=None):
    """
    This is the try/except block removal program. If you do not specify a
    filename, the program will assume you want to create and use the
    'throwaway.py' test file. Otherwise, it will process as usual.

    This is not a sophisticated function. It will remove ALL instances of
    try or except (with colons) and their child statements. It doesn't ask
    first and it doesn't require confirmation. This cannot be undone using
    ctrl+z. Make a backup before you run this just in case (or follow proper
    protocol and keep repos for your code, programmer).

    Okay. You've been warned.
    """
    if not filename:
        write_throw()  # creating the throwaway file first
        filename = os.path.join(os.getcwd(), 'throwaway.py')
    data = get_lines(filename)
    newlines = update_lines(data)
    write_to_py(newlines, filename)
class Bzip2FileExtractor(FileExtractor):
    def __init__(self, uri):
        FileExtractor.__init__(self, uri)

    def extract(self, path=None):
        try:
            # the local name must not shadow the bz2 module; assigning to
            # 'bz2' here would raise UnboundLocalError on the lookup
            bzf = bz2.BZ2File(self.uri, 'r')
        except Exception, e:
            raise FileExtractorError(
                "FileExtractor Error: Opening bzip2 %s: %s" % (self.uri, str(e)))
        if path is None:
            path = os.getcwd()
        try:
            path = os.path.join(path,
                                self.uri.split("/")[-1].replace(".bz2", ""))
            f = open(path, "w")
            f.write(bzf.read())
            f.close()
        except Exception, e:
            raise FileExtractorError(
                "FileExtractor Error: Extracting bzip2 %s: %s" % (self.uri, str(e)))
        finally:
            bzf.close()  # close the archive on success as well as on error
def __init__(self, path):
    if not os.path.isabs(path):
        path = os.path.join(os.getcwd(), path)
    self.path = path
    _, self.filename = os.path.split(self.path)
    self.title, self.ext = os.path.splitext(self.filename)
    self.dir = os.path.dirname(self.path)
def Download(self, window='3m'):
    path = Path.cwd() / Path(__file__).parent  # Path.cwd() replaces the nonexistent cwd()
    columns = [
        'date', 'symbol', 'open', 'high', 'low', 'close', 'volume', 'change',
        'changePercent', 'vwap'
    ]
    for batch in self.batches:
        batch_data = self.GetBatch(batch, window=window)  # renamed from 'json' to avoid shadowing the json module
        for stock in batch_data:
            print(stock)
            try:
                #print(path / PATH_CONFIG["StockDataPath"] / (stock + '.pkl'))
                df = pd.read_pickle(path / PATH_CONFIG["StockDataPath"] /
                                    (stock + '.pkl'))
            except IOError:
                df = pd.DataFrame(columns=columns)
            for day in batch_data[stock]['chart']:
                df = self.map_to_df(df, stock, day)
            # df['date'] = pd.to_datetime(df['date'])
            # df = df.set_index('date')
            df.to_pickle(path / PATH_CONFIG["StockDataPath"] / (stock + '.pkl'))
def plot_misfit_results(misfits, misfit_count, seismometer, best_index):
    # plotting count vs misfit for final grid search
    misfit_max = np.amax(misfits)
    misfit_min = np.amin(misfits)
    j = len(misfit_count)
    plt.figure()
    #plt.subplot(121)
    plt.plot(misfit_count, misfits, 'bo', markersize=4)
    plt.plot(misfit_count[best_index], misfits[best_index], 'r^', markersize=8)
    plt.xlabel('Grid Search Iteration Count')
    plt.ylabel('RMS Misfit in Log10 Space')
    plt.suptitle('Final Grid Search: Seismometer ' + str(seismometer))
    plt.axis([-5, j + 5, misfit_min * 0.9, misfit_max * 1.1])
    plt.grid(True, which='major')
    plt.grid(True, which='minor')
    # os.path.join supplies the separator the bare concatenation dropped
    fig = os.path.join(os.getcwd(), str(seismometer) + '_misfit' + '.png')
    txt = "plotted rms misfit vs iteration number - saved in file: %s" % (fig)
    print "\n"
    print txt
    plt.savefig(fig)
    plt.show()
    plt.close()
def __init__(self):
    ## if you want more things to be downloaded,
    # add it to the stock_data.csv in the StockData directory
    self.path_main = Path.cwd() / Path(__file__).parent  # Path.cwd() replaces the nonexistent cwd()
    self.path_data = self.path_main / PATH_CONFIG["StockDataPath"]
    self.stock_meta = pd.read_csv(self.path_data / "stock_data.csv")
def train_remote(node_id):
    import subprocess
    import os
    # build the command as a single argument list; passing the program and
    # its flags as two separate positional strings to subprocess.run() fails
    cmd = ["python3", "train.py", "--node-id={}".format(node_id)]
    cwd = os.getcwd()
    subprocess.run(cmd, cwd=cwd)
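# A possible hardening of train_remote above (a sketch; the name
# train_remote_checked is hypothetical): check=True makes subprocess.run()
# raise CalledProcessError when train.py exits non-zero, instead of failing
# silently.
import subprocess

def train_remote_checked(node_id):
    cmd = ["python3", "train.py", "--node-id={}".format(node_id)]
    subprocess.run(cmd, check=True)  # raises subprocess.CalledProcessError on failure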
def main():
    pwd = os.getcwd()
    os.chdir("data")
    for root, dirs, files in os.walk(os.getcwd()):
        print root
        for f in files:
            sanitizeFile("%s/%s" % (root, f))
    os.chdir(pwd)
def main():
    i = 0
    path = os.getcwd()
    for filename in os.listdir(path):
        my_dest = "image" + str(i) + ".jpg"
        # os.path.join supplies the separator that 'path + filename' dropped
        my_source = os.path.join(path, filename)
        my_dest = os.path.join(path, my_dest)
        os.rename(my_source, my_dest)
        i += 1
def __execute(builder, num_threads, num_conditions, t_step=None):
    with open('logerr', 'a') as file:
        if builder == 'gpu':
            for exe in glob('*-gpu'):
                file.write('\n' + exe + '\n')
                try:
                    subprocess.check_call(
                        [pjoin(os.getcwd(), exe), str(num_conditions)], stdout=file)
                except subprocess.CalledProcessError as e:
                    returncode = e.returncode
                    file.write('Error encountered running {}\n'.format(
                        ' '.join([exe, str(num_conditions)])))
                    file.write('Error code: {}\n'.format(returncode))
                    sys.exit(-1)
                shutil.copy(
                    pjoin('log', exe + '-log.bin'),
                    pjoin('log', exe + '-log_{:.0e}.bin'.format(t_step)))
        else:
            for exe in glob('*-int'):
                if exe in valid_int:
                    continue
                if 'rk78' in exe:
                    continue
                file.write('\n' + exe + '\n')
                try:
                    subprocess.check_call([
                        pjoin(os.getcwd(), exe), str(num_threads),
                        str(num_conditions)
                    ], stdout=file)
                except subprocess.CalledProcessError as e:  # unified to the 'as e' syntax used above
                    returncode = e.returncode
                    file.write('Error encountered running {}\n'.format(
                        ' '.join([exe, str(num_threads), str(num_conditions)])))
                    file.write('Error code: {}\n'.format(returncode))
                    sys.exit(-1)
                shutil.copy(
                    pjoin('log', exe + '-log.bin'),
                    pjoin('log', exe + '-log_{:.0e}.bin'.format(t_step)))
def write_throw():
    throwlines = [
        '#throwaway file to test my new function\n',
        'def throwmane():',
        '\ttry:',
        '\t\tprint("apples are red")',
        '\t\tprint("violets are blue!")',
        '\texcept:',
        '\t\tprint("This is the song that never ends.")',
        '\tprint("Hey now, youre a rockstar")\n',
        'if __name__=="__main__":',
        '\tthrowmane()'
    ]
    filename = os.path.join(os.getcwd(), 'throwaway.py')
    write_to_py(throwlines, filename)
def update(args):
    "update a project"
    if not args.proj:
        args.proj = os.getcwd()
    args.proj = os.path.abspath(args.proj)
    check_clean_repo(args.proj, "update")
    cfg = os.path.join(args.proj, "cfg.yml")
    check_file(exists, cfg)
    _cookiecutter(args, cfg, output_dir=os.path.dirname(args.proj),
                  overwrite_if_exists=True)
def augment_data():
    path = os.path.join(os.getcwd(), 'data')
    # out_path must point at the directory created here; the original built
    # data/raw/data/augmented, which never exists
    out_path = os.path.join(path, 'augmented')
    os.mkdir(out_path)
    path = os.path.join(path, 'raw')
    for file in os.listdir(path):
        song = AudioSegment.from_wav(os.path.join(path, file))
        louder = song + 10
        quieter = song - 10
        louder.export(os.path.join(out_path, file[:-4] + '_l' + file[-4:]), format='wav')
        quieter.export(os.path.join(out_path, file[:-4] + '_q' + file[-4:]), format='wav')
def match(self, given, account, verbose=False):
    try:
        # wrap the raw getcwd() string with to_path() so samefile() is available
        cwd = to_path(os.getcwd())
        for directory in self.dirs:
            if cwd.samefile(to_path(directory)):
                if verbose:
                    log(' %s: matches.' % self.get_name())
                return self.script
    except Exception as e:
        raise PasswordError(str(e), culprit=e.__class__.__name__)
    if verbose:
        log(' %s: no match.' % self.get_name())
def save_data(self):
    # player_name: input
    input_arrays = {}
    output_arrays = {}
    for player_actor_id in self.all_data:
        # print(self.player_dicts)
        player_name = self.player_dicts[player_actor_id][
            "Engine.PlayerReplicationInfo:PlayerName"]
        input_arrays[player_name] = self.all_data[player_actor_id]['inputs']
        output_arrays[player_name] = self.all_data[player_actor_id]['outputs']
        print('Saving data for player: %s' % player_name)
        print('Inputs: %s, Outputs: %s' % (input_arrays[player_name].shape,
                                           output_arrays[player_name].shape))
        print('Averages:')
        print(np.mean(input_arrays[player_name], axis=0))
        print(np.mean(output_arrays[player_name], axis=0))
    np.savez(os.path.join(os.getcwd(), 'input'), **input_arrays)
    np.savez(os.path.join(os.getcwd(), 'output'), **output_arrays)
def download_model():
    s3 = boto3.resource('s3')
    if os.path.exists(os.path.join(os.getcwd(), 'checkpoint')):
        # assume warm
        return
    os.mkdir('checkpoint')
    os.mkdir(f'checkpoint/{run_name}')  # run_name is assumed to be a module-level global
    bucket = s3.Bucket(BUCKET_NAME)
    # the Prefix below was a plain string, so {run_name} was never interpolated
    for o in bucket.objects.filter(Prefix=f'checkpoint/{run_name}'):
        bucket.download_file(o.key, o.key)
    os.mkdir('models')
    os.mkdir('models/117M')
    for o in bucket.objects.filter(Prefix='models/117M'):
        bucket.download_file(o.key, o.key)
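# The warm/cold check in download_model relies on the local directories
# existing before boto3 writes into them. A minimal sketch of that pattern in
# isolation (the bucket name here is hypothetical):
import os
import boto3

s3 = boto3.resource('s3')
bucket = s3.Bucket('my-model-bucket')       # hypothetical bucket
os.makedirs('models/117M', exist_ok=True)   # ensure the local prefix exists first
for obj in bucket.objects.filter(Prefix='models/117M'):
    bucket.download_file(obj.key, obj.key)  # mirror the key layout locally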
def add_files(self, filename):
    if os.path.isfile(filename):
        self._files.append(filename)
        print "File added %s" % (filename,)
        s = filename.replace(os.path.dirname(filename), '')
        self.lbfiles.insert(END, s)
        self.lbfiles.update_idletasks()
    else:
        # str concatenation dropped the path separator; join supplies it
        filename = os.path.join(os.getcwd(), filename)
        if os.path.isfile(filename):
            self._files.append(filename)
            print "File added %s" % (filename,)
            self.lbfiles.insert(END, filename)
            self.lbfiles.update_idletasks()
        else:
            print "File does not exist"
def add():
    '''Add template to .templates directory'''
    try:
        template_dir = sys.argv[2]
    except IndexError:  # narrowed from a bare except
        print('Template directory must be specified as first argument')
        return
    try:
        dir_name = sys.argv[3]
    except IndexError:
        dir_name = template_dir.split('/')[-1]
    templates_path = os.path.expanduser('~') + '/.templates'
    from_dir = os.path.join(os.getcwd(), template_dir)
    to_dir = templates_path + '/' + dir_name
    copy_dir(from_dir, to_dir)
def getPlayerFiles():
    print "Getting player files..."
    cwd = os.getcwd()
    playersWithFile = []
    # Iterates over user directories
    for userDir in os.listdir('/home'):
        fpath = '/home/' + str(userDir) + '/python/blackjack/blackjack.py'
        # Check to see if the blackjack.py file exists in the expected directory
        if os.path.isfile(fpath):
            playersWithFile.append(str(userDir))
            print "Adding file from: " + fpath
            # 'iter' named the builtin function here; number the copies instead
            shutil.copy2(fpath, cwd + '/blackjack' + str(len(playersWithFile)) + '.py')
        else:
            print "File does not exist in " + fpath
    print str(len(playersWithFile)) + " player files were added"
def __init__(self, symbol, df):
    self.path_main = Path.cwd() / Path(__file__).parent  # Path.cwd() replaces the nonexistent cwd()
    self.path_data = self.path_main / PATH_CONFIG["StockDataPath"]
    self.df = df
    self.symbol = symbol
    try:
        self.df = self.df[PATH_CONFIG['df_columns']]
        self.df.index = pd.to_datetime(self.df.index)
        self.df.sort_index(inplace=True)
        prev_start = self.df.index.values[-1]
        # will not include the date prev_start
        downloader = Downloader(self.symbol, pd.to_datetime(prev_start))
        df = downloader.get_df()
        self.df = pd.concat([self.df, df])
        self.save_df()
    except KeyError:
        print(f"could not assert columns onto df for {self.symbol}")
def saveValue():
    try:
        if len(current.get().split("_")) == 3:
            if os.path.isdir(output_path):
                out = output_path
            else:
                out = os.getcwd()
            if core.save_info(current.get(), out):
                logger.putLog(status=Status(
                    True, info="File saved successfully"))
            else:
                logger.putLog(
                    status=Status(False, error="Unable to save file"))
        else:
            logger.putLog(status=Status(
                False, error="Select a valid input to save"))
    except Exception:  # narrowed from a bare except so KeyboardInterrupt passes through
        logger.putLog(
            status=Status(False, error="Select a valid input to save"))
class ZipFileExtractor(FileExtractor):
    def __init__(self, uri):
        FileExtractor.__init__(self, uri)

    def extract(self, path=None):
        try:
            zf = zipfile.ZipFile(self.uri, 'r')  # renamed from 'zip' to avoid shadowing the builtin
        except zipfile.BadZipfile, e:
            raise FileExtractorError("FileExtractor Error: Opening zipfile"
                                     " %s: %s" % (self.uri, str(e)))
        if path is None:
            path = os.getcwd()
        for name in zf.namelist():
            fpath = os.path.join(path, name)
            # Check if 'name' is a directory
            if name[-1] == '/':
                try:
                    os.makedirs(fpath)
                except OSError, e:  # os.makedirs raises OSError, not IOError
                    zf.close()
                    raise FileExtractorError("FileExtractor Error: Write "
                                             "error while extracting "
                                             "zipfile %s: %s" % (self.uri, str(e)))
            else:
                bytes = zf.read(name)
                f = open(fpath, 'w')
                try:
                    f.write(bytes)
                except IOError, e:
                    zf.close()
                    f.close()
                    raise FileExtractorError("FileExtractor Error: Write "
                                             "error while extracting "
                                             "zipfile %s: %s" % (self.uri, str(e)))
                f.close()
        zf.close()  # close the archive once all members are written
def get_free(path=None):
    if path is None:
        path = os.path.abspath(os.getcwd())
    if ':' in path:
        # assume a remote location
        # need ssh
        host, path = path.split(':')
        command = 'ssh %s "df -k %s"' % (host, path)
        try:
            result = subprocess.check_output(command, shell=True)
            if len(result.split('\n')) == 3:
                return int(result.split('\n')[1].split()[3]) * 1024
            elif len(result.split('\n')) == 4:
                print result.split('\n')[2]
                # an extra line-break
                return int(result.split('\n')[2].split()[2]) * 1024
        except Exception, e:
            logger.error('Unable to check free space on %s:%s\n%s' %
                         (host, path, e))
    return 0
class TarFileExtractor(FileExtractor):
    def __init__(self, uri):
        FileExtractor.__init__(self, uri)

    def extract(self, path=None):
        try:
            tar = tarfile.open(self.uri, 'r:*')
        except tarfile.TarError, e:
            raise FileExtractorError("FileExtractor Error: Opening tarfile "
                                     "%s: %s" % (self.uri, str(e)))
        if path is None:
            path = os.getcwd()
        try:
            tar.extractall(path)
        except tarfile.TarError, e:
            raise FileExtractorError("FileExtractor Error: Extracting tarfile"
                                     " %s: %s" % (self.uri, str(e)))
        finally:
            tar.close()  # close the archive on success as well as on error
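# A minimal usage sketch for the extractor classes above, assuming the
# FileExtractor base class and FileExtractorError they reference are defined
# earlier in this module (the archive path and destination are hypothetical):
extractor = TarFileExtractor("/tmp/sample.tar.gz")
try:
    extractor.extract("/tmp/unpacked")  # with no argument, extract() defaults to os.getcwd()
except FileExtractorError, e:
    print "extraction failed: %s" % str(e)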
def get_original_complexes(job, mol_sfam, int_sfam, group, int_type, work_dir=None):
    if work_dir is None:
        work_dir = os.getcwd()
    complex_files = []
    for _, row in group:
        row = row.iloc[0]
        RealtimeLogger.info("ROW: {}".format(row))
        mol_file, mol_resi, int_file, int_resi = process_interface(
            job, row, int_type, work_dir=work_dir)
        # try:
        #     mol_file = download_pdb(job, row.mol_superfam_id, row.mol_pdb,
        #         row.mol_chain, row.mol_sdi_id, row.mol_domNo, work_dir=work_dir)
        #
        #     int_file = download_pdb(job, row.int_superfam_id, row.int_pdb,
        #         row.int_chain, row.int_sdi_id, row.int_domNo, work_dir=work_dir)
        # except (KeyboardInterrupt, SystemExit):
        #     raise
        # except Exception as e:
        if mol_file is None or int_file is None:
            # PDB files not found, skip
            RealtimeLogger.info(
                "Cannot download PDB {}.{}.{} bc it was not found".format(
                    row.mol_pdb, row.mol_chain, row.mol_sdi_id))
            complex_files.append(None)
            continue
        merged_file = next(
            prep((mol_file, "M"), (int_file, "I"), merge=True, work_dir=work_dir))
        complex_files.append(merged_file)
    return complex_files
def write_todoheader():
    """
    Description: Writes the header for the TODO list file `TODO.md`.
    Input Args: None
    Return Args: None
    """
    print("Writing text header for `TODO.md`...")
    this_dir = os.getcwd()
    todays_date = datetime.datetime.today().strftime('%Y-%m-%d')
    header = '''\
    # TODO.md
    ---\n
    ## Your Honeydew List for Your %s
    ## Source Directory
    ### as generated on %s
    ---\n
    '''
    with open("TODO.md", 'w') as file:  # opens output file for writing
        file.write(textwrap.dedent(header % (this_dir, todays_date)))
def Download2(self, window='3m'):
    path = Path.cwd() / Path(__file__).parent  # Path.cwd() replaces the nonexistent cwd()
    columns = [
        'symbol', 'open', 'high', 'low', 'close', 'volume', 'change',
        'changePercent', 'vwap'
    ]
    json_obj = self.GetBatch("JNUG,QQQ,ZION", window=window)
    for stock in json_obj:
        ## pandas can read from a json string, need to dump it
        df = pd.read_json(json.dumps(json_obj[stock]['chart']))
        df.set_index('date', inplace=True)
        try:
            prev = pd.read_pickle(path / PATH_CONFIG["StockDataPath"] /
                                  (stock + '.pkl'))
            # truth-testing a Series is ambiguous and DataFrame has no
            # drop_columns method; test for the column and use drop() instead
            if 'date' in prev.columns:
                prev = prev.drop(columns=['date'])
        except IOError:
            prev = pd.DataFrame(columns=columns)
        self.combine_dfs(prev, df)
def exec(cmd):
    args = ""
    path = ""
    home = str(Path.home())
    for part in cmd:  # piece the path back together (loop variable renamed so it no longer shadows cmd)
        if not part == "dir":
            path = path + " " + part
    if path.startswith(" "):
        path = path.lstrip()
    if len(path) > 0:
        if path.startswith("~"):
            path = path.replace("~", home)
            try:
                dir = ls(path)
            except FileNotFoundError:
                print("{0}Error: dir: no such file or directory '{1}'".format(
                    Fore.LIGHTRED_EX, path))
                return
            for i in dir:
                print(i)
        else:
            try:
                dir = ls(path)
            except Exception:
                print("{0}Error: dir: no such file or directory '{1}'".format(
                    Fore.LIGHTRED_EX, path))
                return
            for i in dir:
                print(i)
    else:
        for i in ls(os.getcwd()):
            print(i)
        summons = 'TROLL'
    elif (part == 'torsos'):
        shutil.copy('/home/arthur/Documents/Brumhack2015/Untitled-Fighting-Game-With-Clarifai/SPECIAL images/totem.png', dest + '/' + part)
        summons = 'POLE'
    else:
        shutil.copy('/home/arthur/Documents/Brumhack2015/Untitled-Fighting-Game-With-Clarifai/SPECIAL images/hole.png', dest + '/' + part)
        summons = 'HOLE'
    trollparts += 1
    print '/!\\ WARNING! /!\\ NO IMAGE FOUND!\nSUMMONING THE %s!' % (summons,)
if trollparts == 3:
    os.chdir('/home/arthur/Documents/Brumhack2015/Untitled-Fighting-Game-With-Clarifai/SPECIAL images/')
    name = 'totempole.png'
    shutil.copy(os.getcwd() + '/' + name, dest + '/backgrounds')
    print '/!\\ WARNING! /!\\ THE TOTEM HAS BEEN SUMMONED!'
else:
    os.chdir('/home/arthur/Documents/Brumhack2015/Untitled-Fighting-Game-With-Clarifai/Tags/backgrounds')
    name = findBestMatch(backgroundtags).replace(".py", "")
    os.chdir('/home/arthur/Documents/Brumhack2015/Untitled-Fighting-Game-With-Clarifai/Images/backgrounds')
    try:
        shutil.copy(os.getcwd() + '/' + name, dest + '/backgrounds')
    except:
        pwd = os.getcwd()
        backs = os.listdir(pwd)
        # randint is inclusive at both ends, so len(backs) would overrun the list
        name = backs[random.randint(0, len(backs) - 1)]
        shutil.copy(pwd + '/' + name, dest + '/backgrounds')
        print '/!\\ WARNING! /!\\ NO IMAGE FOUND!\nGENERATING RANDOM BATTLEFIELD'
from glob import glob
from os import getcwd  # os has no 'cwd'; the function is getcwd
from os.path import join, split, splitext

from frames import execute

MANIFEST = {
    # Declare the algorithm info.
    "doc": "Loads all clouds of given format from the CWD.",
    "author": "John Doe",
    "version": "0.6",
    "multithreaded": False,
}

# DEF_IN_PARAMS, IN_PARAMS and OUT_PARAMS are assumed to be injected into the
# global namespace by the frames runtime.
DEF_IN_PARAMS["cloud_extension"] = "copsxml"  # Declare default input params.

if __name__ == "__main__":  # Run the script if it's not imported but executed.
    cloud_extension = IN_PARAMS["cloud_extension"]  # Fetch input params.
    cloud_ids = []
    for path in glob(join(getcwd(), "*." + cloud_extension)):
        name = split(splitext(path)[0])[-1]
        cloud_id = execute("import_cloud", path, name)
        cloud_ids.append(cloud_id)
    OUT_PARAMS["cloud_ids"] = cloud_ids  # Form results
def getCurrentFolder(act):
    uri = act.vibase.doc.get_uri()
    if uri:
        return os.sep.join(uri.split(os.sep)[:-1])
    else:
        return os.getcwd()
def abs(path, parent=None):
    # note: this helper shadows the builtin abs() within its module
    if not isabs(path):
        return join(parent or getcwd(), path)
    return path
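# Hypothetical usage of the abs() helper above:
#   abs("data/input.txt", "/srv/job")  ->  "/srv/job/data/input.txt"
#   abs("data/input.txt")              ->  join(getcwd(), "data/input.txt")
#   abs("/etc/hosts")                  ->  "/etc/hosts" (already absolute)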
def scrape_dbf(verbose=True):
    # for now just reimports if file not present
    # TODO: reimport based on time rules or detection of a new online version
    shapefile_zip_filename = 'address_points_wgs84.zip'
    shapefile_url = 'http://opendata.toronto.ca/gcc/' + shapefile_zip_filename
    shapefile_dir = 'shapefiles'
    dbf_relative_path = os.path.join(shapefile_dir, 'ADDRESS_POINT_WGS84.dbf')
    shapefile_zip_relative_path = os.path.join(shapefile_dir, shapefile_zip_filename)
    print 'Testing:'
    print dbf_relative_path
    # TODO: create file helpers to check for properties
    if os.path.exists(dbf_relative_path):
        # TODO: look for dependencies, zip; delete the whole directory (400M)
        if verbose:
            OK('DBF file found: ', dbf_relative_path)
    else:
        if verbose:
            WARN('DBF file not found', dbf_relative_path)
            MSG('Downloading from ', shapefile_url)
        start_time = date.utc()
        # download file and unzip
        # TODO: try/catch
        request_shapefile = requests.get(shapefile_url)
        # move the file to the shapefile subdirectory
        if not os.path.exists(shapefile_dir):
            os.mkdir(shapefile_dir)
        else:
            # move the zip file to the shapefiles directory
            os.rename(shapefile_zip_filename, shapefile_zip_relative_path)
        with open(shapefile_zip_relative_path, 'wb') as code:
            code.write(request_shapefile.content)
        timestamp = str(date.utc())
        zipfile.ZipFile(shapefile_zip_relative_path).extractall(shapefile_dir)
        # check for success and record in ADMIN db
        if os.path.exists(dbf_relative_path):
            if verbose:
                OK('Shape file imported and extracted, zip file will be deleted')
            shapefiles_admin_db.insert({'timestamp': int(timestamp),
                                        'date': date.getdatetime(),
                                        'log': 'downloaded and extracted',
                                        'file': dbf_relative_path,
                                        'cwd': os.getcwd()})
        else:
            if verbose:
                ERR('File not found')
            shapefiles_admin_db.insert({'timestamp': int(timestamp),
                                        'date': date.getdatetime(),
                                        'log': 'error',
                                        # the original 'is_file' name was undefined
                                        'file exists': os.path.exists(dbf_relative_path),
                                        'filename': dbf_relative_path,
                                        'cwd': os.getcwd()})
            sys.exit(1)
        # delete zip archive
        os.remove(shapefile_zip_relative_path)
        end_time = date.utc()
        if verbose:
            MSG('Download, unpack, cleanup took: ', str(end_time - start_time) + ' seconds')
    start_time = date.utc()
    # truncating geo databases
    try:
        geodb.remove()
    except pymongo.errors.PyMongoError, e:
        sys.stderr.write('Error truncating database: %s' % e)
        sys.exit(1)
#! /usr/bin/python3
import os
import re
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen, urlretrieve

# Directory we're getting busy in.
base_dir = os.getcwd()

# Pass in CM file string to check date (signifying the version)
def check_cm_version(cmfile):
    date_check = re.compile(r'[0-9]{8}')
    return date_check.findall(cmfile)[0]

# Returns the latest version of CM from CM's download page.
# This function needs a lot of work. It assumes way too much about the
# structure of the page and its consistency over time.
def pull_web_version(rom_url):
    available_files = []
    html_data = urlopen(rom_url)
    soup = bs(html_data, "html.parser")  # explicit parser avoids bs4's warning
    for links in soup.findAll(href=re.compile(r'\.zip$')):
        links = links.get("href")
        available_files.append(links)
    return available_files[0]

# Returns the version in my local directory. This too needs work. Right now
# the script just deletes the old version, but I'd like it to be able to handle
def setUp(self):
    self.cwd = os.getcwd()
    if not self.cwd.endswith("/test"):
        self.cwd = "%s/test" % self.cwd
    self.groups = g.GroupSet(self.cwd + "/hostgroups")
import subprocess
import os

os.chdir("../../")  # os.cwd() does not exist; os.chdir() changes the working directory
subprocess.call(['./CATS.sh'])
print("asdfghjkl")
print """Usage: indexer.py <doc root> [<out dir>] Example: indexer.py projects/rust/doc indexer.py projects/rust/doc out-2014-jul-08 """ if __name__ == "__main__": if len(sys.argv) < 2: print_usage() else: src_dir = os.path.abspath(sys.argv[1]) if len(sys.argv) >= 3: out_dir = os.path.abspath(sys.argv[2]) else: out_dir = os.cwd() if not os.path.exists(out_dir) or not os.path.isdir(out_dir): log.error("Output directory either doesn't exist or is not directory") sys.exit(1) try: settings = importlib.import_module("nightly_settings") except ImportError as e: raise ImportError("Please specify settings!") build_docset(settings, src_dir, out_dir)
import logging
import os
import sys
from argparse import ArgumentParser
from sys import argv, exit
from subprocess import call

from GitFSClient import GitFSClient

if __name__ == "__main__":
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    parser = ArgumentParser(description="run a build on a remote host.")
    parser.add_argument("-b", "--build")
    parser.add_argument("-r", "--remote", action="store_true", default=False)
    parser.add_argument("directory")
    cmdline = parser.parse_args(argv[1:])
    logging.debug("cmdline=%s" % cmdline)

    # the original read an undefined 'options' dict; the parsed arguments
    # live on cmdline
    if cmdline.remote:
        os.chdir(cmdline.directory)  # os.cwd() cannot change directory
        # execvp lives in os, not sys; the original's options["args"] was
        # undefined, so a bare make invocation is assumed here
        os.execvp("make", ["make"])
        raise Exception("Can't Happen")

    p, f = os.path.split(cmdline.directory)
    f = "." + f
    d = os.path.join(p, f)
    os.chdir(d)
    client = GitFSClient(d)
    info = client.getInfoRemote()
    logging.debug("received info: %s" % info)
    if "origin" not in info:
        info["origin"] = "origin"
    if "branch" not in info: