def build_geckolib(self, jobs=None, verbose=False, release=False):
    self.set_use_stable_rust()
    self.ensure_bootstrapped()

    ret = None
    opts = []
    if jobs is not None:
        opts += ["-j", jobs]
    if verbose:
        opts += ["-v"]
    if release:
        opts += ["--release"]

    env = self.build_env(is_build=True)
    env["CARGO_TARGET_DIR"] = path.join(self.context.topdir, "target", "geckolib").encode("UTF-8")

    build_start = time()
    with cd(path.join("ports", "geckolib")):
        ret = call(["cargo", "build"] + opts, env=env, verbose=verbose)
    elapsed = time() - build_start

    # Generate Desktop Notification if elapsed-time > some threshold value
    notify_build_done(elapsed)

    print("GeckoLib build completed in %s" % format_duration(elapsed))

    return ret
def module_names(path, recursive=False):
    """
    Return a list of modules which can be imported from *path*.

    :arg path: a directory to scan.
    :type path: string
    :arg recursive: Also return submodule names for packages.
    :type recursive: bool
    :return: a list of string pairs (module_name, module_file).
    :rtype: list
    """
    from os.path import join, isfile

    modules = []

    for filename in sorted(_os.listdir(path)):
        if filename == "modules":
            pass  # XXX, hard coded exception.
        elif filename.endswith(".py") and filename != "__init__.py":
            fullpath = join(path, filename)
            modules.append((filename[0:-3], fullpath))
        elif "." not in filename:
            directory = join(path, filename)
            fullpath = join(directory, "__init__.py")
            if isfile(fullpath):
                modules.append((filename, fullpath))

                if recursive:
                    for mod_name, mod_path in module_names(directory, True):
                        modules.append(("%s.%s" % (filename, mod_name), mod_path))
    return modules
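# Usage sketch for module_names (hypothetical, not part of the original
# source; the scanned directory name is illustrative). module_names walks a
# single directory level, yielding (dotted_name, file_path) pairs and
# recursing into packages when recursive=True.
def _demo_module_names():
    for mod_name, mod_file in module_names("scripts/addons", recursive=True):
        print("%s -> %s" % (mod_name, mod_file))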
def handle_page(self, pagename, ctx, templatename='page.html',
                outfilename=None, event_arg=None):
    ctx['current_page_name'] = pagename
    sidebarfile = self.config.html_sidebars.get(pagename)
    if sidebarfile:
        ctx['customsidebar'] = sidebarfile
    if not outfilename:
        outfilename = path.join(self.outdir,
                                os_path(pagename) + self.out_suffix)
    self.app.emit('html-page-context', pagename, templatename, ctx, event_arg)
    ensuredir(path.dirname(outfilename))
    f = open(outfilename, 'wb')
    try:
        self.implementation.dump(ctx, f, 2)
    finally:
        f.close()
    # if there is a source file, copy the source file for the
    # "show source" link
    if ctx.get('sourcename'):
        source_name = path.join(self.outdir, '_sources',
                                os_path(ctx['sourcename']))
        ensuredir(path.dirname(source_name))
        copyfile(self.env.doc2path(pagename), source_name)
def find_bowtie2_index(r, path_to_bowtie2='bowtie2'):
    """check for bowtie2 index as given.

    return the index basename if found, else return None
    """
    args = [path_to_bowtie2 + '-inspect', '-v', '-s', r]
    debug(' '.join(args))
    P = Popen(args, stdout=open(devnull, 'w'), stderr=PIPE, cwd=mkdtemp())
    stderr = P.communicate()[1].splitlines()
    if not stderr[0].startswith('Could not locate'):
        for line in stderr:
            if line.startswith('Opening'):
                index_bt2 = line[(1 + line.find('"')):line.rfind('"')]
                index_basename = index_bt2[0:index_bt2.find('.1.bt2')]
                return index_basename
    for d in [getcwd(), os.path.split(path_to_bowtie2)[0],
              join(os.path.split(path_to_bowtie2)[0], 'indexes')]:
        rprime = join(d, r)
        args = [path_to_bowtie2 + '-inspect', '-v', '-s', rprime]
        debug(' '.join(args))
        P = Popen(args, stdout=open(devnull, 'w'), stderr=PIPE, cwd=mkdtemp())
        stderr = P.communicate()[1].splitlines()
        if not stderr[0].startswith('Could not locate'):
            for line in stderr:
                if line.startswith('Opening'):
                    index_bt2 = line[(1 + line.find('"')):line.rfind('"')]
                    index_basename = index_bt2[0:index_bt2.find('.1.bt2')]
                    return index_basename
    return None
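# Usage sketch for find_bowtie2_index (hypothetical; 'hg19' is an
# illustrative index name). The helper probes the name as given, then falls
# back to the cwd, the bowtie2 install directory, and its 'indexes'
# subdirectory, exactly as the search loop above does.
def _demo_find_index():
    index_basename = find_bowtie2_index('hg19')
    if index_basename is None:
        raise SystemExit('no bowtie2 index found for hg19')
    print('using index: ' + index_basename)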
def walk_prefix(prefix, ignore_predefined_files=True, windows_forward_slashes=True):
    """
    Return the set of all files in a given prefix directory.
    """
    res = set()
    prefix = abspath(prefix)
    ignore = {'pkgs', 'envs', 'conda-bld', 'conda-meta', '.conda_lock',
              'users', 'LICENSE.txt', 'info', 'conda-recipes', '.index',
              '.unionfs', '.nonadmin'}
    binignore = {'conda', 'activate', 'deactivate'}
    if sys.platform == 'darwin':
        ignore.update({'python.app', 'Launcher.app'})
    for fn in os.listdir(prefix):
        if ignore_predefined_files and fn in ignore:
            continue
        if isfile(join(prefix, fn)):
            res.add(fn)
            continue
        for root, dirs, files in os.walk(join(prefix, fn)):
            should_ignore = ignore_predefined_files and root == join(prefix, 'bin')
            for fn2 in files:
                if should_ignore and fn2 in binignore:
                    continue
                res.add(relpath(join(root, fn2), prefix))
            for dn in dirs:
                path = join(root, dn)
                if islink(path):
                    res.add(relpath(path, prefix))

    if on_win and windows_forward_slashes:
        return {path.replace('\\', '/') for path in res}
    else:
        return res
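# Usage sketch for walk_prefix (hypothetical; the prefix path is
# illustrative). The result contains prefix-relative paths, so it can be
# stored or diffed independently of where the environment actually lives.
def _demo_walk_prefix():
    for rel_path in sorted(walk_prefix('/opt/conda')):
        print(rel_path)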
def test_upload_with_progress(self):

    def callback(path, nbytes, history=defaultdict(list)):
        history[path].append(nbytes)
        return history

    dpath = mkdtemp()
    try:
        path1 = osp.join(dpath, 'foo')
        with open(path1, 'w') as writer:
            writer.write('hello!')
        os.mkdir(osp.join(dpath, 'bar'))
        path2 = osp.join(dpath, 'bar', 'baz')
        with open(path2, 'w') as writer:
            writer.write('the world!')
        self.client.upload(
            'up',
            dpath,
            chunk_size=4,
            n_threads=1,  # Callback isn't thread-safe.
            progress=callback
        )
        eq_(self._read('up/foo'), b'hello!')
        eq_(self._read('up/bar/baz'), b'the world!')
        eq_(
            callback('', 0),
            {path1: [4, 6, -1], path2: [4, 8, 10, -1], '': [0]}
        )
    finally:
        rmtree(dpath)
def compile(args):
    """
    %prog compile directory

    Extract telomere length and ccn.
    """
    p = OptionParser(compile.__doc__)
    p.set_outfile(outfile="age.tsv")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    dfs = []
    for folder in args:
        ofolder = os.listdir(folder)

        # telomeres
        subdir = [x for x in ofolder if x.startswith("telomeres")][0]
        subdir = op.join(folder, subdir)
        filename = op.join(subdir, "tel_lengths.txt")
        df = pd.read_csv(filename, sep="\t")
        d1 = df.iloc[0].to_dict()

        # ccn
        subdir = [x for x in ofolder if x.startswith("ccn")][0]
        subdir = op.join(folder, subdir)
        filename = iglob(subdir, "*.ccn.json")[0]
        js = json.load(open(filename))
        d1.update(js)

        df = pd.DataFrame(d1, index=[0])
        dfs.append(df)

    df = pd.concat(dfs, ignore_index=True)
    df.to_csv(opts.outfile, sep="\t", index=False)
def get_soup(*path):
    from bs4 import BeautifulSoup

    assert exists(join(*path))
    index_html = None
    with open(join(*path), "rb") as f:
        index_html = f.read().decode("utf-8")
    return BeautifulSoup(index_html, "html5lib")
def make_image_path(outdoc, src):
    filename = outdoc.name
    basedir = join(dirname(filename), dirname(src))
    if not exists(basedir):
        makedirs(basedir)
    path = join(dirname(filename), src)
    return path
def snapshot(source, destination, name=None):
    """Snapshot one directory to another. Specify names to snapshot small,
    named differences."""
    source = source + sep
    destination = destination + sep
    if not path.isdir(source):
        raise RuntimeError("source is not a directory")
    if path.exists(destination):
        if not path.isdir(destination):
            raise RuntimeError("destination is not a directory")
        if name is None:
            raise RuntimeError("can't snapshot base snapshot if destination exists")
    snapdir = path.join(destination, ".snapdir")
    if path.exists(path.join(source, ".snapdir")):
        raise RuntimeError("snapdir exists in source directory")
    if name is None:
        check_call(["rsync", "--del", "-av", source, destination])
        makedirs(snapdir)
    else:
        if not path.exists(snapdir):
            raise RuntimeError("No snapdir in destination directory")
        check_call(["rsync", "--del", "-av",
                    "--only-write-batch={}".format(path.join(snapdir, name)),
                    source, destination])
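# Usage sketch for snapshot (hypothetical paths). The first call creates the
# base copy plus the '.snapdir' bookkeeping directory; later named calls
# record only the delta as an rsync batch file inside '.snapdir'.
def _demo_snapshot():
    snapshot('/data/work', '/backups/work')               # base snapshot
    snapshot('/data/work', '/backups/work', name='day1')  # named delta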
def test_populate_areas():
    from webpages.populate import populate, save_expander
    populate(save_expander, prep_site_config("w9", **{"browser": "desktop"}))
    assert exists(join(pages_test_root, "output", "index.html"))
    soup = get_soup(join(pages_test_root, "output", "index.html"))
    a1 = soup.find(id="a1")
    a2 = soup.find(id="a2")
    config = eval_config_script(soup("script")[2].string)

    # The elements with parts content and matter
    assert a1.contents[0].strip() == "top bit"
    assert a1.contents[1].string.strip() == "section one"
    assert a1.contents[3].string.strip() == "section two"
    assert a1["class"].split() == [u"splash-area-inactive",
                                   u"upper-area-inactive",
                                   u"lower-area-inactive"]
    assert soup.find("section", id="s1")["class"].split() == [
        u"in-splash-area", u"in-splash-order-0",
        u"in-upper-area", u"in-upper-order-0", u"in-upper-order-last"]
    assert soup.find("section", id="s1")["role"] == "deck"
    assert soup.find("section", id="s2")["class"].split() == [
        u"in-splash-area", u"in-splash-order-1",
        u"in-lower-area", u"in-lower-order-0", u"in-lower-order-last",
        u"in-splash-order-last"]
    assert config["a1"] == {"area-names": ["splash", "upper", "lower"],
                            "charset": "utf-8", "layouter": "area-stage"}
    assert config["s2"] == {"area-names": ["splash", "lower"],
                            "charset": "utf-8", "laidout": "area-member"}
    assert config["s1"] == {"area-names": ["splash", "upper"],
                            "charset": "utf-8", "laidout": "area-member"}

    # The elements without parts, simply inline in HTML
    assert soup.find(id="a2")
    assert config["s4"] == {"area-names": ["second"], "laidout": "area-member"}
    assert config["s3"] == {"area-names": ["first"], "laidout": "area-member"}
    assert config["a2"] == {"area-names": ["first", "second"], "layouter": "area-stage"}
def setup_python3():
    # Taken from "distribute" setup.py
    from distutils.filelist import FileList
    from distutils import dir_util, file_util, util, log
    from os.path import join

    tmp_src = join("build", "src")
    log.set_verbosity(1)
    fl = FileList()
    for line in open("MANIFEST.in"):
        if not line.strip():
            continue
        fl.process_template_line(line)
    dir_util.create_tree(tmp_src, fl.files)
    outfiles_2to3 = []
    for f in fl.files:
        outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
        if copied and outf.endswith(".py"):
            outfiles_2to3.append(outf)

    util.run_2to3(outfiles_2to3)

    # arrange setup to use the copy
    sys.path.insert(0, tmp_src)

    return tmp_src
def get_conf_path(filename=None):
    """Return absolute path for configuration file with specified filename"""
    # Define conf_dir
    if PYTEST:
        import py
        from _pytest.tmpdir import get_user
        conf_dir = osp.join(str(py.path.local.get_temproot()),
                            'pytest-of-{}'.format(get_user()),
                            SUBFOLDER)
    elif sys.platform.startswith('linux'):
        # This makes us follow the XDG standard to save our settings
        # on Linux, as it was requested on Issue 2629
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '')
        if not xdg_config_home:
            xdg_config_home = osp.join(get_home_dir(), '.config')
        if not osp.isdir(xdg_config_home):
            os.makedirs(xdg_config_home)
        conf_dir = osp.join(xdg_config_home, SUBFOLDER)
    else:
        conf_dir = osp.join(get_home_dir(), SUBFOLDER)

    # Create conf_dir
    if not osp.isdir(conf_dir):
        if PYTEST:
            os.makedirs(conf_dir)
        else:
            os.mkdir(conf_dir)
    if filename is None:
        return conf_dir
    else:
        return osp.join(conf_dir, filename)
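# Usage sketch for get_conf_path (hypothetical filename; SUBFOLDER and the
# helpers it uses come from the surrounding module). On Linux the file
# resolves under $XDG_CONFIG_HOME, elsewhere under the user's home directory,
# and the config directory is created on first use.
def _demo_conf_path():
    ini_path = get_conf_path('settings.ini')
    print('settings live at: ' + ini_path)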
def test_source_space_from_label():
    """Test generating a source space from volume label."""
    tempdir = _TempDir()
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    volume_label = label_names[int(np.random.rand() * len(label_names))]

    # Test pos as dict
    pos = dict()
    pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
                  volume_label=volume_label, mri=aseg_fname)

    # Test no mri provided
    pytest.raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
                  volume_label=volume_label)

    # Test invalid volume label
    pytest.raises(ValueError, setup_volume_source_space, 'sample',
                  volume_label='Hello World!', mri=aseg_fname)

    src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
                                    volume_label=volume_label, mri=aseg_fname,
                                    add_interpolator=False)
    assert_equal(volume_label, src[0]['seg_name'])

    # test reading and writing
    out_name = op.join(tempdir, 'temp-src.fif')
    write_source_spaces(out_name, src)
    src_from_file = read_source_spaces(out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
def test_fetch_file(url, tmpdir):
    """Test URL retrieval."""
    tempdir = str(tmpdir)
    archive_name = op.join(tempdir, "download_test")
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at' not in log
    with open(archive_name, 'rb') as fid:
        data = fid.read()
    stop = len(data) // 2
    assert 0 < stop < len(data)
    with open(archive_name + '.part', 'wb') as fid:
        fid.write(data[:stop])
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at %s' % stop in log
    with pytest.raises(Exception, match='unknown url type'):
        _fetch_file('NOT_AN_ADDRESS', op.join(tempdir, 'test'), verbose=False)
    resume_name = op.join(tempdir, "download_resume")
    # touch file
    with open(resume_name + '.part', 'w'):
        os.utime(resume_name + '.part', None)
    _fetch_file(url, resume_name, resume=True, timeout=30., verbose=False)
    with pytest.raises(ValueError, match='Bad hash value'):
        _fetch_file(url, archive_name, hash_='a', verbose=False)
    with pytest.raises(RuntimeError, match='Hash mismatch'):
        _fetch_file(url, archive_name, hash_='a' * 32, verbose=False)
def _check_line_endings():
    """Check all files in the repository for CR characters"""
    if sys.platform == 'win32':
        print('Skipping line endings check on Windows')
        sys.stdout.flush()
        return

    print('Running line endings check... ')
    sys.stdout.flush()
    report = []
    root_dir, dev = _get_root_dir()
    if not dev:
        root_dir = op.join(root_dir, 'vispy')
    for dirpath, dirnames, filenames in os.walk(root_dir):
        for fname in filenames:
            if op.splitext(fname)[1] in ('.pyc', '.pyo', '.so', '.dll'):
                continue
            # Get filename
            filename = op.join(dirpath, fname)
            relfilename = op.relpath(filename, root_dir)
            # Open and check
            try:
                text = open(filename, 'rb').read().decode('utf-8')
            except UnicodeDecodeError:
                continue  # Probably a binary file
            crcount = text.count('\r')
            if crcount:
                lfcount = text.count('\n')
                report.append('In %s found %i/%i CR/LF' %
                              (relfilename, crcount, lfcount))

    # Process result
    if len(report) > 0:
        raise RuntimeError('Found %s files with incorrect endings:\n%s' %
                           (len(report), '\n'.join(report)))
def test_upload_cleanup(self):
    dpath = mkdtemp()
    _write = self.client.write

    def write(hdfs_path, *args, **kwargs):
        if 'bar' in hdfs_path:
            raise RuntimeError()
        return _write(hdfs_path, *args, **kwargs)

    try:
        self.client.write = write
        npath = osp.join(dpath, 'hi')
        os.mkdir(npath)
        with open(osp.join(npath, 'foo'), 'w') as writer:
            writer.write('hello!')
        os.mkdir(osp.join(npath, 'bar'))
        with open(osp.join(npath, 'bar', 'baz'), 'w') as writer:
            writer.write('world!')
        try:
            self.client.upload('foo', dpath)
        except RuntimeError:
            ok_(not self._exists('foo'))
        else:
            ok_(False)  # This shouldn't happen.
    finally:
        rmtree(dpath)
        self.client.write = _write
def handle(self, **options):
    for party_slug in sorted(os.listdir(ppc_data_directory)):
        json_directory = join(
            ppc_data_directory,
            party_slug
        )
        for leafname in sorted(os.listdir(json_directory)):
            if not leafname.endswith('.json'):
                continue
            filename = join(json_directory, leafname)
            image = re.sub(r'\.json$', '-cropped.png', filename)
            if not exists(image):
                image = None
            print '==============================================================='
            print "filename:", filename
            with open(filename) as f:
                ppc_data = json.load(f)
            ppc_data['party_slug'] = party_slug
            ppc_data['party_object'] = party_slug_to_popit_party[party_slug]
            ppc_data['constituency_object'] = get_constituency_from_name(
                ppc_data['constituency']
            )
            if options['check']:
                continue
            self.handle_person(ppc_data, image)
def main_menu():
    global screen

    menu_song = pygame.mixer.music.load(path.join(sound_folder, "menu.ogg"))
    pygame.mixer.music.play(-1)

    title = pygame.image.load(path.join(img_dir, "main.png")).convert()
    title = pygame.transform.scale(title, (WIDTH, HEIGHT), screen)

    screen.blit(title, (0, 0))
    pygame.display.update()

    while True:
        ev = pygame.event.poll()
        if ev.type == pygame.KEYDOWN:
            if ev.key == pygame.K_RETURN:
                break
            elif ev.key == pygame.K_q:
                pygame.quit()
                quit()
        else:
            draw_text(screen, "Press [ENTER] To Begin", 30, WIDTH / 2, HEIGHT / 2)
            draw_text(screen, "or [Q] To Quit", 30, WIDTH / 2, (HEIGHT / 2) + 40)
            pygame.display.update()

    # pygame.mixer.music.stop()
    ready = pygame.mixer.Sound(path.join(sound_folder, 'getready.ogg'))
    ready.play()
    screen.fill(BLACK)
    draw_text(screen, "GET READY!", 40, WIDTH / 2, HEIGHT / 2)
    pygame.display.update()
def testNoStartDateIfLastLogLineIsDeletedIds(self):
    f = open(join(self.tempdir, 'repository.stats'), 'w')
    f.write('''Started: 2005-01-03 16:10:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-03 16:11:45, ResumptionToken:
Started: 2005-01-04 16:12:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-04 16:13:45, ResumptionToken: ^^^oai_dc^45231
Started: 2005-01-06 16:16:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-06 16:17:45, ResumptionToken:
Started: 2005-01-07 16:18:56, Harvested/Uploaded/Deleted/Total: 0/0/0/0, Done: Deleted all ids
''')
    f.close()
    s = State(self.tempdir, 'repository')
    self.assertEquals(None, s.from_)
    self.assertEquals(None, s.token)

    # and now with 'ids' misspelled as used to be the case
    f = open(join(self.tempdir, 'repository.stats'), 'w')
    f.write('''Started: 2005-01-03 16:10:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-03 16:11:45, ResumptionToken:
Started: 2005-01-04 16:12:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-04 16:13:45, ResumptionToken: ^^^oai_dc^45231
Started: 2005-01-06 16:16:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-06 16:17:45, ResumptionToken:
Started: 2005-01-07 16:18:56, Harvested/Uploaded/Deleted/Total: 0/0/0/0, Done: Deleted all id's
''')
    f.close()
    s = State(self.tempdir, 'repository')
    self.assertEquals(None, s.from_)
    self.assertEquals(None, s.token)
def getdeps(self):
    """Download and Extract Sources"""
    for source in self.sources:
        self.log.info("")
        self.log.info("#####################################################")

        # Skip anything already extracted
        extractdir = abspath(join(DepSource.RootExtractDir, source.destsubdir))
        if exists(extractdir):
            self.log.warn("Deps Subdir: " + source.destsubdir + " already exists, skipping")
            continue

        extracted = False
        downloaded = source.download()
        if not downloaded:
            self.log.error("Download Failed")
        else:
            extracted = source.extract()

        # Remove the archive file
        if source.destsubdir != "atmel-asf":
            source.remove_archivefile()

    # Re-jig the directories for those that need it
    for source in self.sources:
        source.movetoparent_multiple()

    # Check for ASF Sources
    if not exists(join(self.DepsDirectory, "atmel-asf")):
        self.log.warn("There was no Atmel ASF Archive file found")
        self.log.warn("asf is not required but you can manually download the below file for the Atmel Source")
        self.log.warn("http://www.atmel.com/tools/avrsoftwareframework.aspx?tab=overview")
        self.log.warn("So far this is only used for porting mbed to sam based mcu's")
    return
def test_io_inverse_operator():
    """Test IO of inverse_operator"""
    tempdir = _TempDir()
    inverse_operator = read_inverse_operator(fname_inv)
    x = repr(inverse_operator)
    assert_true(x)
    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
    # just do one example for .gz, as it should generalize
    _compare_io(inverse_operator, '.gz')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_inverse_operator(inv_badname, inverse_operator)
        read_inverse_operator(inv_badname)
    assert_naming(w, 'test_inverse.py', 2)

    # make sure we can write and read
    inv_fname = op.join(tempdir, 'test-inv.fif')
    args = (10, 1. / 9., 'dSPM')
    inv_prep = prepare_inverse_operator(inverse_operator, *args)
    write_inverse_operator(inv_fname, inv_prep)
    inv_read = read_inverse_operator(inv_fname)
    _compare(inverse_operator, inv_read)
    inv_read_prep = prepare_inverse_operator(inv_read, *args)
    _compare(inv_prep, inv_read_prep)
    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
    _compare(inv_prep, inv_prep_prep)
def cat_counter_references(counter_references=None, target_dir=curdir,
                           path_to_bowtie2='bowtie2',
                           logger=None, **kwargs):
    if counter_references is None:
        return
    try:
        makedirs(target_dir, mode=0o755)
    except OSError:
        pass
    debug('Validating counter-references and building counter-reference index')
    valid_references = validate_references(references=counter_references,
                                           target_dir=target_dir,
                                           path_to_bowtie2=path_to_bowtie2,
                                           logger=logger,
                                           environ_key='SOT_DEFAULT_COUNTER_REFERENCES')
    # Concatenate the validated references into one fasta file, then close it
    # so bowtie2-build reads the complete contents from disk
    crefs_fa_path = join(target_dir, 'counter_references.fa')
    crefs_fa = open(crefs_fa_path, 'w')
    for ref in valid_references:
        Popen([path_to_bowtie2 + '-inspect', ref], stdout=crefs_fa).wait()
    crefs_fa.close()
    crefs_index = join(target_dir, counter_references)
    args = [path_to_bowtie2 + '-build', crefs_fa_path, crefs_index]
    P = Popen(args, stderr=PIPE)
    stderr = P.communicate()[1]
    if stderr.startswith('Error'):
        critical(stderr)
        critical('No counter-references will be used.')
    return crefs_index
def setMaster():
    if exists('/etc/hosts0'):
        print 'etc/hosts0 exists'
    else:
        sudo('cp /etc/hosts /etc/hosts0')

    sudo('rm /etc/hosts')
    sudo('cp /etc/hosts0 /etc/hosts')
    put('hosts')
    sudo('cat hosts|sudo tee -a /etc/hosts')
    run('rm hosts')
    run('cat /etc/hosts')

    path1 = '/home/{0}'.format(parm['USER'])
    rsync_project(path1, exclude=['result'])
    path2 = join(path1, basename(realpath('.')))
    path3 = join(path2, parm['programdir'])
    for dst in (path2, path3):
        fi = '{0}/{1}'.format(dst, parm['keyfile'])
        if not exists(fi, use_sudo=True):
            put(parm['keyfile'], dst)
            sudo('chmod 400 {0}'.format(fi))
    execute('genkey')
def _preprocess(self, library_name, file_name, preprocessors):
    """
    Preprocess file_name within library_name using explicit preprocessors;
    if preprocessors is None then use implicit globally defined processors
    """
    # @TODO dependency checking etc...

    if preprocessors is None:
        preprocessors = [self._location_preprocessor, self._check_preprocessor]
        preprocessors = [p for p in preprocessors if p is not None]
        preprocessors = self._external_preprocessors + preprocessors

    if len(preprocessors) == 0:
        return file_name

    code = ostools.read_file(file_name)
    for preprocessor in preprocessors:
        code = preprocessor.run(code, basename(file_name))

    pp_file_name = join(self._preprocessed_path, library_name, basename(file_name))

    idx = 1
    while ostools.file_exists(pp_file_name):
        LOGGER.debug("Preprocessed file exists '%s', adding prefix", pp_file_name)
        pp_file_name = join(self._preprocessed_path,
                            library_name,
                            "%i_%s" % (idx, basename(file_name)))
        idx += 1

    ostools.write_file(pp_file_name, code)
    return pp_file_name
def rpt_list_testdates(args):
    parser = ArgumentParser(prog='mx rpt-list-testdates')
    _add_common_args(parser)
    parser.add_argument('--printdir', action='store_true', help='print directory containing tests')
    _add_pattern_arg(parser)
    args = _check_verbose(parser.parse_args(args))
    fastr = dict()
    local_dirs = get_local_dirs(args.logdir)
    for local_dir in local_dirs:
        resultInfo = ResultInfo(local_dir)
        result_outputs = _gather_test_outputs(join(args.logdir, local_dir, "test"))
        for pkg, _ in result_outputs.iteritems():
            if re.search(args.pattern, pkg) is None:
                continue
            if pkg not in fastr:
                testdates = []
                fastr[pkg] = testdates
            else:
                testdates = fastr[pkg]
            testdates.append(resultInfo)
    for pkg, testdates in fastr.iteritems():
        sortedList = sorted(testdates, reverse=True)
        print pkg
        for resultInfo in sortedList:
            if args.printdir:
                print '  ' + join(args.logdir, resultInfo.localdir)
            else:
                print '  ' + str(resultInfo.date)
def handle_finish(self):
    self.info(bold('dumping search index... '), nonl=True)
    self.indexer.prune(self.env.all_docs)
    searchindexfn = path.join(self.outdir, self.searchindex_filename)
    # first write to a temporary file, so that if dumping fails,
    # the existing index won't be overwritten
    f = open(searchindexfn + '.tmp', 'wb')
    try:
        self.indexer.dump(f, self.indexer_format)
    finally:
        f.close()
    movefile(searchindexfn + '.tmp', searchindexfn)
    self.info('done')

    self.info(bold('dumping object inventory... '), nonl=True)
    f = open(path.join(self.outdir, INVENTORY_FILENAME), 'w')
    try:
        f.write('# Sphinx inventory version 1\n')
        f.write('# Project: %s\n' % self.config.project.encode('utf-8'))
        f.write('# Version: %s\n' % self.config.version)
        for modname, info in self.env.modules.iteritems():
            f.write('%s mod %s\n' % (modname, self.get_target_uri(info[0])))
        for refname, (docname, desctype) in self.env.descrefs.iteritems():
            f.write('%s %s %s\n' % (refname, desctype,
                                    self.get_target_uri(docname)))
    finally:
        f.close()
    self.info('done')
def rpt_compare(args):
    '''
    Analyze package test results by comparing test output with GnuR output.
    Uses either a specific directory, i.e. the 'test' subdirectory of the --testdir argument,
    or (default) the latest downloaded results from the --logdir directory.
    Return 0 if passed, non-zero if failed.
    '''
    parser = ArgumentParser(prog='mx rpt-compare')
    _add_common_args(parser)
    parser.add_argument('--testdir', action='store', help='specific dir containing fastr results')
    parser.add_argument('--pkg', action='store', help='pkg to compare')
    parser.add_argument('--diff', action='store_true', help='execute given diff program on differing outputs')
    parser.add_argument('--difftool', action='store', help='diff tool', default='diff')
    _add_pattern_arg(parser)
    args = _check_verbose(parser.parse_args(args))

    if args.pkg:
        # backwards compatibility
        args.pattern = args.pkg

    gnur = _gather_test_outputs(join(os.getcwd(), "test_gnur"))

    if args.testdir:
        fastr = _gather_test_outputs(join(args.testdir, "test"))
    else:
        fastr = _get_test_outputs(_gather_all_test_outputs(args.logdir))

    rdict = _rpt_compare_pkgs(gnur, fastr, args.verbose, args.pattern,
                              args.diff, args.difftool)
    for _, rc in rdict.iteritems():
        if rc == 1:
            return 1
    return 0
def _setup_region_dict(self, name, reg, outsubdir, systematic, ntuple, basedir):
    regdic = reg.get_config_dict()
    modedir = regdic['hists'].lower()
    regdic['name'] = name
    if outsubdir:
        systdir = outsubdir
    else:
        if systematic == 'NONE':
            systdir = 'baseline'
        else:
            systdir = systematic.lower()
    full_out_dir = join(basedir, modedir, systdir)
    if not isdir(full_out_dir):
        if self.make_dirs:
            make_dir_if_none(full_out_dir)
        else:
            raise IOError(99, "no dir", full_out_dir)
    histname = '{}.h5'.format(basename(splitext(ntuple)[0]))
    full_out_path = join(full_out_dir, histname)
    if isfile(full_out_path):
        if self.rerun:
            os.remove(full_out_path)
        else:
            return None
    regdic['output_name'] = full_out_path
    regdic['systematic'] = systematic
    return regdic
def _copy_contents(dst_dir, contents):
    items = {"dirs": set(), "files": set()}

    for path in contents:
        if isdir(path):
            items['dirs'].add(path)
        elif isfile(path):
            items['files'].add(path)

    dst_dir_name = basename(dst_dir)

    if dst_dir_name == "src" and len(items['dirs']) == 1:
        copytree(list(items['dirs']).pop(), dst_dir, symlinks=True)
    else:
        makedirs(dst_dir)
        for d in items['dirs']:
            copytree(d, join(dst_dir, basename(d)), symlinks=True)

    if not items['files']:
        return

    if dst_dir_name == "lib":
        dst_dir = join(dst_dir, mkdtemp(dir=dst_dir))

    for f in items['files']:
        copyfile(f, join(dst_dir, basename(f)))