def builder(srcdir):
    """Compile every .po file under *srcdir* into a binary .mo catalog.

    :param str srcdir: app.srcdir
    """
    srcdir = path(srcdir)
    for dirpath, dirs, files in os.walk(srcdir):
        dirpath = path(dirpath)
        for f in [f for f in files if f.endswith('.po')]:
            po = dirpath / f
            # Place the .mo under <srcdir>/xx/LC_MESSAGES/, mirroring the
            # .po file's location relative to srcdir (po[:-3] strips ".po").
            mo = srcdir / 'xx' / 'LC_MESSAGES' / (
                os.path.relpath(po[:-3], srcdir) + '.mo')
            if not mo.parent.exists():
                mo.parent.makedirs()
            write_mo(mo, read_po(po))
def groupDocument(self, doc):
    """Called, if grouping is enabled, to group the document.

    Ensures a top-level folder item exists for the document's path,
    moves the document's tree item under it, and removes the previous
    parent item if it ends up empty.
    """
    i = self._items[doc]
    p = util.path(doc.url())
    new_parent = self._paths.get(p)
    if new_parent is None:
        # First document for this path: create and register the folder item.
        new_parent = self._paths[p] = QTreeWidgetItem(self)
        new_parent._path = p
        new_parent.setText(0, p or _("Untitled"))
        new_parent.setIcon(0, icons.get("folder-open"))
        new_parent.setFlags(Qt.ItemIsEnabled)
        new_parent.setExpanded(True)
        self.sortItems(0, Qt.AscendingOrder)
    old_parent = i.parent()
    if old_parent == new_parent:
        return
    if old_parent:
        old_parent.takeChild(old_parent.indexOfChild(i))
        if old_parent.childCount() == 0:
            # The old folder item is now empty: drop it and forget its path.
            self.takeTopLevelItem(self.indexOfTopLevelItem(old_parent))
            del self._paths[old_parent._path]
    else:
        # The item was not grouped yet; detach it from the top level.
        self.takeTopLevelItem(self.indexOfTopLevelItem(i))
    new_parent.addChild(i)
    new_parent.sortChildren(0, Qt.AscendingOrder)
def setup_module():
    """Prepare the test root and compile its .po catalogs with msgfmt.

    Skips the whole module if the external ``msgfmt`` tool is missing.
    """
    if not root.exists():
        (rootdir / 'roots' / 'test-intl').copytree(root)
    # Delete remnants left over after failed build
    # Compile all required catalogs into binary format (*.mo).
    for dirpath, dirs, files in os.walk(root):
        dirpath = path(dirpath)
        for f in [f for f in files if f.endswith('.po')]:
            po = dirpath / f
            # Mirror the .po path under root/xx/LC_MESSAGES/ as a .mo file.
            mo = root / 'xx' / 'LC_MESSAGES' / (
                os.path.relpath(po[:-3], root) + '.mo')
            if not mo.parent.exists():
                mo.parent.makedirs()
            try:
                p = Popen(['msgfmt', po, '-o', mo],
                          stdout=PIPE, stderr=PIPE)
            except OSError:
                raise SkipTest  # most likely msgfmt was not found
            else:
                stdout, stderr = p.communicate()
                if p.returncode != 0:
                    # Surface msgfmt's own output before failing.
                    print(stdout)
                    print(stderr)
                    assert False, \
                        'msgfmt exited with return code %s' % p.returncode
                assert mo.isfile(), 'msgfmt failed'
def __init__(self, meta, p):
    """Per-parameter test setup derived from shared metadata.

    :param meta: shared run metadata (provides topdir, params, allzs,
        realistic, zdifs, zdif)
    :param p: key of the parameter under test in ``meta.params``
    """
    self.p = p
    path_builder = path("{topdir}/{p}")
    self.path_builder = path_builder.fill(topdir=meta.topdir, p=self.p)
    self.key = key.key(p=p)

    # perparam differs from meta
    self.ndims = meta.params[self.p]
    # ndims bins need ndims+1 bin edges; zlos/zhis are the lower/upper edges.
    self.allzs = meta.allzs[:self.ndims+1]#sorted(set(self.zlos+self.zhis))
    self.zlos = self.allzs[:-1]#meta.allzlos[:self.ndims]
    self.zhis = self.allzs[1:]#meta.allzhis[:self.ndims]
    self.zmids = (self.zlos+self.zhis)/2.
    self.zavg = sum(self.zmids)/self.ndims

    # define realistic underlying P(z) for this number of parameters
    # (normalized so the histogram integrates to one over the bin widths)
    self.realsum = sum(meta.realistic[:self.ndims])
    self.realistic_pdf = np.array([meta.realistic[k]/self.realsum/meta.zdifs[k] for k in xrange(0,self.ndims)])
    self.truePz = self.realistic_pdf
    # Clamp at machine epsilon so log() never sees zero.
    self.logtruePz = np.array([m.log(max(tPz,sys.float_info.epsilon)) for tPz in self.truePz])

    # define flat P(z) for this number of parameters
    self.avgprob = 1./self.ndims/meta.zdif
    self.logavgprob = m.log(self.avgprob)
    self.flatPz = [self.avgprob]*self.ndims
    self.logflatPz = [self.logavgprob]*self.ndims
    print('initialized '+str(self.ndims)+' parameter test')
def test_multibyte_path(app):
    """Build a project containing a multi-byte directory and file name."""
    srcdir = path(app.srcdir)
    mb_name = u"\u65e5\u672c\u8a9e"
    (srcdir / mb_name).makedirs()
    (srcdir / mb_name / (mb_name + ".txt")).write_text(
        dedent(
            """
            multi byte file name page
            ==========================
            """
        )
    )

    # Append the new page to the master toctree so it gets built.
    master_doc = srcdir / "contents.txt"
    master_doc.write_bytes(
        (
            master_doc.text()
            + dedent(
                """
                .. toctree::

                   %(mb_name)s/%(mb_name)s
                """
                % locals()
            )
        ).encode("utf-8")
    )
    app.builder.build_all()
def _script_names(src_dir):
    """Return the names of all ``.sh`` script files directly in *src_dir*."""
    if not src_dir:
        return []
    return [name for name in listdir(src_dir)
            if name.endswith('.sh') and isfile(path(src_dir, name))]
def db_connect(password):
    """Open the application database and unlock it with *password*.

    :param password: SQLCipher key applied via ``PRAGMA key``.
    :returns: an open connection with foreign-key enforcement enabled.
    """
    db = dbapi2.connect(path(app.config['DB_NAME']))
    # PRAGMA statements cannot use '?' placeholders, so the key must be
    # spliced into the SQL text.  The previous re.escape() call escaped
    # regex metacharacters (meaningless for SQL) and left single quotes
    # untouched; correct SQL string quoting doubles embedded quotes.
    db.execute("PRAGMA key = '%s'" % password.replace("'", "''"))
    db.execute("PRAGMA foreign_keys = ON")
    return db
def _get_db_log_path(conf):
    """Return the full paths of the database log files configured in *conf*.

    Returns an empty list when no log directory is configured.
    """
    if not conf.db_logs_dir:
        return []
    # Use the explicit file list if given, otherwise list the directory.
    log_files = conf.db_logs_files if conf.db_logs_files else ls(conf.db_logs_dir)
    return [path(conf.db_logs_dir) + name for name in log_files]
def setDocumentStatus(self, doc):
    """Refresh the tab text, tooltip and icon for *doc* if it has a tab."""
    if doc in self.docs:
        index = self.docs.index(doc)
        # '&' marks a keyboard accelerator in tab text, so escape it.
        text = doc.documentName().replace('&', '&&')
        if self.tabText(index) != text:
            self.setTabText(index, text)
        tooltip = util.path(doc.url())
        self.setTabToolTip(index, tooltip)
        self.setTabIcon(index, documenticon.icon(doc, self.window()))
def test_docutils_source_link_with_nonascii_file(app, status, warning):
    """Build with a non-ASCII source file name present in srcdir."""
    srcdir = path(app.srcdir)
    mb_name = u'\u65e5\u672c\u8a9e'
    try:
        (srcdir / (mb_name + '.txt')).write_text('')
    except UnicodeEncodeError:
        from path import FILESYSTEMENCODING
        # The filesystem cannot represent the name: skip instead of failing.
        raise SkipTest(
            'nonascii filename not supported on this filesystem encoding: '
            '%s', FILESYSTEMENCODING)
    app.builder.build_all()
def parse_docs_configuration():
    """Parse docs/configuration.rst into a nested mapping.

    Returns ``{section_name: {option_name: {field_name: text}}}`` by
    scanning the reST underline/table markers line by line.  ``last`` and
    ``last2`` track the two previously-seen lines, since section and
    option names appear on the line(s) *above* their underline markers.
    """
    doc_path = util.path("docs", "configuration.rst")
    with open(doc_path, encoding="utf-8") as file:
        doc_lines = file.readlines()

    sections = {}
    sec_name = None
    options = None

    opt_name = None
    opt_desc = None
    name = None
    last = last2 = None

    for line in doc_lines:

        # start of new section
        if re.match(r"^=+$", line):
            if sec_name and options:
                sections[sec_name] = options
            sec_name = last.strip()
            options = {}

        elif re.match(r"^=+ =+$", line):
            # start of option table
            if re.match(r"^-+$", last):
                opt_name = last2.strip()
                opt_desc = {}
            # end of option table
            elif opt_desc:
                options[opt_name] = opt_desc
                opt_name = None
                name = None

        # inside option table
        elif opt_name:
            if line[0].isalpha():
                # A new field row: field name, space, then its first text.
                name, _, line = line.partition(" ")
                opt_desc[name] = ""
            line = line.strip()
            if line.startswith(("* ", "- ")):
                # List items get their own output line.
                line = "\n" + line
            elif line.startswith("| "):
                # reST line blocks become troff .br line breaks.
                line = line[2:] + "\n.br"
            opt_desc[name] += line + "\n"

        last2 = last
        last = line

    # Flush the final section (no trailing underline follows it).
    sections[sec_name] = options
    return sections
def test_second_update():
    """Re-run env.update after deleting, adding and touching files."""
    # delete, add and "edit" (change saved mtime) some files and update again
    env.all_docs['contents'] = 0
    root = path(app.srcdir)
    # important: using "autodoc" because it is the last one to be included in
    # the contents.txt toctree; otherwise section numbers would shift
    (root / 'autodoc.txt').unlink()
    (root / 'new.txt').write_text('New file\n========\n')
    updated = env.update(app.config, app.srcdir, app.doctreedir, app)
    # "includes" and "images" are in there because they contain references
    # to nonexisting downloadable or image files, which are given another
    # chance to exist
    assert set(updated) == set(['contents', 'new', 'includes', 'images'])
    assert 'autodoc' not in env.all_docs
    assert 'autodoc' not in env.found_docs
def test_image_glob(app, status, warning): app.builder.build_all() # index.rst doctree = pickle.loads((app.doctreedir / 'index.doctree').bytes()) assert isinstance(doctree[0][1], nodes.image) assert doctree[0][1]['candidates'] == {'*': 'rimg.png'} assert doctree[0][1]['uri'] == 'rimg.png' assert isinstance(doctree[0][2], nodes.figure) assert isinstance(doctree[0][2][0], nodes.image) assert doctree[0][2][0]['candidates'] == {'*': 'rimg.png'} assert doctree[0][2][0]['uri'] == 'rimg.png' assert isinstance(doctree[0][3], nodes.image) assert doctree[0][3]['candidates'] == {'application/pdf': 'img.pdf', 'image/gif': 'img.gif', 'image/png': 'img.png'} assert doctree[0][3]['uri'] == 'img.*' assert isinstance(doctree[0][4], nodes.figure) assert isinstance(doctree[0][4][0], nodes.image) assert doctree[0][4][0]['candidates'] == {'application/pdf': 'img.pdf', 'image/gif': 'img.gif', 'image/png': 'img.png'} assert doctree[0][4][0]['uri'] == 'img.*' # subdir/index.rst doctree = pickle.loads((app.doctreedir / 'subdir/index.doctree').bytes()) assert isinstance(doctree[0][1], nodes.image) sub = path('subdir') assert doctree[0][1]['candidates'] == {'*': sub / 'rimg.png'} assert doctree[0][1]['uri'] == sub / 'rimg.png' assert isinstance(doctree[0][2], nodes.image) assert doctree[0][2]['candidates'] == {'application/pdf': 'subdir/svgimg.pdf', 'image/svg+xml': 'subdir/svgimg.svg'} assert doctree[0][2]['uri'] == sub / 'svgimg.*' assert isinstance(doctree[0][3], nodes.figure) assert isinstance(doctree[0][3][0], nodes.image) assert doctree[0][3][0]['candidates'] == {'application/pdf': 'subdir/svgimg.pdf', 'image/svg+xml': 'subdir/svgimg.svg'} assert doctree[0][3][0]['uri'] == sub / 'svgimg.*'
def setDocumentStatus(self, doc):
    """Refresh the tree item (text, icon, tooltip) of *doc* and re-sort."""
    try:
        i = self._items[doc]
    except KeyError:
        # this fails when a document is closed that had a job running,
        # in that case setDocumentStatus is called twice (the second time
        # when the job quits, but then we already removed the document)
        return
    # set properties according to document
    i.setText(0, doc.documentName())
    i.setIcon(0, documenticon.icon(doc, self.parentWidget().mainwindow()))
    i.setToolTip(0, util.path(doc.url()))
    # handle ordering in groups if desired
    if self._group:
        self.groupDocument(doc)
    else:
        self.sortItems(0, Qt.AscendingOrder)
def setDocumentStatus(self, doc):
    """Update the document's menu action: accelerator, text, tooltip, icon."""
    # create accels
    # Accelerators already claimed by the *other* documents.
    accels = [self._accels[d] for d in self._accels if d is not doc]
    name = doc.documentName().replace('&', '&&')
    for index, char in enumerate(name):
        if char.isalnum() and char.lower() not in accels:
            # Insert the accelerator marker before the first free character.
            name = name[:index] + '&' + name[index:]
            self._accels[doc] = char.lower()
            break
    else:
        # No free alphanumeric character left for this document.
        self._accels[doc] = ''
    # add [sticky] mark if necessary
    if doc == engrave.engraver(self.mainwindow()).stickyDocument():
        # L10N: 'always engraved': the document is marked as 'Always Engrave' in the LilyPond menu
        name += " " + _("[always engraved]")
    self._acts[doc].setText(name)
    self._acts[doc].setToolTip(util.path(doc.url()))
    icon = documenticon.icon(doc, self.mainwindow())
    if icon.name() == "text-plain":
        # Plain-text icon carries no information here; show none instead.
        icon = QIcon()
    self._acts[doc].setIcon(icon)
def save_registered_trojans(self, filename):
    """
    Saves the already registered trojans from the internal list into a text
    file, one trojan per line with fields separated by semicolons.
    :param filename: (string) the name of the text file in which the trojan
        info is saved; a value containing a backslash is used as a full path
    :return: (void)
    """
    if "\\" in filename:
        filepath = filename
    else:
        filepath = path() + "\\" + filename + ".txt"
    # make sure the target file exists before opening it for writing
    if not os.path.exists(filepath):
        createfile(filepath)
    with open(filepath, "w") as outfile:
        for trojan in self.registered:
            # "ip;port;name" followed by a newline, exactly as before
            outfile.write("{0};{1};{2}\n".format(trojan.ip, trojan.port,
                                                 trojan.name))
def load_registered_trojans(self, filename):
    """
    Loads the already registered trojans from a given text file into the
    internal list of the object.
    :param filename: (string) the name of the text file in which the trojan
        info is saved; a value containing a backslash is used as a full path
    :return: (void)
    """
    if "\\" in filename:
        filepath = filename
    else:
        filepath = path() + "\\" + filename + ".txt"
    # read the whole file and rebuild one TrojanConnection per data line;
    # lines without a semicolon (e.g. the trailing blank line) are skipped
    with open(filepath, "r") as infile:
        for line in infile.read().split("\n"):
            if ";" in line:
                fields = line.split(";")
                self.registered.append(
                    TrojanConnection(fields[0], fields[1], fields[2]))
def _test_nonascii_path(app):
    """Build a project containing a non-ASCII directory and file name."""
    srcdir = path(app.srcdir)
    mb_name = u'\u65e5\u672c\u8a9e'
    try:
        (srcdir / mb_name).makedirs()
    except UnicodeEncodeError:
        from path import FILESYSTEMENCODING
        # The filesystem cannot represent the name: skip instead of failing.
        raise SkipTest(
            'nonascii filename not supported on this filesystem encoding: '
            '%s', FILESYSTEMENCODING)

    (srcdir / mb_name / (mb_name + '.txt')).write_text(dedent("""
        multi byte file name page
        ==========================
        """))

    # Append the new page to the master toctree so it gets built.
    master_doc = srcdir / 'contents.txt'
    master_doc.write_bytes((master_doc.text() + dedent("""
            .. toctree::

               %(mb_name)s/%(mb_name)s
            """ % {'mb_name': mb_name})
    ).encode('utf-8'))
    app.builder.build_all()
# -*- coding: utf-8 -*- """ test_filter_syntax_error ~~~~~~~~~~~~~~~~~~~~~~~~ Test response on syntax errors in filter. """ import nose.tools from six import StringIO import re from util import path, with_app srcdir = path(__file__).parent.joinpath('filter_syntax_error').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_filter_syntax_error(app): app.builder.build_all() warnings = warnfile.getvalue() nose.tools.assert_equal( len(re.findall('syntax error in :filter: expression', warnings)), 9)
def init_log(): fmt_str = '%(relativeCreated)6d {testcase} %(filename)12s:%(lineno)-3d %(message)s' log.basicConfig(level=loglvls[args.log], format=fmt_str.format(testcase=args.testcase)) loglvls = { 'debug': log.DEBUG, 'info': log.INFO, 'warning': log.WARNING, 'error': log.ERROR, 'critical': log.CRITICAL } if __name__ == '__main__': args = get_args() args.testcase = path(args.testcase).name config = ConfigParser() config.read(['default.cfg', 'main.cfg', args.config]) init_log() update_config(config, 'score', args.score) update_config(config, 'solve', args.solve) sc_fn = get_function('score', config) with open('in/' + args.testcase + '.in') as f: inp = f.read() get_ans = get_ans_fn(config, inp, log) def run(seed):
# -*- coding: utf-8 -*- """ test_issue1 ~~~~~~~~~~~ Test Tinkerer and check output. """ import nose.tools from util import path, with_app srcdir = path(__file__).parent.joinpath('issue1').abspath() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warningiserror=True) def test_tinker(app): app.builder.build_all() nose.tools.assert_true(app.env.bibtex_cache.is_cited(u"2011:BabikerIPv6")) nose.tools.assert_equal( app.env.bibtex_cache.get_label_from_key(u"2011:BabikerIPv6"), u"1")
def benchmark_remote_logs_dir(self):
    """Remote directory that holds benchmark logs for the current db profile."""
    # Fall back to the current directory when the setting is empty.
    logs_dir = not_empty(self._conf['benchmark_remote_logs_dir'],
                         self._CURRENT_DIR)
    return path(self.benchmark_remote_home_dir, logs_dir, self.db_profile)
def build_gallery_dl_1(path=None):
    """Generate the gallery-dl(1) man page and write it to *path*.

    Options are collected from the argparse parser built by
    ``gallery_dl.option.build_parser()``; actions whose help text starts
    with "==" are skipped.  When *path* is not given the page is written
    to data/man/gallery-dl.1.
    """
    OPTS_FMT = """.TP\n.B "{}" {}\n{}"""
    TEMPLATE = r"""
.TH "GALLERY-DL" "1" "%(date)s" "%(version)s" "gallery-dl Manual"
.\" disable hyphenation
.nh

.SH NAME
gallery-dl \- download image-galleries and -collections

.SH SYNOPSIS
.B gallery-dl
[OPTION]... URL...

.SH DESCRIPTION
.B gallery-dl
is a command-line program to download image-galleries and -collections
from several image hosting sites.
It is a cross-platform tool
with many configuration options
and powerful filenaming capabilities.

.SH OPTIONS
%(options)s

.SH EXAMPLES
.TP
gallery-dl \f[I]URL\f[]
Download images from \f[I]URL\f[].
.TP
gallery-dl -g -u <username> -p <password> \f[I]URL\f[]
Print direct URLs from a site that requires authentication.
.TP
gallery-dl --filter 'type == "ugoira"' --range '2-4' \f[I]URL\f[]
Apply filter and range expressions.
This will only download the second, third, and fourth file
where its type value is equal to "ugoira".
.TP
gallery-dl r:\f[I]URL\f[]
Scan \f[I]URL\f[] for other URLs and invoke \f[B]gallery-dl\f[] on them.
.TP
gallery-dl oauth:\f[I]SITE\-NAME\f[]
Gain OAuth authentication tokens for
.IR deviantart ,
.IR flickr ,
.IR reddit ,
.IR smugmug ", and"
.IR tumblr .

.SH FILES
.TP
.I /etc/gallery-dl.conf
The system wide configuration file.
.TP
.I ~/.config/gallery-dl/config.json
Per user configuration file.
.TP
.I ~/.gallery-dl.conf
Alternate per user configuration file.

.SH BUGS
https://github.com/mikf/gallery-dl/issues

.SH AUTHORS
Mike Fährmann <*****@*****.**>
.br
and https://github.com/mikf/gallery-dl/graphs/contributors

.SH "SEE ALSO"
.BR gallery-dl.conf (5)
"""

    options = []
    for action in gallery_dl.option.build_parser()._actions:
        if action.help.startswith("=="):
            continue
        options.append(
            OPTS_FMT.format(
                # Escape hyphens in option names for troff.
                ", ".join(action.option_strings).replace("-", r"\-"),
                r"\f[I]{}\f[]".format(action.metavar) if action.metavar else "",
                action.help,
            ))

    if not path:
        path = util.path("data/man/gallery-dl.1")
    with open(path, "w", encoding="utf-8") as file:
        file.write(
            TEMPLATE.lstrip() % {
                "options": "\n".join(options),
                "version": gallery_dl.version.__version__,
                "date": datetime.datetime.now().strftime("%Y-%m-%d"),
            })
def _to_file_paths(prefix_path, file_names):
    """Join each name in *file_names* onto *prefix_path*.

    NOTE(review): under Python 3 this returns a lazy ``map`` object, not a
    list — callers must iterate it exactly once (wrap in ``list()`` if a
    reusable sequence is expected; confirm against the call sites).
    """
    return map(lambda n: path(prefix_path, n), file_names)
def setup_db_remote_dir(self):
    """Remote path of the database setup directory."""
    setup_subdir = self.db_parameters.get('setup_remote_dir')
    return path(self._base_conf.benchmark_remote_home_dir, setup_subdir)
# -*- coding: utf-8 -*- """ test_list_invalid ~~~~~~~~~~~~~~~~~ Test invalid ``:list:`` option. """ from StringIO import StringIO import re from util import path, with_app srcdir = path(__file__).parent.joinpath('list_invalid').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_list_invalid(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search( "unknown bibliography list type 'thisisintentionallyinvalid'", warnings)
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Build a standalone executable using PyInstaller""" import PyInstaller.__main__ import util PyInstaller.__main__.run([ "--onefile", "--console", "--name", "gallery-dl." + ("exe" if PyInstaller.is_win else "bin"), "--additional-hooks-dir", util.path("scripts"), "--distpath", util.path("dist"), "--workpath", util.path("build"), "--specpath", util.path("build"), util.path("gallery_dl", "__main__.py"), ])
import datetime import util from gallery_dl import extractor, job, config from test.test_results import setup_test_config # filter test cases tests = [(idx, extr, url, result) for extr in extractor.extractors() if hasattr(extr, "test") and extr.test if len(sys.argv) <= 1 or extr.category in sys.argv for idx, (url, result) in enumerate(extr._get_tests()) if result] # setup target directory path = util.path("archive", "testdb", str(datetime.date.today())) os.makedirs(path, exist_ok=True) for idx, extr, url, result in tests: # filename name = "{}-{}-{}.json".format(extr.category, extr.subcategory, idx) print(name) # config values setup_test_config() if "options" in result: for key, value in result["options"]: config.set(key.split("."), value) if "range" in result:
# -*- coding: utf-8 -*- """ test_citationnotfound ~~~~~~~~~~~~~~~~~~~~~ Citation not found check. """ import re from StringIO import StringIO from util import path, with_app srcdir = path(__file__).parent.joinpath('citationnotfound').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_citationnotfound(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search('citation not found: nosuchkey', warnings)
# -*- coding: utf-8 -*- """ test_bibfilenotfound ~~~~~~~~~~~~~~~~~~~~ Bib file not found check. """ import re from six import StringIO from util import path, with_app srcdir = path(__file__).parent.joinpath('bibfilenotfound').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_bibfilenotfound(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search('could not open bibtex file .*unknown[.]bib', warnings)
def path(self, thumb=False):
    """Return the media path for this object.

    :param thumb: if True, return the thumbnail variant's path.

    Note: the method name shadows the module-level ``path`` helper at
    class level, but the lookup inside the body still resolves to the
    module-level function.  The previously assigned local ``suffix``
    was never used and has been removed as dead code.
    """
    return path('media/%s%s' % (self.id, ('_thumb' if thumb else '')))
def _get_workload_log_path(conf, host):
    """Full remote path of the workload log file for *host*."""
    return path(conf.benchmark_remote_logs_dir,
                log_file_name_formatter(conf.workload_name, host))
tests = [ (idx, extr, url, result) for extr in extractor.extractors() if hasattr(extr, "test") and extr.test if len(sys.argv) <= 1 or extr.category in sys.argv for idx, (url, result) in enumerate(extr._get_tests()) if result ] # setup target directory path = util.path("archive", "testdb", str(datetime.date.today())) os.makedirs(path, exist_ok=True) for idx, extr, url, result in tests: # filename name = "{}-{}-{}.json".format(extr.category, extr.subcategory, idx) print(name) # config values setup_test_config() if "options" in result: for key, value in result["options"]: config.set(key.split("."), value)
def test_docutils_source_link(app):
    """Build an otherwise-empty project to exercise the source-link code."""
    src = path(app.srcdir)
    # Empty config and master document are enough for a full build.
    for name in ('conf.py', 'contents.rst'):
        (src / name).write_text('')
    app.builder.build_all()
def build_gallery_dl_conf_5(path=None):
    """Generate the gallery-dl.conf(5) man page and write it to *path*.

    Option documentation is taken from docs/configuration.rst via
    ``parse_docs_configuration()``.  When *path* is not given the page is
    written to data/man/gallery-dl.conf.5.
    """
    TEMPLATE = r"""
.TH "GALLERY-DL.CONF" "5" "%(date)s" "%(version)s" "gallery-dl Manual"
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
.ad l

.SH NAME
gallery-dl.conf \- gallery-dl configuration file

.SH DESCRIPTION
gallery-dl will search for configuration files in the following places
every time it is started, unless
.B --ignore-config
is specified:
.PP
.RS 4
.nf
.I /etc/gallery-dl.conf
.I $HOME/.config/gallery-dl/config.json
.I $HOME/.gallery-dl.conf
.fi
.RE
.PP
It is also possible to specify additional configuration files with the
.B -c/--config
command-line option or to add further option values with
.B -o/--option
as <key>=<value> pairs,

Configuration files are JSON-based and therefore don't allow any ordinary
comments, but, since unused keys are simply ignored, it is possible to
utilize those as makeshift comments by settings their values to arbitrary
strings.

.SH EXAMPLE
{
.RS 4
"base-directory": "/tmp/",
.br
"extractor": {
.RS 4
"pixiv": {
.RS 4
"directory": ["Pixiv", "Works", "{user[id]}"],
.br
"filename": "{id}{num}.{extension}",
.br
"username": "******",
.br
"password": "******"
.RE
},
.br
"flickr": {
.RS 4
"_comment": "OAuth keys for account 'foobar'",
.br
"access-token": "0123456789-0123456789abcdef",
.br
"access-token-secret": "fedcba9876543210"
.RE
}
.RE
},
.br
"downloader": {
.RS 4
"retries": 3,
.br
"timeout": 2.5
.RE
}
.RE
}

%(options)s

.SH BUGS
https://github.com/mikf/gallery-dl/issues

.SH AUTHORS
Mike Fährmann <*****@*****.**>
.br
and https://github.com/mikf/gallery-dl/graphs/contributors

.SH "SEE ALSO"
.BR gallery-dl (1)
"""

    sections = parse_docs_configuration()
    content = []
    for sec_name, section in sections.items():
        content.append(".SH " + sec_name.upper())
        for opt_name, option in section.items():
            content.append(".SS " + opt_name)
            for field, text in option.items():
                if field in ("Type", "Default"):
                    # Short fields get a hanging indent sized to the label.
                    content.append('.IP "{}:" {}'.format(
                        field, len(field) + 2))
                    content.append(strip_rst(text))
                else:
                    content.append('.IP "{}:" 4'.format(field))
                    # Example blocks keep their literal markup.
                    content.append(strip_rst(text, field != "Example"))

    if not path:
        path = util.path("data/man/gallery-dl.conf.5")
    with open(path, "w", encoding="utf-8") as file:
        file.write(
            TEMPLATE.lstrip() % {
                "options": "\n".join(content),
                "version": gallery_dl.version.__version__,
                "date": datetime.datetime.now().strftime("%Y-%m-%d"),
            })
# -*- coding: utf-8 -*- """ test_invalid_cite_option ~~~~~~~~~~~~~~~~~~~~~~~~ Test behaviour when invalid cite option is given. """ import re from six import StringIO from util import path, with_app srcdir = path(__file__).parent.joinpath('invalid_cite_option').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_invalid_cite_option(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search('unknown option: "thisisintentionallyinvalid"', warnings)
def logs_remote_dir(self):
    """Remote log directory from db parameters, or None if unset."""
    remote_dir = self.db_parameters.get('logs_remote_dir')
    if remote_dir:
        return path(remote_dir)
    # implicit None when the parameter is missing or empty
# caption w("Supported Sites\n") w("===============\n") # table head sep = " ".join("=" * c[1] for c in columns) + "\n" w(sep) w(" ".join(pad(c[0], c) for c in columns).strip() + "\n") w(sep) # table body for lst in extractors: w(" ".join( pad(col[2](lst), col, lst[0].category) for col in columns ).strip()) w("\n") # table bottom w(sep) w("\n") # substitutions for sub, value in subs: w(".. {} replace:: {}\n".format(sub, value)) outfile = sys.argv[1] if len(sys.argv) > 1 else "supportedsites.rst" with open(util.path("docs", outfile), "w") as file: write_output(file, COLUMNS, build_extractor_list())
def setup_remote_dir(self):
    """Remote path of this service's setup directory."""
    setup_subdir = self._srv_conf.get('setup_remote_dir')
    return path(self._base_conf.benchmark_remote_home_dir, setup_subdir)
# -*- coding: utf-8 -*- """ test_sphinx ~~~~~~~~~~~ General Sphinx test and check output. """ import re from StringIO import StringIO from util import path, with_app srcdir = path(__file__).parent.joinpath('sphinx').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_sphinx(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search(u'could not relabel citation \\[Test01\\]', warnings) assert re.search(u'could not relabel citation \\[Test02\\]', warnings) assert re.search(u'could not relabel citation \\[Wa04\\]', warnings) assert re.search( u'could not relabel citation reference \\[Test01\\]', warnings)
def benchmark_local_logs_dir(self):
    """Local directory that holds benchmark logs for the current db profile."""
    return path(self.benchmark_local_home_dir, self.db_profile)
# -*- coding: utf-8 -*- """ test_invalid_cite_option ~~~~~~~~~~~~~~~~~~~~~~~~ Test behaviour when invalid cite option is given. """ import re from StringIO import StringIO from util import path, with_app srcdir = path(__file__).parent.joinpath('invalid_cite_option').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_invalid_cite_option(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search('unknown option: "thisisintentionallyinvalid"', warnings)
_arguments -C -S \\ %(opts)s \\ '*:URL:_urls' && rc=0 return rc """ opts = [] for action in option.build_parser()._actions: if not action.option_strings: continue elif len(action.option_strings) == 1: opt = action.option_strings[0] else: opt = "{" + ",".join(action.option_strings) + "}" opt += "'[" + action.help.replace("'", '"') + "]'" if action.metavar: opt += ":'<" + action.metavar.lower() + ">'" if action.metavar in ("FILE", "CFG", "DEST"): opt += ":_files" opts.append(opt) PATH = util.path("data/completion/_gallery-dl") with open(PATH, "w", encoding="utf-8") as file: file.write(TEMPLATE % {"opts": " \\\n".join(opts)})
if __name__ == '__main__':
    args = get_args()
    if args.rescore:
        # Wipe recorded best scores before re-scoring everything.
        clean_max()
    config = ConfigParser()
    config.read(['default.cfg', 'main.cfg', args.config])
    update_config(config, 'score', args.score)
    sc_fn = get_function('score', config)
    if not (args and (args.inp or args.ans)):
        # No explicit files given: score every matching answer/submission
        # file found on disk.
        file_lst = glob('*'.join(ans_f if args.rescore else sub_f))
        files = [(ans2in(ans), ans) for ans in file_lst]
    else:
        if not args.ans:
            # Derive input and submission names from the given input path.
            pth = path(args.inp)
            args.inp = pth.name.join(in_f)
            args.ans = pth.name.join(sub_f)
        files = [(args.inp, args.ans)]
    for inpf, ansf in files:
        ipth, apth = path(inpf), path(ansf)
        inp = ipth.read()
        ans = apth.read()
        case, seed = ipth.name, None
        # The seed, if any, is encoded in the answer file name.
        m = fname_re.match(apth.name)
        if m:
            seed = m.group(3)
        process(inp, ans, seed, sc_fn, case)
# -*- coding: utf-8 -*- """ test_issue1 ~~~~~~~~~~~ Test Tinkerer and check output. """ import nose.tools from util import path, with_app srcdir = path(__file__).parent.joinpath('issue1').abspath() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warningiserror=True) def test_tinker(app): app.builder.build_all() nose.tools.assert_equal( app.env.bibtex_cache.get_cited_docnames(u"2011:BabikerIPv6"), {u"2012/07/24/hello_world_"}) nose.tools.assert_equal( app.env.bibtex_cache.get_label_from_key(u"2011:BabikerIPv6"), u"BNC11")
else COMPREPLY=( $(compgen -W "%(opts)s" -- "${cur}") ) fi } complete -F _gallery_dl gallery-dl """ opts = [] diropts = [] fileopts = [] for action in option.build_parser()._actions: if action.metavar in ("DEST",): diropts.extend(action.option_strings) elif action.metavar in ("FILE", "CFG"): fileopts.extend(action.option_strings) for opt in action.option_strings: if opt.startswith("--"): opts.append(opt) PATH = util.path("gallery-dl.bash_completion") with open(PATH, "w", encoding="utf-8") as file: file.write(TEMPLATE % { "opts" : " ".join(opts), "diropts" : "|".join(diropts), "fileopts": "|".join(fileopts), })
# -*- coding: utf-8 -*- """ test_sphinx ~~~~~~~~~~~ General Sphinx test and check output. """ import re from six import StringIO from util import path, with_app srcdir = path(__file__).parent.joinpath('sphinx').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_sphinx(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search(u'could not relabel citation \\[Test01\\]', warnings) assert re.search(u'could not relabel citation \\[Test02\\]', warnings) assert re.search(u'could not relabel citation \\[Wa04\\]', warnings) assert re.search(u'could not relabel citation reference \\[Test01\\]', warnings) assert re.search(u'could not relabel citation reference \\[Test02\\]',
# -*- coding: utf-8 -*- """ test_citationnotfound ~~~~~~~~~~~~~~~~~~~~~ Citation not found check. """ import re from six import StringIO from util import path, with_app srcdir = path(__file__).parent.joinpath('citationnotfound').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_citationnotfound(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search('citation not found: nosuchkey', warnings)
def load_themes():
    """Yield theme base directories: the test root first, then any plugins."""
    test_roots = path(__file__).abspath().parent / 'roots'
    yield test_roots / 'test-double-inheriting-theme' / 'base_themes_dir'
    for plugin_dir in load_theme_plugins():
        yield plugin_dir
#!/usr/bin/env pypy3
"""Print the best recorded score per test case, the solution files that
produced it, and the overall total.

Fixes over the previous revision: corrected the shebang
(``/user/bin/env`` -> ``/usr/bin/env``), replaced the bare ``except:``
(which silently swallowed *any* error, including typos) with the two
failure modes that can actually occur when reading max.json, and closed
the file handle via a ``with`` block instead of leaking it.
"""
import glob
import json

from util import path, score2str

try:
    with open('max.json', 'r') as fh:
        j = json.load(fh)
except (OSError, ValueError):
    # No max.json yet, or it is unreadable/corrupt: start with no scores.
    j = {}

S = 0
for name in sorted(j.keys()):
    v = j[name]['score']
    f = j[name]['folder']
    pys = glob.glob(f'{f}/*.py')
    sol_name = ''
    if pys:
        sol_name = ' '.join(path(pyf).name for pyf in pys)
    print('{:25}: {:20} {:20} {}'.format(name, score2str(v), sol_name, f))
    S += v
print('{:25}: {:20}'.format('Total', score2str(S)))
def ans2in(ans):
    """Map an answer-file path to the corresponding input-file name.

    Extracts the test-case stem from *ans* (via the ``fname_re`` pattern
    when it matches, otherwise the bare file name) and wraps it in the
    input-file name template ``in_f``.

    The fallback branch previously re-parsed the path with a second
    ``path(ans)`` call; it now reuses the already-bound ``pth``.
    """
    pth = path(ans)
    m = fname_re.match(pth.name)
    return (m.group(1) if m else pth.name).join(in_f)
domain = domains[category] content = column[2](category, subcategories, domain) append(" <td>" + content + "</td>") append("</tr>") TEMPLATE = """# Supported Sites <!-- auto-generated by {} --> Consider all sites to be NSFW unless otherwise known. <table> <thead valign="bottom"> {} </thead> <tbody valign="top"> {} </tbody> </table> """ return TEMPLATE.format( "/".join(os.path.normpath(__file__).split(os.sep)[-2:]), "\n".join(thead), "\n".join(tbody), ) categories, domains = build_extractor_list() outfile = sys.argv[1] if len(sys.argv) > 1 else "supportedsites.md" with open(util.path("docs", outfile), "w") as fp: fp.write(generate_output(COLUMNS, categories, domains))
def tempdir(tmpdir):
    """
    Temporary directory wrapped with the `path` class.

    This fixture exists for compatibility with the old test implementation.
    """
    return util.path(tmpdir)
import discord import os import logging import bot_modules import util import config import data import log_config import hook from hook import Hook # set up console logging, defer logging channel setup until client is initialised logging.getLogger().setLevel(logging.INFO) log_config.configure_console() log_config.configure_file(util.path("log.txt")) logger = logging.getLogger(__name__) initialised = False client = discord.Client() config.init_configuration() bot_modules.import_modules() @client.event async def on_ready(): global initialised if not initialised: log_config.configure_discord(client) await data.update_repositories() await Hook.get("on_init")(client)
# -*- coding: utf-8 -*- """ test_filter_option_clash ~~~~~~~~~~~~~~~~~~~~~~~~ Test filter option clash with all, cited, and notcited. """ from StringIO import StringIO import re from util import path, with_app srcdir = path(__file__).parent.joinpath('filter_option_clash').abspath() warnfile = StringIO() def teardown_module(): (srcdir / '_build').rmtree(True) @with_app(srcdir=srcdir, warning=warnfile) def test_filter_option_clash(app): app.builder.build_all() warnings = warnfile.getvalue() assert re.search(':filter: overrides :all:', warnings) assert re.search(':filter: overrides :cited:', warnings) assert re.search(':filter: overrides :notcited:', warnings)
def do_pandoc_generation(notes_folder: str, temp_folder: str,
                         html_folder: str) -> None:
    """Convert recently-modified markdown notes to HTML with pandoc.

    Only notes flagged as checked during this run (plus the notes they
    link to, whose backlink pages must also be refreshed) are rebuilt,
    so a single edit does not trigger a full regeneration.
    """
    logger: Logger = get_logger()
    for folder in [notes_folder, temp_folder, html_folder]:
        logger.info('creating folder: \'%s\' if it doesn\'t exist already',
                    folder)
        util.create_folder(folder)

    # only queue up files for pandoc generation if they (or the files that
    # point to them) have been modified recently, so that we don't have to
    # regenerate everything each time we make one change in one file.
    state_file: dict = util.read_existing_json_state_file(location=temp_folder)
    relevant_file_names: Set[str] = set()
    for file_name in os.listdir(notes_folder):
        if not util.is_md(file_name):
            continue
        key: str = util.strip_file_extension(file_name)
        if state_file['files'][key]['last_checked'] == state_file['runtime']:
            relevant_file_names.add(file_name)
            # ensure that we also refresh the backlinks for the files that are
            # referenced by this file (since the links go two ways)
            with open(util.path(notes_folder, file_name), 'r') as f:
                contents = f.read()
            # the results of re.findall() will look something like
            # [('Page B', 'pageB.md')]
            # where the link in markdown would've been [Page B](pageB.md)
            for _, link in util.md_links.findall(contents):
                if util.is_md(link):
                    relevant_file_names.add(link)

    for file in relevant_file_names:
        # the path to the note is always gonna be in the notes_folder
        file_full_path: str = util.path(notes_folder, file)
        note_title = util.note_title(file_full_path)
        # the output HTML file should have the same name as the note but with
        # the .html suffix and it should be in the html folder
        file_html: str = util.path(html_folder, file)
        file_html: str = util.change_file_extension(file_html, '.html')
        # the backlinks file should have the same name as the note but with
        # the .md.backlinks suffix, and it should be in the temp folder
        file_backlinks: str = util.path(temp_folder, file + '.backlinks')
        logger.info('converting %s to html, title=%s', file, note_title)
        util.do_run(cmd=[
            'pandoc',
            file_full_path,
            file_backlinks,
            f'--defaults=pandoc.yaml',
            f'--id-prefix={util.to_footnote_id(file)}',
            f'--output={file_html}',
            f'--metadata=pagetitle:{note_title}'
        ])

    # if the index.md was generated in the temp folder, pandocify it
    index_file_name = 'index.md'
    generated_index_file = util.path(temp_folder, index_file_name)
    if util.check_file_exists(generated_index_file):
        output_file = util.path(
            html_folder,
            util.change_file_extension(index_file_name, '.html'))
        index_title = util.note_title(generated_index_file)
        logger.debug('converting %s to html, title=%s',
                     generated_index_file, index_title)
        util.do_run(cmd=[
            'pandoc',
            generated_index_file,
            f'--defaults=pandoc.yaml',
            f'--id-prefix={util.to_footnote_id(index_file_name)}',
            f'--output={output_file}',
            f'--metadata=pagetitle:{index_title}'
        ])